Dataset columns:
  max_stars_repo_path   string   (length 4 to 245)
  max_stars_repo_name   string   (length 7 to 115)
  max_stars_count       int64    (101 to 368k)
  id                    string   (length 2 to 8)
  content               string   (length 6 to 1.03M)
test/IECore/BasicPreset.py
ericmehl/cortex
386
27
########################################################################## # # Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## from __future__ import with_statement import os import sys import shutil import unittest import IECore class TestBasicPreset( unittest.TestCase ) : def testCopy( self ) : testObj = IECore.Parameterised( "testParameterised1" ) testObj.parameters().addParameters( [ IECore.BoolParameter( "a", "", True ), IECore.FloatParameter( "b", "", 1.0 ), ] ) testObj2 = IECore.Parameterised( "testParameterised2" ) testObj2.parameters().addParameters( [ IECore.BoolParameter( "a", "", False ), IECore.FloatParameter( "c", "", 0.0 ), ] ) p = IECore.BasicPreset( testObj, testObj.parameters() ) self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) ) self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) ) testObj.parameters()["a"].setTypedValue( False ) testObj.parameters()["b"].setTypedValue( 0.0 ) p( testObj, testObj.parameters() ) self.assertEqual( testObj.parameters()["a"].getTypedValue(), True ) self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 ) p2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) ) self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) ) self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) ) p2( testObj2, testObj2.parameters() ) self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True ) self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 ) def testLoad( self ) : testObj = IECore.Parameterised( "testParameterised1" ) testObj.parameters().addParameters( [ IECore.BoolParameter( "a", "", True ), IECore.FloatParameter( "b", "", 1.0 ), ] ) testObj2 = IECore.Parameterised( "testParameterised1" ) testObj2.parameters().addParameters( [ IECore.BoolParameter( "a", "", False ), IECore.FloatParameter( "c", "", 0.0 ), ] ) savePath = os.path.abspath( os.path.join( os.path.dirname( 
__file__ ), "data", "basicPreset" ) ) messageHandler = IECore.CapturingMessageHandler() with messageHandler : p = IECore.BasicPreset( os.path.join( savePath, "basicPresetLoadTest", "basicPresetLoadTest-1.cob" ) ) self.assertEqual( len( messageHandler.messages ), 0 ) self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) ) self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) ) testObj.parameters()["a"].setTypedValue( False ) testObj.parameters()["b"].setTypedValue( 0.0 ) p( testObj, testObj.parameters() ) self.assertEqual( testObj.parameters()["a"].getTypedValue(), True ) self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 ) def testSave( self ) : testObj = IECore.Parameterised( "testParameterised1" ) testObj.parameters().addParameters( [ IECore.BoolParameter( "a", "", True ), IECore.FloatParameter( "b", "", 1.0 ), ] ) testObj2 = IECore.Parameterised( "testParameterised1" ) testObj2.parameters().addParameters( [ IECore.BoolParameter( "a", "", False ), IECore.FloatParameter( "c", "", 0.0 ), ] ) savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) ) preset = IECore.BasicPreset( testObj, testObj.parameters() ) # Save for the classLoader and check its there, we test the 'loadability' later... preset.save( savePath, "basicPresetTest" ) self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.cob" ) ) ) self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.py" ) ) ) # save without the classLoader and check its there preset.save( savePath, "basicPresetTest", classLoadable=False ) self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest.cob" ) ) ) # reload p = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest.cob" ) ) self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) ) self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) ) testObj.parameters()["a"].setTypedValue( False ) testObj.parameters()["b"].setTypedValue( 0.0 ) p( testObj, testObj.parameters() ) self.assertEqual( testObj.parameters()["a"].getTypedValue(), True ) self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 ) preset2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) ) preset2.save( savePath, "basicPresetTest2", classLoadable=False ) #reload p2 = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest2.cob" ) ) self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) ) self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) ) p2( testObj2, testObj2.parameters() ) self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True ) self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 ) def testClassLoader( self ) : testObj = IECore.Parameterised( "testParameterised1" ) testObj.parameters().addParameters( [ IECore.BoolParameter( "a", "", True ), IECore.FloatParameter( "b", "", 1.0 ), ] ) savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) ) preset = IECore.BasicPreset( testObj, testObj.parameters() ) preset.save( savePath, "basicPresetTestClassLoader" ) # make sure that no messages are emitted during loading messageHandler = IECore.CapturingMessageHandler() with messageHandler : loader = IECore.ClassLoader( IECore.SearchPath( savePath ) ) p = loader.load( "basicPresetTestClassLoader" )() self.assertEqual( len( messageHandler.messages ), 0 ) self.assertTrue( isinstance( p, IECore.BasicPreset 
) ) p.metadata() def testClasses( self ) : testObj = IECore.Parameterised( "testParameterised1" ) testObj.parameters().addParameters( [ IECore.BoolParameter( "a", "", True ), IECore.ClassParameter( "b", "", "IECORE_OP_PATHS", os.path.join( "maths", "multiply" ), 2 ), ] ) testObj2 = IECore.Parameterised( "testParameterised2" ) testObj2.parameters().addParameters( [ IECore.ClassParameter( "c", "", "IECORE_OP_PATHS" ), ] ) classes1 = testObj.parameters()["b"].getClass( True ) classes2 = testObj2.parameters()["c"].getClass( True ) self.assertNotEqual( classes1[1:], classes2[1:] ) p = IECore.BasicPreset( testObj, testObj.parameters()["b"] ) self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) ) self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) ) self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) ) p( testObj2, testObj2.parameters()["c"] ) classes1 = testObj.parameters()["b"].getClass( True ) classes2 = testObj2.parameters()["c"].getClass( True ) self.assertEqual( classes1[1:], classes2[1:] ) def testClassVectors( self ) : testObj = IECore.Parameterised( "testParameterised1" ) testObj.parameters().addParameters( [ IECore.BoolParameter( "a", "", True ), IECore.ClassVectorParameter( "b", "", "IECORE_OP_PATHS" ), ] ) testObj.parameters()["b"].setClasses( [ ( "mult", os.path.join( "maths", "multiply" ), 2 ), ( "coIO", "compoundObjectInOut", 1 ), ] ) testObj2 = IECore.Parameterised( "testParameterised2" ) testObj2.parameters().addParameters( [ IECore.ClassVectorParameter( "c", "", "IECORE_OP_PATHS" ), ] ) classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ] classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ] self.assertNotEqual( classes1, classes2 ) p = IECore.BasicPreset( testObj, testObj.parameters()["b"] ) self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) ) self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) ) self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) ) p( testObj2, testObj2.parameters()["c"] ) classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ] classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ] self.assertEqual( classes1, classes2 ) def testCompoundVectorParameter( self ) : p = IECore.Parameterised( "test" ) p.parameters().addParameters( [ IECore.BoolParameter( "a", "", False ), IECore.CompoundVectorParameter( "c", "", members = [ IECore.StringVectorParameter( "s", "", IECore.StringVectorData() ), IECore.BoolVectorParameter( "b", "", IECore.BoolVectorData() ), ] ) ] ) p["c"]["s"].setValue( IECore.StringVectorData( [ "1", "2", "3" ] ) ) p["c"]["b"].setValue( IECore.BoolVectorData( [ True, False, True ] ) ) v = p.parameters().getValue().copy() preset = IECore.BasicPreset( p, p.parameters() ) self.assertTrue( preset.applicableTo( p, p.parameters() ) ) p.parameters().setValue( p.parameters().defaultValue ) self.assertNotEqual( p.parameters().getValue(), v ) preset( p, p.parameters() ) self.assertEqual( p.parameters().getValue(), v ) def tearDown( self ) : savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) ) paths = ( os.path.join( savePath, "basicPresetTest" ), os.path.join( savePath, "basicPresetTest.cob" ), os.path.join( savePath, "basicPresetTest2.cob" ), os.path.join( savePath, "basicPresetTestClassLoader" ), ) for p in paths : if os.path.isdir( p ) : shutil.rmtree( p ) elif os.path.isfile( p ) : os.remove( p ) if __name__ == "__main__": 
unittest.main()
src/biotite/copyable.py
danijoo/biotite
208
56
<filename>src/biotite/copyable.py
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.

__name__ = "biotite"
__author__ = "<NAME>"
__all__ = ["Copyable"]

import abc


class Copyable(metaclass=abc.ABCMeta):
    """
    Base class for all objects that should be copyable.

    The public method `copy()` first creates a fresh instance of the
    class of the instance to be copied via the `__copy_create__()`
    method. All variables that could not be set via the constructor
    are then copied via `__copy_fill__()`, starting with the method in
    the uppermost base class and ending with the class of the instance
    to be copied. This approach solves the problem of encapsulated
    variables in superclasses.
    """

    def copy(self):
        """
        Create a deep copy of this object.

        Returns
        -------
        copy
            A copy of this object.
        """
        clone = self.__copy_create__()
        self.__copy_fill__(clone)
        return clone

    def __copy_create__(self):
        """
        Instantiate a new object of this class.

        Only the constructor should be called in this method. All
        further attributes that need to be copied are handled in
        `__copy_fill__()`. Do not call the `super()` method here.

        This method must be overridden if the constructor takes
        parameters.

        Returns
        -------
        copy
            A freshly instantiated copy of *self*.
        """
        return type(self)()

    def __copy_fill__(self, clone):
        """
        Copy all necessary attributes to the new object.

        Always call the `super()` method as the first statement.

        Parameters
        ----------
        clone
            The freshly instantiated copy of *self*.
        """
        pass
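The two hooks are easiest to see in a concrete subclass. The sketch below is illustrative only: the `Vector` class, its `_length`/`_label` attributes, and the example values are hypothetical, and it assumes the `Copyable` class above is in scope.

class Vector(Copyable):
    """Hypothetical subclass illustrating the two-step copy protocol."""

    def __init__(self, length):
        self._length = length
        self._label = None  # state that the constructor cannot set

    def __copy_create__(self):
        # Constructor takes a parameter, so override and pass it through
        return Vector(self._length)

    def __copy_fill__(self, clone):
        super().__copy_fill__(clone)
        # Copy the state that __copy_create__() could not set
        clone._label = self._label


original = Vector(3)
original._label = "velocity"
duplicate = original.copy()
assert duplicate._label == "velocity"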
tests/keras/layers/wrappers_test.py
kalyc/keras-apache-mxnet
300
59
<filename>tests/keras/layers/wrappers_test.py<gh_stars>100-1000 import pytest import numpy as np import copy from numpy.testing import assert_allclose from keras.utils import CustomObjectScope from keras.layers import wrappers, Input, Layer from keras.layers import RNN from keras import layers from keras.models import Sequential, Model, model_from_json from keras import backend as K from keras.utils.generic_utils import object_list_uid, to_list @pytest.mark.skipif(K.backend() == 'mxnet', reason='MXNet backend does not support TimeDistributed and RNN yet') def test_TimeDistributed(): # first, test with Dense layer model = Sequential() model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4))) model.add(layers.Activation('relu')) model.compile(optimizer='rmsprop', loss='mse') model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 2)), epochs=1, batch_size=10) # test config model.get_config() # test when specifying a batch_input_shape test_input = np.random.random((1, 3, 4)) test_output = model.predict(test_input) weights = model.layers[0].get_weights() reference = Sequential() reference.add(wrappers.TimeDistributed(layers.Dense(2), batch_input_shape=(1, 3, 4))) reference.add(layers.Activation('relu')) reference.compile(optimizer='rmsprop', loss='mse') reference.layers[0].set_weights(weights) reference_output = reference.predict(test_input) assert_allclose(test_output, reference_output, atol=1e-05) # test with Embedding model = Sequential() model.add(wrappers.TimeDistributed(layers.Embedding(5, 6), batch_input_shape=(10, 3, 4), dtype='int32')) model.compile(optimizer='rmsprop', loss='mse') model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'), np.random.random((10, 3, 4, 6)), epochs=1, batch_size=10) # compare to not using batch_input_shape test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32') test_output = model.predict(test_input) weights = model.layers[0].get_weights() reference = Sequential() reference.add(wrappers.TimeDistributed(layers.Embedding(5, 6), input_shape=(3, 4), dtype='int32')) reference.compile(optimizer='rmsprop', loss='mse') reference.layers[0].set_weights(weights) reference_output = reference.predict(test_input) assert_allclose(test_output, reference_output, atol=1e-05) # test with Conv2D model = Sequential() model.add(wrappers.TimeDistributed(layers.Conv2D(5, (2, 2), padding='same'), input_shape=(2, 4, 4, 3))) model.add(layers.Activation('relu')) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch(np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5))) model = model_from_json(model.to_json()) model.summary() # test stacked layers model = Sequential() model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4))) model.add(wrappers.TimeDistributed(layers.Dense(3))) model.add(layers.Activation('relu')) model.compile(optimizer='rmsprop', loss='mse') model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 3)), epochs=1, batch_size=10) # test wrapping Sequential model model = Sequential() model.add(layers.Dense(3, input_dim=2)) outer_model = Sequential() outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2))) outer_model.compile(optimizer='rmsprop', loss='mse') outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)), epochs=1, batch_size=10) # test with functional API x = Input(shape=(3, 2)) y = wrappers.TimeDistributed(model)(x) outer_model = Model(x, y) outer_model.compile(optimizer='rmsprop', loss='mse') outer_model.fit(np.random.random((10, 3, 2)), 
np.random.random((10, 3, 3)), epochs=1, batch_size=10) # test with BatchNormalization model = Sequential() model.add(wrappers.TimeDistributed( layers.BatchNormalization(center=True, scale=True), name='bn', input_shape=(10, 2))) model.compile(optimizer='rmsprop', loss='mse') # Assert that mean and variance are 0 and 1. td = model.layers[0] assert np.array_equal(td.get_weights()[2], np.array([0, 0])) assert np.array_equal(td.get_weights()[3], np.array([1, 1])) # Train model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)), np.broadcast_to(np.array([0, 1]), (1, 10, 2))) # Assert that mean and variance changed. assert not np.array_equal(td.get_weights()[2], np.array([0, 0])) assert not np.array_equal(td.get_weights()[3], np.array([1, 1])) # Verify input_map has one mapping from inputs to reshaped inputs. uid = object_list_uid(model.inputs) assert len(td._input_map.keys()) == 1 assert uid in td._input_map assert K.int_shape(td._input_map[uid]) == (None, 2) @pytest.mark.skipif(K.backend() == 'mxnet', reason='MXNet backend does not support TimeDistributed and RNN yet') @pytest.mark.skipif((K.backend() == 'cntk'), reason='Flaky with CNTK backend') def test_TimeDistributed_learning_phase(): # test layers that need learning_phase to be set np.random.seed(1234) x = Input(shape=(3, 2)) y = wrappers.TimeDistributed(layers.Dropout(.999))(x, training=True) model = Model(x, y) y = model.predict(np.random.random((10, 3, 2))) assert_allclose(np.mean(y), 0., atol=1e-1, rtol=1e-1) @pytest.mark.skipif(K.backend() == 'mxnet', reason='MXNet backend does not support TimeDistributed and RNN yet') def test_TimeDistributed_trainable(): # test layers that need learning_phase to be set x = Input(shape=(3, 2)) layer = wrappers.TimeDistributed(layers.BatchNormalization()) _ = layer(x) assert len(layer.updates) == 2 assert len(layer.trainable_weights) == 2 layer.trainable = False assert len(layer.updates) == 0 assert len(layer.trainable_weights) == 0 layer.trainable = True assert len(layer.updates) == 2 assert len(layer.trainable_weights) == 2 @pytest.mark.skipif((K.backend() == 'cntk' or K.backend() == 'mxnet'), reason='Unknown timestamps for RNN not supported in CNTK and MXNet.') def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(): # test with unspecified shape and Embeddings with mask_zero model = Sequential() model.add(wrappers.TimeDistributed(layers.Embedding(5, 6, mask_zero=True), input_shape=(None, None))) # the shape so far: (N, t_1, t_2, 6) model.add(wrappers.TimeDistributed(layers.SimpleRNN(7, return_sequences=True))) model.add(wrappers.TimeDistributed(layers.SimpleRNN(8, return_sequences=False))) model.add(layers.SimpleRNN(1, return_sequences=False)) model.compile(optimizer='rmsprop', loss='mse') model_input = np.random.randint(low=1, high=5, size=(10, 3, 4), dtype='int32') for i in range(4): model_input[i, i:, i:] = 0 model.fit(model_input, np.random.random((10, 1)), epochs=1, batch_size=10) mask_outputs = [model.layers[0].compute_mask(model.input)] for layer in model.layers[1:]: mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1])) func = K.function([model.input], mask_outputs[:-1]) mask_outputs_val = func([model_input]) ref_mask_val_0 = model_input > 0 # embedding layer ref_mask_val_1 = ref_mask_val_0 # first RNN layer ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1) # second RNN layer ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2] for i in range(3): assert np.array_equal(mask_outputs_val[i], ref_mask_val[i]) assert mask_outputs[-1] is 
None # final layer @pytest.mark.skipif(K.backend() == 'mxnet', reason='MXNet backend does not support TimeDistributed and RNN yet') def test_TimeDistributed_with_masking_layer(): # test with Masking layer model = Sequential() model.add(wrappers.TimeDistributed(layers.Masking(mask_value=0.,), input_shape=(None, 4))) model.add(wrappers.TimeDistributed(layers.Dense(5))) model.compile(optimizer='rmsprop', loss='mse') model_input = np.random.randint(low=1, high=5, size=(10, 3, 4)) for i in range(4): model_input[i, i:, :] = 0. model.compile(optimizer='rmsprop', loss='mse') model.fit(model_input, np.random.random((10, 3, 5)), epochs=1, batch_size=6) mask_outputs = [model.layers[0].compute_mask(model.input)] mask_outputs += [model.layers[1].compute_mask(model.layers[1].input, mask_outputs[-1])] func = K.function([model.input], mask_outputs) mask_outputs_val = func([model_input]) assert np.array_equal(mask_outputs_val[0], np.any(model_input, axis=-1)) assert np.array_equal(mask_outputs_val[1], np.any(model_input, axis=-1)) def test_regularizers(): model = Sequential() model.add(wrappers.TimeDistributed( layers.Dense(2, kernel_regularizer='l1'), input_shape=(3, 4))) model.add(layers.Activation('relu')) model.compile(optimizer='rmsprop', loss='mse') assert len(model.layers[0].layer.losses) == 1 assert len(model.layers[0].losses) == 1 assert len(model.layers[0].get_losses_for(None)) == 1 assert len(model.losses) == 1 model = Sequential() model.add(wrappers.TimeDistributed( layers.Dense(2, activity_regularizer='l1'), input_shape=(3, 4))) model.add(layers.Activation('relu')) model.compile(optimizer='rmsprop', loss='mse') assert len(model.losses) == 1 def test_Bidirectional(): rnn = layers.SimpleRNN samples = 2 dim = 2 timesteps = 2 output_dim = 2 dropout_rate = 0.2 for mode in ['sum', 'concat']: x = np.random.random((samples, timesteps, dim)) target_dim = 2 * output_dim if mode == 'concat' else output_dim y = np.random.random((samples, target_dim)) # test with Sequential model model = Sequential() model.add(wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate, recurrent_dropout=dropout_rate), merge_mode=mode, input_shape=(timesteps, dim))) model.compile(loss='mse', optimizer='sgd') model.fit(x, y, epochs=1, batch_size=1) # test config model.get_config() model = model_from_json(model.to_json()) model.summary() # test stacked bidirectional layers model = Sequential() model.add(wrappers.Bidirectional(rnn(output_dim, return_sequences=True), merge_mode=mode, input_shape=(timesteps, dim))) model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode)) model.compile(loss='mse', optimizer='sgd') model.fit(x, y, epochs=1, batch_size=1) # test with functional API inputs = Input((timesteps, dim)) outputs = wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate, recurrent_dropout=dropout_rate), merge_mode=mode)(inputs) model = Model(inputs, outputs) model.compile(loss='mse', optimizer='sgd') model.fit(x, y, epochs=1, batch_size=1) # Bidirectional and stateful inputs = Input(batch_shape=(1, timesteps, dim)) outputs = wrappers.Bidirectional(rnn(output_dim, stateful=True), merge_mode=mode)(inputs) model = Model(inputs, outputs) model.compile(loss='mse', optimizer='sgd') model.fit(x, y, epochs=1, batch_size=1) @pytest.mark.skipif((K.backend() == 'cntk'), reason='Unknown timestamps not supported in CNTK.') def test_Bidirectional_dynamic_timesteps(): # test with functional API with dynamic length rnn = layers.SimpleRNN samples = 2 dim = 2 timesteps = 2 output_dim = 2 dropout_rate = 0.2 for mode in 
['sum', 'concat']: x = np.random.random((samples, timesteps, dim)) target_dim = 2 * output_dim if mode == 'concat' else output_dim y = np.random.random((samples, target_dim)) inputs = Input((None, dim)) outputs = wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate, recurrent_dropout=dropout_rate), merge_mode=mode)(inputs) model = Model(inputs, outputs) model.compile(loss='mse', optimizer='sgd') model.fit(x, y, epochs=1, batch_size=1) @pytest.mark.parametrize('merge_mode', ['sum', 'mul', 'ave', 'concat', None]) def test_Bidirectional_merged_value(merge_mode): rnn = layers.LSTM samples = 2 dim = 5 timesteps = 3 units = 3 X = [np.random.rand(samples, timesteps, dim)] if merge_mode == 'sum': merge_func = lambda y, y_rev: y + y_rev elif merge_mode == 'mul': merge_func = lambda y, y_rev: y * y_rev elif merge_mode == 'ave': merge_func = lambda y, y_rev: (y + y_rev) / 2 elif merge_mode == 'concat': merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1) else: merge_func = lambda y, y_rev: [y, y_rev] # basic case inputs = Input((timesteps, dim)) layer = wrappers.Bidirectional(rnn(units, return_sequences=True), merge_mode=merge_mode) f_merged = K.function([inputs], to_list(layer(inputs))) f_forward = K.function([inputs], [layer.forward_layer.call(inputs)]) f_backward = K.function([inputs], [K.reverse(layer.backward_layer.call(inputs), 1)]) y_merged = f_merged(X) y_expected = to_list(merge_func(f_forward(X)[0], f_backward(X)[0])) assert len(y_merged) == len(y_expected) for x1, x2 in zip(y_merged, y_expected): assert_allclose(x1, x2, atol=1e-5) # test return_state inputs = Input((timesteps, dim)) layer = wrappers.Bidirectional(rnn(units, return_state=True), merge_mode=merge_mode) f_merged = K.function([inputs], layer(inputs)) f_forward = K.function([inputs], layer.forward_layer.call(inputs)) f_backward = K.function([inputs], layer.backward_layer.call(inputs)) n_states = len(layer.layer.states) y_merged = f_merged(X) y_forward = f_forward(X) y_backward = f_backward(X) y_expected = to_list(merge_func(y_forward[0], y_backward[0])) assert len(y_merged) == len(y_expected) + n_states * 2 for x1, x2 in zip(y_merged, y_expected): assert_allclose(x1, x2, atol=1e-5) # test if the state of a BiRNN is the concatenation of the underlying RNNs y_merged = y_merged[-n_states * 2:] y_forward = y_forward[-n_states:] y_backward = y_backward[-n_states:] for state_birnn, state_inner in zip(y_merged, y_forward + y_backward): assert_allclose(state_birnn, state_inner, atol=1e-5) @pytest.mark.skipif(K.backend() == 'theano' or K.backend() == 'mxnet', reason='Not supported.') @pytest.mark.parametrize('merge_mode', ['sum', 'concat', None]) def test_Bidirectional_dropout(merge_mode): rnn = layers.LSTM samples = 2 dim = 5 timesteps = 3 units = 3 X = [np.random.rand(samples, timesteps, dim)] inputs = Input((timesteps, dim)) wrapped = wrappers.Bidirectional(rnn(units, dropout=0.2, recurrent_dropout=0.2), merge_mode=merge_mode) outputs = to_list(wrapped(inputs, training=True)) assert all(not getattr(x, '_uses_learning_phase') for x in outputs) inputs = Input((timesteps, dim)) wrapped = wrappers.Bidirectional(rnn(units, dropout=0.2, return_state=True), merge_mode=merge_mode) outputs = to_list(wrapped(inputs)) assert all(x._uses_learning_phase for x in outputs) model = Model(inputs, outputs) assert model.uses_learning_phase y1 = to_list(model.predict(X)) y2 = to_list(model.predict(X)) for x1, x2 in zip(y1, y2): assert_allclose(x1, x2, atol=1e-5) def test_Bidirectional_state_reuse(): rnn = layers.LSTM samples = 2 dim = 
5 timesteps = 3 units = 3 input1 = Input((timesteps, dim)) layer = wrappers.Bidirectional(rnn(units, return_state=True, return_sequences=True)) state = layer(input1)[1:] # test passing invalid initial_state: passing a tensor input2 = Input((timesteps, dim)) with pytest.raises(ValueError): output = wrappers.Bidirectional(rnn(units))(input2, initial_state=state[0]) # test valid usage: passing a list output = wrappers.Bidirectional(rnn(units))(input2, initial_state=state) model = Model([input1, input2], output) assert len(model.layers) == 4 assert isinstance(model.layers[-1].input, list) inputs = [np.random.rand(samples, timesteps, dim), np.random.rand(samples, timesteps, dim)] outputs = model.predict(inputs) @pytest.mark.skipif(K.backend() == 'mxnet', reason='MXNet backend does not support custom RNN cell yet') def test_Bidirectional_with_constants(): class RNNCellWithConstants(Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): if not isinstance(input_shape, list): raise TypeError('expects constants shape') [input_shape, constant_shape] = input_shape # will (and should) raise if more than one constant passed self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(constant_shape[-1], self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = K.dot(inputs, self.input_kernel) h_state = K.dot(prev_output, self.recurrent_kernel) h_const = K.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {'units': self.units} base_config = super(RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = Input((5, 5)) c = Input((3,)) cell = RNNCellWithConstants(32) custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with CustomObjectScope(custom_objects): layer = wrappers.Bidirectional(RNN(cell)) y = layer(x, constants=c) model = Model([x, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 64)) ) # Test basic case serialization. 
x_np = np.random.random((6, 5, 5)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, c_np]) weights = model.get_weights() config = layer.get_config() with CustomObjectScope(custom_objects): layer = wrappers.Bidirectional.from_config(copy.deepcopy(config)) y = layer(x, constants=c) model = Model([x, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, c_np]) assert_allclose(y_np, y_np_2, atol=1e-4) # test flat list inputs with CustomObjectScope(custom_objects): layer = wrappers.Bidirectional.from_config(copy.deepcopy(config)) y = layer([x, c]) model = Model([x, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, c_np]) assert_allclose(y_np, y_np_3, atol=1e-4) @pytest.mark.skipif(K.backend() == 'mxnet', reason='MXNet backend does not support custom RNN cell yet') def test_Bidirectional_with_constants_layer_passing_initial_state(): class RNNCellWithConstants(Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(RNNCellWithConstants, self).__init__(**kwargs) def build(self, input_shape): if not isinstance(input_shape, list): raise TypeError('expects constants shape') [input_shape, constant_shape] = input_shape # will (and should) raise if more than one constant passed self.input_kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.constant_kernel = self.add_weight( shape=(constant_shape[-1], self.units), initializer='uniform', name='constant_kernel') self.built = True def call(self, inputs, states, constants): [prev_output] = states [constant] = constants h_input = K.dot(inputs, self.input_kernel) h_state = K.dot(prev_output, self.recurrent_kernel) h_const = K.dot(constant, self.constant_kernel) output = h_input + h_state + h_const return output, [output] def get_config(self): config = {'units': self.units} base_config = super(RNNCellWithConstants, self).get_config() return dict(list(base_config.items()) + list(config.items())) # Test basic case. x = Input((5, 5)) c = Input((3,)) s_for = Input((32,)) s_bac = Input((32,)) cell = RNNCellWithConstants(32) custom_objects = {'RNNCellWithConstants': RNNCellWithConstants} with CustomObjectScope(custom_objects): layer = wrappers.Bidirectional(RNN(cell)) y = layer(x, initial_state=[s_for, s_bac], constants=c) model = Model([x, s_for, s_bac, c], y) model.compile(optimizer='rmsprop', loss='mse') model.train_on_batch( [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 32)), np.zeros((6, 3))], np.zeros((6, 64)) ) # Test basic case serialization. 
x_np = np.random.random((6, 5, 5)) s_fw_np = np.random.random((6, 32)) s_bk_np = np.random.random((6, 32)) c_np = np.random.random((6, 3)) y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np]) weights = model.get_weights() config = layer.get_config() with CustomObjectScope(custom_objects): layer = wrappers.Bidirectional.from_config(copy.deepcopy(config)) y = layer(x, initial_state=[s_for, s_bac], constants=c) model = Model([x, s_for, s_bac, c], y) model.set_weights(weights) y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np]) assert_allclose(y_np, y_np_2, atol=1e-4) # verify that state is used y_np_2_different_s = model.predict([x_np, s_fw_np + 10., s_bk_np + 10., c_np]) with pytest.raises(AssertionError): assert_allclose(y_np, y_np_2_different_s, atol=1e-4) # test flat list inputs with CustomObjectScope(custom_objects): layer = wrappers.Bidirectional.from_config(copy.deepcopy(config)) y = layer([x, s_for, s_bac, c]) model = Model([x, s_for, s_bac, c], y) model.set_weights(weights) y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np]) assert_allclose(y_np, y_np_3, atol=1e-4) def test_Bidirectional_trainable(): # test layers that need learning_phase to be set x = Input(shape=(3, 2)) layer = wrappers.Bidirectional(layers.SimpleRNN(3)) _ = layer(x) assert len(layer.trainable_weights) == 6 layer.trainable = False assert len(layer.trainable_weights) == 0 layer.trainable = True assert len(layer.trainable_weights) == 6 def test_Bidirectional_updates(): x = Input(shape=(3, 2)) layer = wrappers.Bidirectional(layers.SimpleRNN(3)) assert len(layer.updates) == 0 assert len(layer.get_updates_for(None)) == 0 assert len(layer.get_updates_for(x)) == 0 layer.forward_layer.add_update(0, inputs=x) layer.forward_layer.add_update(1, inputs=None) layer.backward_layer.add_update(0, inputs=x) layer.backward_layer.add_update(1, inputs=None) assert len(layer.updates) == 4 assert len(layer.get_updates_for(None)) == 2 assert len(layer.get_updates_for(x)) == 2 def test_Bidirectional_losses(): x = Input(shape=(3, 2)) layer = wrappers.Bidirectional( layers.SimpleRNN(3, kernel_regularizer='l1', bias_regularizer='l1')) _ = layer(x) assert len(layer.losses) == 4 assert len(layer.get_losses_for(None)) == 4 assert len(layer.get_losses_for(x)) == 0 layer.forward_layer.add_loss(0, inputs=x) layer.forward_layer.add_loss(1, inputs=None) layer.backward_layer.add_loss(0, inputs=x) layer.backward_layer.add_loss(1, inputs=None) assert len(layer.losses) == 8 assert len(layer.get_losses_for(None)) == 6 assert len(layer.get_losses_for(x)) == 2 if __name__ == '__main__': pytest.main([__file__])
src/tornado-3.2.2/tornado/platform/common.py
code-annotator/tornado-annotated
645
60
<gh_stars>100-1000
"""Lowest-common-denominator implementations of platform functionality."""
from __future__ import absolute_import, division, print_function, with_statement

import errno
import socket

from tornado.platform import interface


class Waker(interface.Waker):
    """Create an OS independent asynchronous pipe.

    For use on platforms that don't have os.pipe() (or where pipes cannot
    be passed to select()), but do have sockets.  This includes Windows
    and Jython.
    """
    def __init__(self):
        # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py

        self.writer = socket.socket()
        # Disable buffering -- pulling the trigger sends 1 byte,
        # and we want that sent immediately, to wake up ASAP.
        self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        count = 0
        while 1:
            count += 1
            # Bind to a local port; for efficiency, let the OS pick
            # a free port for us.
            # Unfortunately, stress tests showed that we may not
            # be able to connect to that port ("Address already in
            # use") despite that the OS picked it.  This appears
            # to be a race bug in the Windows socket implementation.
            # So we loop until a connect() succeeds (almost always
            # on the first try).  See the long thread at
            # http://mail.zope.org/pipermail/zope/2005-July/160433.html
            # for hideous details.
            a = socket.socket()
            a.bind(("127.0.0.1", 0))
            a.listen(1)
            connect_address = a.getsockname()  # assigned (host, port) pair
            try:
                self.writer.connect(connect_address)
                break    # success
            except socket.error as detail:
                if (not hasattr(errno, 'WSAEADDRINUSE') or
                        detail[0] != errno.WSAEADDRINUSE):
                    # "Address already in use" is the only error
                    # I've seen on two WinXP Pro SP2 boxes, under
                    # Pythons 2.3.5 and 2.4.1.
                    raise
                # (10048, 'Address already in use')
                # assert count <= 2 # never triggered in Tim's tests
                if count >= 10:  # I've never seen it go above 2
                    a.close()
                    self.writer.close()
                    raise socket.error("Cannot bind trigger!")
                # Close `a` and try again.  Note: I originally put a short
                # sleep() here, but it didn't appear to help or hurt.
                a.close()

        self.reader, addr = a.accept()
        self.reader.setblocking(0)
        self.writer.setblocking(0)
        a.close()
        self.reader_fd = self.reader.fileno()

    def fileno(self):
        return self.reader.fileno()

    def write_fileno(self):
        return self.writer.fileno()

    def wake(self):
        try:
            self.writer.send(b"x")
        except (IOError, socket.error):
            pass

    def consume(self):
        try:
            while True:
                result = self.reader.recv(1024)
                if not result:
                    break
        except (IOError, socket.error):
            pass

    def close(self):
        self.reader.close()
        self.writer.close()
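The socket-pair trigger can be exercised on its own. The following is a rough sketch only, assuming the Waker class above is importable (e.g. as tornado.platform.common.Waker in this Tornado version); it wakes the pipe once and drains it.

import select

w = Waker()
w.wake()                                    # send the single trigger byte
ready, _, _ = select.select([w.fileno()], [], [], 1.0)
assert ready                                # reader side is now readable
w.consume()                                 # drain so the next wake() is visible again
w.close()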
docs/source/auto_examples/plot_usage.py
ruhugu/brokenaxes
362
81
""" Basic usage =========== This example presents the basic usage of brokenaxes """ import matplotlib.pyplot as plt from brokenaxes import brokenaxes import numpy as np fig = plt.figure(figsize=(5,2)) bax = brokenaxes(xlims=((0, .1), (.4, .7)), ylims=((-1, .7), (.79, 1)), hspace=.05) x = np.linspace(0, 1, 100) bax.plot(x, np.sin(10 * x), label='sin') bax.plot(x, np.cos(10 * x), label='cos') bax.legend(loc=3) bax.set_xlabel('time') bax.set_ylabel('value')
clpy/sparse/util.py
fixstars/clpy
142
116
<filename>clpy/sparse/util.py
import clpy
import clpy.sparse.base


_preamble_atomic_add = '''
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
    unsigned long long* address_as_ull = (unsigned long long*)address;
    unsigned long long old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif
'''


def isintlike(x):
    try:
        return bool(int(x) == x)
    except (TypeError, ValueError):
        return False


def isscalarlike(x):
    return clpy.isscalar(x) or (clpy.sparse.base.isdense(x) and x.ndim == 0)


def isshape(x):
    if not isinstance(x, tuple) or len(x) != 2:
        return False
    m, n = x
    return isintlike(m) and isintlike(n)
test/test_cartesian.py
hwazni/discopy
205
117
from pytest import raises

from discopy.cartesian import *


def test_Box_repr():
    f = Box('f', 1, 2, lambda x: (x, x))
    assert "Box('f', 1, 2" in repr(f)


def test_Function_str():
    f = Function(2, 1, lambda x, y: x + y)
    assert 'Function(dom=2, cod=1,' in str(f)


def test_Function_call():
    f = Swap(2, 1)
    values = (2, 3)
    with raises(TypeError) as err:
        f(*values)
    assert str(err.value) == messages.expected_input_length(f, values)


def test_Function_then():
    f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
    assert Function.id(2).then(*(f, g))(20, 21) == 42


def test_Function_then_err():
    f = Function(2, 1, lambda x, y: x + y)
    g = (lambda x: x, )
    with raises(TypeError) as err:
        f >> g
    assert str(err.value) == messages.type_err(Function, g)
    g = Function.id(2)
    with raises(AxiomError) as err:
        f >> g
    assert str(err.value) == messages.does_not_compose(f, g)


def test_Function_tensor():
    assert Function.id(3)(1, 2, 3)\
        == Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)


def test_Function_tensor_err():
    f = Function(2, 1, lambda x, y: x + y)
    g = (lambda x: x, )
    with raises(TypeError) as err:
        f @ g
    assert str(err.value) == messages.type_err(Function, g)
bsp/nrf5x/tools/sdk_dist.py
BreederBai/rt-thread
7,482
136
import os
import sys
import shutil

cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))


# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
    from mkdist import bsp_copy_files
    import rtconfig

    library_dir = os.path.join(dist_dir, 'libraries')
    print("=> copy nrf52 bsp libraries")
    library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
    bsp_copy_files(library_path, library_dir)
splash/render_options.py
tashidexiaoL/splashnew
3,612
178
<reponame>tashidexiaoL/splashnew # -*- coding: utf-8 -*- import os import json from splash import defaults from splash.utils import to_bytes, path_join_secure from splash.errors import BadOption class RenderOptions(object): """ Options that control how to render a response. """ _REQUIRED = object() def __init__(self, data, max_timeout): self.data = data self.max_timeout = max_timeout @classmethod def raise_error(cls, argument, description, type='bad_argument', **kwargs): params = { 'type': type, 'argument': argument, 'description': description } params.update(kwargs) raise BadOption(params) @classmethod def fromrequest(cls, request, max_timeout): """ Initialize options from a Twisted Request. """ # 1. GET / POST data data = {key.decode('utf-8'): values[0].decode('utf-8') for key, values in request.args.items()} if request.method == b'POST': content_type = request.getHeader(b'content-type') if content_type: request.content.seek(0) # 2. application/json POST data if b'application/json' in content_type: try: content = request.content.read().decode('utf-8') data.update(json.loads(content)) except ValueError as e: raise BadOption({ 'type': 'invalid_json', 'description': "Can't decode JSON", 'message': str(e), }) # 3. js_source from application/javascript POST requests if b'application/javascript' in content_type: data['js_source'] = request.content.read().decode('utf-8') request.content.seek(0) data['uid'] = id(request) return cls(data, max_timeout) def get_expired_args(self, cache): """ Return a list of argument names from load_args which can't be loaded """ return cache.get_missing(self.get_load_args().items()) def save_args_to_cache(self, cache): """ Process save_args and put all values to cache. Return a list of (name, key) pairs. """ save_args = self.get_save_args() save_values = [self.data.get(name) for name in save_args] keys = cache.add_many(save_values) return list(zip(save_args, keys)) def load_cached_args(self, cache): load_args = self.get_load_args() for name, key in (load_args or {}).items(): self.data[name] = cache[key] def get(self, name, default=_REQUIRED, type=str, range=None): value = self.data.get(name) if value is not None: if type is not None: try: value = type(value) except ValueError: msg = "Argument %r has a wrong type" % (name,) self.raise_error(name, msg, required_type=type.__name__) if range is not None and not (range[0] <= value <= range[1]): self.raise_error(name, 'Argument is out of the allowed range', min=range[0], max=range[1], value=value) return value elif default is self._REQUIRED: self.raise_error(name, 'Required argument is missing: %s' % name, type='argument_required') else: return default def _get_bool(self, name, default=_REQUIRED): return self.get(name, default, type=int, range=(0, 1)) def _get_url(self, name, default=_REQUIRED): url = self.get(name, default, type=None) if isinstance(url, bytes): url = url.decode('utf8') return url def get_uid(self): return self.get('uid') def get_url(self): return self._get_url("url") def get_baseurl(self): return self._get_url("baseurl", default=None) def get_wait(self): return self.get("wait", defaults.WAIT_TIME, type=float, range=(0, self.get_timeout())) def get_timeout(self): default = min(self.max_timeout, defaults.TIMEOUT) return self.get("timeout", default, type=float, range=(0, self.max_timeout)) def get_resource_timeout(self): return self.get("resource_timeout", defaults.RESOURCE_TIMEOUT, type=float, range=(0, 1e6)) def get_response_body(self): return self._get_bool("response_body", 
defaults.RESPONSE_BODY_ENABLED) def get_request_body(self): return self._get_bool("request_body", defaults.REQUEST_BODY_ENABLED) def get_images(self): return self._get_bool("images", defaults.AUTOLOAD_IMAGES) def get_proxy(self): return self.get("proxy", default=None) def get_js_source(self): return self.get("js_source", default=None) def get_width(self): return self.get("width", None, type=int, range=(1, defaults.MAX_WIDTH)) def get_height(self): return self.get("height", None, type=int, range=(1, defaults.MAX_HEIGTH)) def get_scale_method(self): scale_method = self.get("scale_method", defaults.IMAGE_SCALE_METHOD) allowed_scale_methods = ['raster', 'vector'] if scale_method not in allowed_scale_methods: self.raise_error( argument='scale_method', description="Invalid 'scale_method': %s" % scale_method, allowed=allowed_scale_methods, received=scale_method, ) return scale_method def get_quality(self): return self.get("quality", defaults.JPEG_QUALITY, type=int, range=(0, 100)) def get_http_method(self): method = self.get("http_method", "GET") if method.upper() not in ["POST", "GET"]: self.raise_error("http_method", "Unsupported HTTP method {}".format(method)) return method def get_body(self): body = self.get("body", None, to_bytes) method = self.get("http_method", "GET").upper() if method == 'GET' and body: self.raise_error("body", "GET request should not have a body") return body def get_render_all(self, wait=None): result = self._get_bool("render_all", False) if result == 1 and wait == 0: self.raise_error("render_all", "Pass non-zero 'wait' to render full webpage") return result def get_lua_source(self): return self.get("lua_source") def get_js_profile(self, js_profiles_path): js_profile = self.get("js", default=None) if not js_profile: return js_profile if js_profiles_path is None: self.raise_error('js', 'Javascript profiles are not enabled on server') try: profile_dir = path_join_secure(js_profiles_path, js_profile) except ValueError as e: # security check fails print(e) self.raise_error('js', 'Javascript profile does not exist') if not os.path.isdir(profile_dir): self.raise_error('js', 'Javascript profile does not exist') return profile_dir def get_headers(self): headers = self.get("headers", default=None, type=None) if headers is None: return headers if not isinstance(headers, (list, tuple, dict)): self.raise_error( argument='headers', description="'headers' must be either a JSON array of " "(name, value) pairs or a JSON object" ) if isinstance(headers, (list, tuple)): for el in headers: string_only = all(isinstance(e, str) for e in el) if not (isinstance(el, (list, tuple)) and len(el) == 2 and string_only): self.raise_error( argument='headers', description="'headers' must be either a JSON array of " "(name, value) pairs or a JSON object" ) return headers def get_save_args(self): save_args = self.get("save_args", default=None, type=None) if save_args is None: return [] if isinstance(save_args, str): # comma-separated string save_args = save_args.split(',') if not isinstance(save_args, list): self.raise_error( argument="save_args", description="'save_args' should be either a comma-separated " "string or a JSON array with argument names", ) # JSON array if not all(isinstance(a, str) for a in save_args): self.raise_error( argument="save_args", description="'save_args' should be a list of strings", ) return save_args def get_load_args(self): load_args = self.get("load_args", default=None, type=None) if load_args is None: return {} if isinstance(load_args, str): try: load_args = dict( 
kv.split("=", 1) for kv in load_args.split(';') ) except ValueError: self.raise_error( argument="load_args", description="'load_args' string value is not a " "semicolon-separated list of name=hash pairs" ) if not isinstance(load_args, dict): self.raise_error( argument="load_args", description="'load_args' should be either a JSON object with " "argument hashes or a semicolon-separated list " "of name=hash pairs" ) return load_args def get_viewport(self, wait=None): viewport = self.get("viewport", defaults.VIEWPORT_SIZE) if viewport == 'full': if wait == 0: self.raise_error("viewport", "Pass non-zero 'wait' to render full webpage") else: try: validate_size_str(viewport) except ValueError as e: self.raise_error("viewport", str(e)) return viewport def get_filters(self, pool=None, adblock_rules=None): filter_names = self.get('filters', '') filter_names = [f for f in filter_names.split(',') if f] if pool is None and adblock_rules is None: # skip validation return filter_names if not filter_names: return filter_names if pool is not None: adblock_rules = pool.network_manager_factory.adblock_rules if adblock_rules is None: self.raise_error( "filters", "Invalid filter names: %s" % (filter_names,) ) if adblock_rules is not None: unknown_filters = adblock_rules.get_unknown_filters(filter_names) if unknown_filters: self.raise_error( "filters", "Invalid filter names: %s" % (unknown_filters,) ) return filter_names def get_allowed_domains(self): allowed_domains = self.get("allowed_domains", default=None) if allowed_domains is not None: return allowed_domains.split(',') def get_allowed_content_types(self): content_types = self.get("allowed_content_types", default=['*']) if isinstance(content_types, str): content_types = list(filter(None, content_types.split(','))) return content_types def get_forbidden_content_types(self): content_types = self.get("forbidden_content_types", default=[]) if isinstance(content_types, str): content_types = list(filter(None, content_types.split(','))) return content_types def get_html5_media(self): return self._get_bool("html5_media", defaults.HTML5_MEDIA_ENABLED) def get_engine(self, browser_engines_enabled=None): engine = self.get("engine", default="webkit", type=str) if engine not in {"webkit", "chromium"}: self.raise_error("engine", "Unknown render engine {}".format(engine)) if browser_engines_enabled is not None: if engine not in browser_engines_enabled: self.raise_error("engine", "Disabled render engine {}".format(engine)) return engine def get_http2(self): engine = self.get_engine() if self.get_engine() == "webkit": default = defaults.WEBKIT_HTTP2_ENABLED else: assert engine == 'chromium' default = defaults.CHROMIUM_HTTP2_ENABLED return self._get_bool("http2", default) def get_common_params(self, js_profiles_path): wait = self.get_wait() return { 'url': self.get_url(), 'baseurl': self.get_baseurl(), 'wait': wait, 'resource_timeout': self.get_resource_timeout(), 'viewport': self.get_viewport(wait), 'render_all': self.get_render_all(wait), 'images': self.get_images(), 'headers': self.get_headers(), 'proxy': self.get_proxy(), 'js_profile': self.get_js_profile(js_profiles_path), 'js_source': self.get_js_source(), 'http_method': self.get_http_method(), 'body': self.get_body(), 'html5_media': self.get_html5_media(), 'http2': self.get_http2(), # 'lua': self.get_lua(), } def get_image_params(self): return { 'width': self.get_width(), 'height': self.get_height(), 'scale_method': self.get_scale_method() } def get_png_params(self): return self.get_image_params() def 
get_jpeg_params(self): params = {'quality': self.get_quality()} params.update(self.get_image_params()) return params def get_include_params(self): return dict( html=self._get_bool("html", defaults.DO_HTML), iframes=self._get_bool("iframes", defaults.DO_IFRAMES), png=self._get_bool("png", defaults.DO_PNG), jpeg=self._get_bool("jpeg", defaults.DO_JPEG), script=self._get_bool("script", defaults.SHOW_SCRIPT), console=self._get_bool("console", defaults.SHOW_CONSOLE), history=self._get_bool("history", defaults.SHOW_HISTORY), har=self._get_bool("har", defaults.SHOW_HAR), ) def validate_size_str(size_str): """ Validate size string in WxH format. Can be used to validate both viewport and window size strings. Does not special-case ``'full'`` viewport. Raises ``ValueError`` if anything goes wrong. :param size_str: string to validate """ max_width = defaults.VIEWPORT_MAX_WIDTH max_heigth = defaults.VIEWPORT_MAX_HEIGTH max_area = defaults.VIEWPORT_MAX_AREA try: w, h = map(int, size_str.split('x')) except ValueError: raise ValueError("Invalid viewport format: %s" % size_str) else: if not ((0 < w <= max_width) and (0 < h <= max_heigth) and (w * h < max_area)): raise ValueError("Viewport (%dx%d, area=%d) is out of range (%dx%d, area=%d)" % (w, h, w * h, max_width, max_heigth, max_area))
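The validate_size_str helper at the end of the module above is self-contained, so it can be exercised directly. A small illustrative check (the size strings below are arbitrary examples, and it assumes the function is importable from splash.render_options):

validate_size_str("1024x768")        # within the default limits, passes silently
try:
    validate_size_str("full-page")   # not in WxH form -> "Invalid viewport format"
except ValueError as exc:
    print(exc)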
glue/__init__.py
HPLegion/glue
550
186
<reponame>HPLegion/glue<filename>glue/__init__.py
# Set up configuration variables

__all__ = ['custom_viewer', 'qglue', 'test']

import os
import sys

from pkg_resources import get_distribution, DistributionNotFound

try:
    __version__ = get_distribution('glue-core').version
except DistributionNotFound:
    __version__ = 'undefined'

from ._mpl_backend import MatplotlibBackendSetter
sys.meta_path.append(MatplotlibBackendSetter())

from glue.viewers.custom.helper import custom_viewer

# Load user's configuration file
from .config import load_configuration
env = load_configuration()

from .qglue import qglue

from .main import load_plugins  # noqa


def test(no_optional_skip=False):
    from pytest import main
    root = os.path.abspath(os.path.dirname(__file__))
    args = [root, '-x']
    if no_optional_skip:
        args.append('--no-optional-skip')
    return main(args=args)


from glue._settings_helpers import load_settings
load_settings()


# In PyQt 5.5+, PyQt overrides the default exception catching and fatally
# crashes the Qt application without printing out any details about the error.
# Below we revert the exception hook to the original Python one. Note that we
# can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect
# the default excepthook is in place and override it.
def handle_exception(exc_type, exc_value, exc_traceback):
    sys.__excepthook__(exc_type, exc_value, exc_traceback)


sys.excepthook = handle_exception
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/dropbox/views.py
DemarcusL/django_wiki_lab
6,342
201
import requests

from allauth.socialaccount.providers.oauth2.views import (
    OAuth2Adapter,
    OAuth2CallbackView,
    OAuth2LoginView,
)

from .provider import DropboxOAuth2Provider


class DropboxOAuth2Adapter(OAuth2Adapter):
    provider_id = DropboxOAuth2Provider.id
    access_token_url = "https://api.dropbox.com/oauth2/token"
    authorize_url = "https://www.dropbox.com/oauth2/authorize"
    profile_url = "https://api.dropbox.com/2/users/get_current_account"
    redirect_uri_protocol = "https"

    def complete_login(self, request, app, token, **kwargs):
        response = requests.post(
            self.profile_url,
            headers={"Authorization": "Bearer %s" % (token.token,)},
        )
        response.raise_for_status()
        return self.get_provider().sociallogin_from_response(request, response.json())


oauth_login = OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)
src/ros_comm/rosmsg/setup.py
jungleni/ros_code_reading
742
212
<reponame>jungleni/ros_code_reading
#!/usr/bin/env python

from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

d = generate_distutils_setup(
    packages=['rosmsg'],
    package_dir={'': 'src'},
    scripts=['scripts/rosmsg', 'scripts/rosmsg-proto', 'scripts/rossrv'],
    requires=['genmsg', 'rosbag', 'roslib', 'rospkg']
)

setup(**d)
pixloc/visualization/viz_3d.py
jmorlana/pixloc
457
227
<filename>pixloc/visualization/viz_3d.py
"""
3D visualization primitives based on Plotly.
We might want to instead use a more powerful library like Open3D.
Plotly however supports animations, buttons and sliders.

1) Initialize a figure with `fig = init_figure()`
2) Plot points, cameras, lines, or create a slider animation.
3) Call `fig.show()` to render the figure.
"""

import plotly.graph_objects as go
import numpy as np

from ..pixlib.geometry.utils import to_homogeneous


def init_figure(height=800):
    """Initialize a 3D figure."""
    fig = go.Figure()
    fig.update_layout(
        height=height,
        scene_camera=dict(
            eye=dict(x=0., y=-.1, z=-2),
            up=dict(x=0, y=-1., z=0)),
        scene=dict(
            xaxis=dict(showbackground=False),
            yaxis=dict(showbackground=False),
            aspectmode='data',
            dragmode='orbit'),
        margin=dict(l=0, r=0, b=0, t=0, pad=0))  # noqa E741
    return fig


def plot_points(fig, pts, color='rgba(255, 0, 0, 1)', ps=2):
    """Plot a set of 3D points."""
    x, y, z = pts.T
    tr = go.Scatter3d(
        x=x, y=y, z=z, mode='markers', marker_size=ps,
        marker_color=color, marker_line_width=.2)
    fig.add_trace(tr)


def plot_camera(fig, R, t, K, color='rgb(0, 0, 255)'):
    """Plot a camera as a cone with camera frustum."""
    x, y, z = t
    u, v, w = R @ -np.array([0, 0, 1])
    tr = go.Cone(
        x=[x], y=[y], z=[z], u=[u], v=[v], w=[w], anchor='tip',
        showscale=False, colorscale=[[0, color], [1, color]],
        sizemode='absolute')
    fig.add_trace(tr)

    W, H = K[0, 2]*2, K[1, 2]*2
    corners = np.array([[0, 0], [W, 0], [W, H], [0, H], [0, 0]])
    corners = to_homogeneous(corners) @ np.linalg.inv(K).T
    corners = (corners/2) @ R.T + t
    x, y, z = corners.T
    tr = go.Scatter3d(
        x=x, y=y, z=z, line=dict(color='rgba(0, 0, 0, .5)'),
        marker=dict(size=0.0001), showlegend=False)
    fig.add_trace(tr)


def create_slider_animation(fig, traces):
    """Create a slider that animates a list of traces (e.g. 3D points)."""
    slider = {'steps': []}
    frames = []
    fig.add_trace(traces[0])
    idx = len(fig.data) - 1
    for i, tr in enumerate(traces):
        frames.append(go.Frame(name=str(i), traces=[idx], data=[tr]))
        step = {"args": [
            [str(i)],
            {"frame": {"redraw": True}, "mode": "immediate"}],
            "label": i, "method": "animate"}
        slider['steps'].append(step)
    fig.frames = tuple(frames)
    fig.layout.sliders = (slider,)
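The module docstring lists the intended three-step workflow. A minimal usage sketch, assuming the functions above are importable (the random points, identity rotation, and intrinsics below are illustrative values only):

import numpy as np
# e.g. from pixloc.visualization.viz_3d import init_figure, plot_points, plot_camera

fig = init_figure(height=600)                  # step 1: create the Plotly figure
pts = np.random.uniform(-1, 1, size=(100, 3))  # illustrative point cloud
plot_points(fig, pts, color='rgba(255, 0, 0, 1)', ps=3)

R = np.eye(3)                                  # illustrative camera rotation
t = np.array([0., 0., -3.])                    # illustrative camera position
K = np.array([[320., 0., 320.],                # simple pinhole intrinsics
              [0., 320., 240.],
              [0., 0., 1.]])
plot_camera(fig, R, t, K)

fig.show()                                     # step 3: render the figure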
tests/test_subpixel_upsample.py
Project-MONAI/MONAI
2,971
244
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch import torch.nn as nn from parameterized import parameterized from monai.networks import eval_mode from monai.networks.blocks import SubpixelUpsample from monai.networks.layers.factories import Conv TEST_CASE_SUBPIXEL = [] for inch in range(1, 5): for dim in range(1, 4): for factor in range(1, 3): test_case = [ {"dimensions": dim, "in_channels": inch, "scale_factor": factor}, (2, inch, *([8] * dim)), (2, inch, *([8 * factor] * dim)), ] TEST_CASE_SUBPIXEL.append(test_case) TEST_CASE_SUBPIXEL_2D_EXTRA = [ {"dimensions": 2, "in_channels": 2, "scale_factor": 3}, (2, 2, 8, 4), # different size for H and W (2, 2, 24, 12), ] TEST_CASE_SUBPIXEL_3D_EXTRA = [ {"dimensions": 3, "in_channels": 1, "scale_factor": 2}, (2, 1, 16, 8, 4), # different size for H, W and D (2, 1, 32, 16, 8), ] conv_block = nn.Sequential( Conv[Conv.CONV, 3](1, 4, kernel_size=1), Conv[Conv.CONV, 3](4, 8, kernel_size=3, stride=1, padding=1) ) TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA = [ {"dimensions": 3, "in_channels": 1, "scale_factor": 2, "conv_block": conv_block}, (2, 1, 16, 8, 4), # different size for H, W and D (2, 1, 32, 16, 8), ] TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_2D_EXTRA) TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_3D_EXTRA) TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA) # add every test back with the pad/pool sequential component omitted for tests in list(TEST_CASE_SUBPIXEL): args: dict = tests[0] # type: ignore args = dict(args) args["apply_pad_pool"] = False TEST_CASE_SUBPIXEL.append([args, tests[1], tests[2]]) class TestSUBPIXEL(unittest.TestCase): @parameterized.expand(TEST_CASE_SUBPIXEL) def test_subpixel_shape(self, input_param, input_shape, expected_shape): net = SubpixelUpsample(**input_param) with eval_mode(net): result = net.forward(torch.randn(input_shape)) self.assertEqual(result.shape, expected_shape) if __name__ == "__main__": unittest.main()
nemo/collections/asr/parts/numba/rnnt_loss/rnnt_numpy.py
madhukarkm/NeMo
4,145
257
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright 2018-2019, <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from torch.autograd import Function, Variable from torch.nn import Module def check_type(var, t, name): if var.dtype is not t: raise TypeError("{} must be {}".format(name, t)) def check_contiguous(var, name): if not var.is_contiguous(): raise ValueError("{} must be contiguous".format(name)) def check_dim(var, dim, name): if len(var.shape) != dim: raise ValueError("{} must be {}D".format(name, dim)) def certify_inputs(log_probs, labels, lengths, label_lengths): # check_type(log_probs, torch.float32, "log_probs") check_type(labels, torch.int32, "labels") check_type(label_lengths, torch.int32, "label_lengths") check_type(lengths, torch.int32, "lengths") check_contiguous(log_probs, "log_probs") check_contiguous(labels, "labels") check_contiguous(label_lengths, "label_lengths") check_contiguous(lengths, "lengths") if lengths.shape[0] != log_probs.shape[0]: raise ValueError( f"Must have a length per example. " f"Given lengths dim: {lengths.shape[0]}, " f"Log probs dim : {log_probs.shape[0]}" ) if label_lengths.shape[0] != log_probs.shape[0]: raise ValueError( "Must have a label length per example. " f"Given label lengths dim : {label_lengths.shape[0]}, " f"Log probs dim : {log_probs.shape[0]}" ) check_dim(log_probs, 4, "log_probs") check_dim(labels, 2, "labels") check_dim(lengths, 1, "lenghts") check_dim(label_lengths, 1, "label_lenghts") max_T = torch.max(lengths) max_U = torch.max(label_lengths) T, U = log_probs.shape[1:3] if T != max_T: raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}") if U != max_U + 1: raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1") def _assert_no_grad(tensor): assert not tensor.requires_grad, ( "gradients only computed for log_probs - please " "mark other tensors as not requiring gradients" ) def forward_pass(log_probs, labels, blank): """ Computes probability of the forward variable alpha. Args: log_probs: Tensor of shape [T, U, V+1] labels: Labels of shape [B, U] blank: Index of the blank token. Returns: A tuple of the forward variable probabilities - alpha of shape [T, U] and the log likelihood of this forward step. 
""" T, U, _ = log_probs.shape alphas = np.zeros((T, U), dtype='f') for t in range(1, T): alphas[t, 0] = alphas[t - 1, 0] + log_probs[t - 1, 0, blank] for u in range(1, U): alphas[0, u] = alphas[0, u - 1] + log_probs[0, u - 1, labels[u - 1]] for t in range(1, T): for u in range(1, U): no_emit = alphas[t - 1, u] + log_probs[t - 1, u, blank] emit = alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]] alphas[t, u] = np.logaddexp(emit, no_emit) loglike = alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank] return alphas, loglike def backward_pass(log_probs, labels, blank): """ Computes probability of the backward variable beta. Args: log_probs: Tensor of shape [T, U, V+1] labels: Labels of shape [B, U] blank: Index of the blank token. Returns: A tuple of the backward variable probabilities - beta of shape [T, U] and the log likelihood of this backward step. """ T, U, _ = log_probs.shape betas = np.zeros((T, U), dtype='f') betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank] for t in reversed(range(T - 1)): betas[t, U - 1] = betas[t + 1, U - 1] + log_probs[t, U - 1, blank] for u in reversed(range(U - 1)): betas[T - 1, u] = betas[T - 1, u + 1] + log_probs[T - 1, u, labels[u]] for t in reversed(range(T - 1)): for u in reversed(range(U - 1)): no_emit = betas[t + 1, u] + log_probs[t, u, blank] emit = betas[t, u + 1] + log_probs[t, u, labels[u]] betas[t, u] = np.logaddexp(emit, no_emit) return betas, betas[0, 0] def compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda): """ Computes the gradients of the log_probs with respect to the log probability of this step occuring. Args: Args: log_probs: Tensor of shape [T, U, V+1] alphas: Tensor of shape [T, U] which represents the forward variable. betas: Tensor of shape [T, U] which represents the backward variable. labels: Labels of shape [B, U] blank: Index of the blank token. Returns: Gradients of shape [T, U, V+1] with respect to the forward log probability """ T, U, _ = log_probs.shape grads = np.full(log_probs.shape, -float("inf")) log_like = betas[0, 0] # == alphas[T - 1, U - 1] + betas[T - 1, U - 1] # // grad to last blank transition grads[T - 1, U - 1, blank] = alphas[T - 1, U - 1] grads[: T - 1, :, blank] = alphas[: T - 1, :] + betas[1:, :] # // grad to label transition for u, l in enumerate(labels): grads[:, u, l] = alphas[:, u] + betas[:, u + 1] grads = -np.exp(grads + log_probs - log_like) if fastemit_lambda > 0.0: for u, l in enumerate(labels): grads[:, u, l] = (1.0 + fastemit_lambda) * grads[:, u, l] return grads def fastemit_regularization(log_probs, labels, alphas, betas, blank, fastemit_lambda): """ Describes the computation of FastEmit regularization from the paper - [FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148) Args: log_probs: Tensor of shape [T, U, V+1] labels: Unused. Labels of shape [B, U] alphas: Tensor of shape [T, U] which represents the forward variable. betas: Unused. Tensor of shape [T, U] which represents the backward variable. blank: Index of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. 
Returns: The regularized negative log likelihood - lambda * P˜(At, u|x) """ # General calculation of the fastemit regularization alignments T, U, _ = log_probs.shape # alignment = np.zeros((T, U), dtype='float32') # # for t in range(0, T): # alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1] # # for t in range(0, T): # for u in range(0, U - 1): # emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1] # alignment[t, u] = emit # reg = fastemit_lambda * (alignment[T - 1, U - 1]) # The above is equivalent to below, without need of computing above # reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1]) # The above is also equivalent to below, without need of computing the betas alignment matrix reg = fastemit_lambda * (alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]) return -reg def transduce(log_probs, labels, blank=0, fastemit_lambda=0.0): """ Args: log_probs: 3D array with shape [input len, output len + 1, vocab size] labels: 1D array with shape [output time steps] blank: Index of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: float: The negative log-likelihood 3D array: Gradients with respect to the unnormalized input actications 2d arrays: Alphas matrix (TxU) 2d array: Betas matrix (TxU) """ alphas, ll_forward = forward_pass(log_probs, labels, blank) betas, ll_backward = backward_pass(log_probs, labels, blank) grads = compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda) return -ll_forward, grads, alphas, betas def transduce_batch(log_probs, labels, flen, glen, blank=0, fastemit_lambda=0.0): """ Compute the transducer loss of the batch. Args: log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax. labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning. flen: Length vector of the acoustic sequence. glen: Length vector of the target sequence. blank: Id of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix. """ grads = np.zeros_like(log_probs) costs = [] for b in range(log_probs.shape[0]): t = int(flen[b]) u = int(glen[b]) + 1 ll, g, alphas, betas = transduce(log_probs[b, :t, :u, :], labels[b, : u - 1], blank, fastemit_lambda) grads[b, :t, :u, :] = g reg = fastemit_regularization( log_probs[b, :t, :u, :], labels[b, : u - 1], alphas, betas, blank, fastemit_lambda ) ll += reg costs.append(ll) return costs, grads class _RNNT(Function): @staticmethod def forward(ctx, acts, labels, act_lens, label_lens, blank, fastemit_lambda): costs, grads = transduce_batch( acts.detach().cpu().numpy(), labels.cpu().numpy(), act_lens.cpu().numpy(), label_lens.cpu().numpy(), blank, fastemit_lambda, ) costs = torch.FloatTensor([sum(costs)]) grads = torch.Tensor(grads).to(acts) ctx.grads = grads return costs @staticmethod def backward(ctx, grad_output): return ctx.grads, None, None, None, None, None class RNNTLoss(Module): """ Parameters: `blank_label` (int): default 0 - label index of blank token fastemit_lambda: Float scaling factor for FastEmit regularization. 
""" def __init__(self, blank: int = 0, fastemit_lambda: float = 0.0): super(RNNTLoss, self).__init__() self.blank = blank self.fastemit_lambda = fastemit_lambda self.rnnt = _RNNT.apply def forward(self, acts, labels, act_lens, label_lens): assert len(labels.size()) == 2 _assert_no_grad(labels) _assert_no_grad(act_lens) _assert_no_grad(label_lens) certify_inputs(acts, labels, act_lens, label_lens) acts = torch.nn.functional.log_softmax(acts, -1) return self.rnnt(acts, labels, act_lens, label_lens, self.blank, self.fastemit_lambda) if __name__ == '__main__': loss = RNNTLoss(fastemit_lambda=0.01) torch.manual_seed(0) acts = torch.randn(1, 2, 5, 3) labels = torch.tensor([[0, 2, 1, 2]], dtype=torch.int32) act_lens = torch.tensor([2], dtype=torch.int32) label_lens = torch.tensor([len(labels[0])], dtype=torch.int32) loss_val = loss(acts, labels, act_lens, label_lens)
gfirefly/dbentrust/dbutils.py
handsome3163/H2Dgame-Firefly
675
270
<reponame>handsome3163/H2Dgame-Firefly<gh_stars>100-1000 #coding:utf8 ''' Created on 2013-8-21 @author: lan (www.9miao.com) ''' import itertools import datetime def safeunicode(obj, encoding='utf-8'): r""" Converts any given object to unicode string. >>> safeunicode('hello') u'hello' >>> safeunicode(2) u'2' >>> safeunicode('\xe1\x88\xb4') u'\u1234' """ t = type(obj) if t is unicode: return obj elif t is str: return obj.decode(encoding) elif t in [int, float, bool]: return unicode(obj) elif hasattr(obj, '__unicode__') or isinstance(obj, unicode): return unicode(obj) else: return str(obj).decode(encoding) def safestr(obj, encoding='utf-8'): r""" Converts any given object to utf-8 encoded string. >>> safestr('hello') 'hello' >>> safestr(u'\u1234') '\xe1\x88\xb4' >>> safestr(2) '2' """ if isinstance(obj, unicode): return obj.encode(encoding) elif isinstance(obj, str): return obj elif hasattr(obj, 'next'): # iterator return itertools.imap(safestr, obj) else: return str(obj) def sqlify(obj): """ converts `obj` to its proper SQL version >>> sqlify(None) 'NULL' >>> sqlify(True) "'t'" >>> sqlify(3) '3' """ # because `1 == True and hash(1) == hash(True)` # we have to do this the hard way... if obj is None: return 'NULL' elif obj is True: return "'t'" elif obj is False: return "'f'" elif datetime and isinstance(obj, datetime.datetime): return repr(obj.isoformat()) else: if isinstance(obj, unicode): obj = obj.encode('utf8') return repr(obj) def sqllist(lst): """ Converts the arguments for use in something like a WHERE clause. >>> sqllist(['a', 'b']) 'a, b' >>> sqllist('a') 'a' >>> sqllist(u'abc') u'abc' """ if isinstance(lst, basestring): return lst else: return ', '.join(lst) def _sqllist(values): """ >>> _sqllist([1, 2, 3]) <sql: '(1, 2, 3)'> """ items = [] items.append('(') for i, v in enumerate(values): if i != 0: items.append(', ') items.append(sqlparam(v)) items.append(')') return SQLQuery(items) def sqlquote(a): """ Ensures `a` is quoted properly for use in a SQL query. >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3) <sql: "WHERE x = 't' AND y = 3"> >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3]) <sql: "WHERE x = 't' AND y IN (2, 3)"> """ if isinstance(a, list): return _sqllist(a) else: return sqlparam(a).sqlquery() def _interpolate(sformat): """ Takes a format string and returns a list of 2-tuples of the form (boolean, string) where boolean says whether string should be evaled or not. from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee) """ from tokenize import tokenprog tokenprog = tokenprog def matchorfail(text, pos): match = tokenprog.match(text, pos) if match is None: raise _ItplError(text, pos) return match, match.end() namechars = "abcdefghijklmnopqrstuvwxyz" \ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; chunks = [] pos = 0 while 1: dollar = sformat.find("$", pos) if dollar < 0: break nextchar = sformat[dollar + 1] if nextchar == "{": chunks.append((0, sformat[pos:dollar])) pos, level = dollar + 2, 1 while level: match, pos = matchorfail(sformat, pos) tstart, tend = match.regs[3] token = sformat[tstart:tend] if token == "{": level = level + 1 elif token == "}": level = level - 1 chunks.append((1, sformat[dollar + 2:pos - 1])) elif nextchar in namechars: chunks.append((0, sformat[pos:dollar])) match, pos = matchorfail(sformat, dollar + 1) while pos < len(sformat): if sformat[pos] == "." 
and \ pos + 1 < len(sformat) and sformat[pos + 1] in namechars: match, pos = matchorfail(sformat, pos + 1) elif sformat[pos] in "([": pos, level = pos + 1, 1 while level: match, pos = matchorfail(sformat, pos) tstart, tend = match.regs[3] token = sformat[tstart:tend] if token[0] in "([": level = level + 1 elif token[0] in ")]": level = level - 1 else: break chunks.append((1, sformat[dollar + 1:pos])) else: chunks.append((0, sformat[pos:dollar + 1])) pos = dollar + 1 + (nextchar == "$") if pos < len(sformat): chunks.append((0, sformat[pos:])) return chunks def sqlwhere(dictionary, grouping=' AND '): """ Converts a `dictionary` to an SQL WHERE clause `SQLQuery`. >>> sqlwhere({'cust_id': 2, 'order_id':3}) <sql: 'order_id = 3 AND cust_id = 2'> >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ') <sql: 'order_id = 3, cust_id = 2'> >>> sqlwhere({'a': 'a', 'b': 'b'}).query() 'a = %s AND b = %s' """ return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping) def reparam(string_, dictionary): """ Takes a string and a dictionary and interpolates the string using values from the dictionary. Returns an `SQLQuery` for the result. >>> reparam("s = $s", dict(s=True)) <sql: "s = 't'"> >>> reparam("s IN $s", dict(s=[1, 2])) <sql: 's IN (1, 2)'> """ dictionary = dictionary.copy() # eval mucks with it result = [] for live, chunk in _interpolate(string_): if live: v = eval(chunk, dictionary) result.append(sqlquote(v)) else: result.append(chunk) return SQLQuery.join(result, '') class UnknownParamstyle(Exception): """ raised for unsupported db paramstyles (currently supported: qmark, numeric, format, pyformat) """ pass class _ItplError(ValueError): def __init__(self, text, pos): ValueError.__init__(self) self.text = text self.pos = pos def __str__(self): return "unfinished expression in %s at char %d" % ( repr(self.text), self.pos) class SQLParam(object): """ Parameter in SQLQuery. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")]) >>> q <sql: "SELECT * FROM test WHERE name='joe'"> >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.values() ['joe'] """ __slots__ = ["value"] def __init__(self, value): self.value = value def get_marker(self, paramstyle='pyformat'): if paramstyle == 'qmark': return '?' elif paramstyle == 'numeric': return ':1' elif paramstyle is None or paramstyle in ['format', 'pyformat']: return '%s' raise UnknownParamstyle, paramstyle def sqlquery(self): return SQLQuery([self]) def __add__(self, other): return self.sqlquery() + other def __radd__(self, other): return other + self.sqlquery() def __str__(self): return str(self.value) def __repr__(self): return '<param: %s>' % repr(self.value) sqlparam = SQLParam class SQLQuery(object): """ You can pass this sort of thing as a clause in any db function. Otherwise, you can pass a dictionary to the keyword argument `vars` and the function will call reparam for you. Internally, consists of `items`, which is a list of strings and SQLParams, which get concatenated to produce the actual query. """ __slots__ = ["items"] # tested in sqlquote's docstring def __init__(self, items=None): r"""Creates a new SQLQuery. 
>>> SQLQuery("x") <sql: 'x'> >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)]) >>> q <sql: 'SELECT * FROM test WHERE x=1'> >>> q.query(), q.values() ('SELECT * FROM test WHERE x=%s', [1]) >>> SQLQuery(SQLParam(1)) <sql: '1'> """ if items is None: self.items = [] elif isinstance(items, list): self.items = items elif isinstance(items, SQLParam): self.items = [items] elif isinstance(items, SQLQuery): self.items = list(items.items) else: self.items = [items] # Take care of SQLLiterals for i, item in enumerate(self.items): if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral): self.items[i] = item.value.v def append(self, value): self.items.append(value) def __add__(self, other): if isinstance(other, basestring): items = [other] elif isinstance(other, SQLQuery): items = other.items else: return NotImplemented return SQLQuery(self.items + items) def __radd__(self, other): if isinstance(other, basestring): items = [other] else: return NotImplemented return SQLQuery(items + self.items) def __iadd__(self, other): if isinstance(other, (basestring, SQLParam)): self.items.append(other) elif isinstance(other, SQLQuery): self.items.extend(other.items) else: return NotImplemented return self def __len__(self): return len(self.query()) def query(self, paramstyle=None): """ Returns the query part of the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.query(paramstyle='qmark') 'SELECT * FROM test WHERE name=?' """ s = [] for x in self.items: if isinstance(x, SQLParam): x = x.get_marker(paramstyle) s.append(safestr(x)) else: x = safestr(x) # automatically escape % characters in the query # For backward compatability, ignore escaping when the query looks already escaped if paramstyle in ['format', 'pyformat']: if '%' in x and '%%' not in x: x = x.replace('%', '%%') s.append(x) return "".join(s) def values(self): """ Returns the values of the parameters used in the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.values() ['joe'] """ return [i.value for i in self.items if isinstance(i, SQLParam)] def join(items, sep=' ', prefix=None, suffix=None, target=None): """ Joins multiple queries. >>> SQLQuery.join(['a', 'b'], ', ') <sql: 'a, b'> Optinally, prefix and suffix arguments can be provided. >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')') <sql: '(a, b)'> If target argument is provided, the items are appended to target instead of creating a new SQLQuery. """ if target is None: target = SQLQuery() target_items = target.items if prefix: target_items.append(prefix) for i, item in enumerate(items): if i != 0: target_items.append(sep) if isinstance(item, SQLQuery): target_items.extend(item.items) else: target_items.append(item) if suffix: target_items.append(suffix) return target join = staticmethod(join) def _str(self): try: return self.query() % tuple([sqlify(x) for x in self.values()]) except (ValueError, TypeError): return self.query() def __str__(self): return safestr(self._str()) def __unicode__(self): return safeunicode(self._str()) def __repr__(self): return '<sql: %s>' % repr(str(self)) class SQLLiteral: """ Protects a string from `sqlquote`. >>> sqlquote('NOW()') <sql: "'NOW()'"> >>> sqlquote(SQLLiteral('NOW()')) <sql: 'NOW()'> """ def __init__(self, v): self.v = v def __repr__(self): return self.v class SQLProducer: """Database""" def __init__(self): """Creates a database. 
""" pass def query(self, sql_query,processed=False, svars=None): """ Execute SQL query `sql_query` using dictionary `vars` to interpolate it. If `processed=True`, `vars` is a `reparam`-style list to use instead of interpolating. >>> db = DB(None, {}) >>> db.query("SELECT * FROM foo", _test=True) <sql: 'SELECT * FROM foo'> >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> """ if svars is None: svars = {} if not processed and not isinstance(sql_query, SQLQuery): sql_query = reparam(sql_query, svars) return sql_query def sql_clauses(self, what, tables, where, group, order, limit, offset): return ( ('SELECT', what), ('FROM', sqllist(tables)), ('WHERE', where), ('GROUP BY', group), ('ORDER BY', order), ('LIMIT', limit), ('OFFSET', offset)) def gen_clause(self, sql, val, svars): if isinstance(val, (int, long)): if sql == 'WHERE': nout = 'id = ' + sqlquote(val) else: nout = SQLQuery(val) elif isinstance(val, (list, tuple)) and len(val) == 2: nout = SQLQuery(val[0], val[1]) # backwards-compatibility elif isinstance(val, SQLQuery): nout = val else: nout = reparam(val, svars) def xjoin(a, b): if a and b: return a + ' ' + b else: return a or b return xjoin(sql, nout) def _where(self, where, svars): if isinstance(where, (int, long)): where = "id = " + sqlparam(where) elif isinstance(where, (list, tuple)) and len(where) == 2: where = SQLQuery(where[0], where[1]) elif isinstance(where, SQLQuery): pass else: where = reparam(where, svars) return where def select(self, tables, svars=None, what='*', where=None, order=None, group=None, limit=None, offset=None, _test=False): """ Selects `what` from `tables` with clauses `where`, `order`, `group`, `limit`, and `offset`. Uses vars to interpolate. Otherwise, each clause can be a SQLQuery. >>> db = DB(None, {}) >>> db.select('foo', _test=True) <sql: 'SELECT * FROM foo'> >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True) <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'> """ if svars is None: svars = {} sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset) clauses = [self.gen_clause(sql, val, svars) for sql, val in sql_clauses if val is not None] qout = SQLQuery.join(clauses) if _test: return qout return self.query(qout, processed=True) def insert(self, tablename, seqname=None, _test=False, **values): """ Inserts `values` into `tablename`. Returns current sequence ID. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True) >>> q <sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())"> >>> q.query() 'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())' >>> q.values() [2, 'bob'] """ def q(x): return "(" + x + ")" if values: _keys = SQLQuery.join(values.keys(), ', ') _values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ') sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values) else: sql_query = SQLQuery(self._get_insert_default_values_query(tablename)) return sql_query def _get_insert_default_values_query(self, table): return "INSERT INTO %s DEFAULT VALUES" % table def multiple_insert(self, tablename, values, seqname=None, _test=False): """ Inserts multiple rows into `tablename`. 
The `values` must be a list of dictioanries, one for each row to be inserted, each with the same set of keys. Returns the list of ids of the inserted rows. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> db.supports_multiple_insert = True >>> values = [{"name": "foo", "email": "<EMAIL>"}, {"name": "bar", "email": "<EMAIL>"}] >>> db.multiple_insert('person', values=values, _test=True) <sql: "INSERT INTO person (name, email) VALUES ('foo', '<EMAIL>'), ('bar', '<EMAIL>')"> """ if not values: return [] if not self.supports_multiple_insert: out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values] if seqname is False: return None else: return out keys = values[0].keys() #@@ make sure all keys are valid # make sure all rows have same keys. for v in values: if v.keys() != keys: raise ValueError, 'Bad data' sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys))) for i, row in enumerate(values): if i != 0: sql_query.append(", ") SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")") if _test: return sql_query db_cursor = self._db_cursor() if seqname is not False: sql_query = self._process_insert_query(sql_query, tablename, seqname) if isinstance(sql_query, tuple): # for some databases, a separate query has to be made to find # the id of the inserted row. q1, q2 = sql_query self._db_execute(db_cursor, q1) self._db_execute(db_cursor, q2) else: self._db_execute(db_cursor, sql_query) try: out = db_cursor.fetchone()[0] out = range(out-len(values)+1, out+1) except Exception: out = None if not self.ctx.transactions: self.ctx.commit() return out def update(self, tables, where, svars=None, _test=False, **values): """ Update `tables` with clause `where` (interpolated using `vars`) and setting `values`. >>> db = DB(None, {}) >>> name = 'Joseph' >>> q = db.update('foo', where='name = $name', name='bob', age=2, ... created=SQLLiteral('NOW()'), vars=locals(), _test=True) >>> q <sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'"> >>> q.query() 'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s' >>> q.values() [2, 'bob', 'Joseph'] """ if svars is None: svars = {} where = self._where(where, svars) query = ( "UPDATE " + sqllist(tables) + " SET " + sqlwhere(values, ', ') + " WHERE " + where) if _test: return query db_cursor = self._db_cursor() self._db_execute(db_cursor, query) if not self.ctx.transactions: self.ctx.commit() return db_cursor.rowcount def delete(self, table, where, using=None, svars=None, _test=False): """ Deletes from `table` with clauses `where` and `using`. >>> db = DB(None, {}) >>> name = 'Joe' >>> db.delete('foo', where='name = $name', vars=locals(), _test=True) <sql: "DELETE FROM foo WHERE name = 'Joe'"> """ if svars is None: svars = {} where = self._where(where, svars) q = 'DELETE FROM ' + table if using: q += ' USING ' + sqllist(using) if where: q += ' WHERE ' + where return q sqlproducer = SQLProducer()
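Because this module targets Python 2 (it relies on `unicode` and `basestring`), the short usage sketch below is Python 2 as well. It only restates what the doctests above already show; the table name and values are illustrative.

q = SQLQuery("SELECT * FROM account WHERE id = ") + sqlquote(3)
print q.query()    # SELECT * FROM account WHERE id = %s
print q.values()   # [3]

where = sqlwhere({'name': 'joe', 'age': 30})
print repr(where)  # <sql: "age = 30 AND name = 'joe'"> (dict ordering may vary)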
Arrays/LeftRotation.py
anand722000/algo_ds_101
175
286
#!/bin/python3

import math
import os
import random
import re
import sys

# Complete the rotLeft function below.
def rotLeft(a, d):
    alist = list(a)
    b = alist[d:] + alist[:d]
    return b

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    nd = input().split()
    n = int(nd[0])
    d = int(nd[1])

    a = list(map(int, input().rstrip().split()))

    result = rotLeft(a, d)

    fptr.write(' '.join(map(str, result)))
    fptr.write('\n')
    fptr.close()
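For reference, the slice-based rotation can be checked outside the HackerRank harness with illustrative inputs:

print(rotLeft([1, 2, 3, 4, 5], 4))   # [5, 1, 2, 3, 4]
print(rotLeft([1, 2, 3, 4, 5], 2))   # [3, 4, 5, 1, 2]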
nearpy/examples/example2.py
samyoo78/NearPy
624
289
# -*- coding: utf-8 -*- # Copyright (c) 2013 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import numpy import scipy import unittest import time from nearpy import Engine from nearpy.distances import CosineDistance from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper def example2(): # Dimension of feature space DIM = 100 # Number of data points (dont do too much because of exact search) POINTS = 20000 ########################################################## print('Performing indexing with HashPermutations...') t0 = time.time() # Create permutations meta-hash permutations = HashPermutations('permut') # Create binary hash as child hash rbp_perm = RandomBinaryProjections('rbp_perm', 14) rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100} # Add rbp as child hash of permutations hash permutations.add_child_hash(rbp_perm, rbp_conf) # Create engine engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance()) # First index some random vectors matrix = numpy.zeros((POINTS,DIM)) for i in range(POINTS): v = numpy.random.randn(DIM) matrix[i] = v engine_perm.store_vector(v) # Then update permuted index permutations.build_permuted_index() t1 = time.time() print('Indexing took %f seconds' % (t1-t0)) # Get random query vector query = numpy.random.randn(DIM) # Do random query on engine 3 print('\nNeighbour distances with HashPermutations:') print(' -> Candidate count is %d' % engine_perm.candidate_count(query)) results = engine_perm.neighbours(query) dists = [x[2] for x in results] print(dists) # Real neighbours print('\nReal neighbour distances:') query = query.reshape((DIM)) dists = CosineDistance().distance(matrix, query) dists = dists.reshape((-1,)) dists = sorted(dists) print(dists[:10]) ########################################################## print('\nPerforming indexing with HashPermutationMapper...') t0 = time.time() # Create permutations meta-hash permutations2 = HashPermutationMapper('permut2') # Create binary hash as child hash rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14) # Add rbp as child hash of permutations hash permutations2.add_child_hash(rbp_perm2) # Create engine engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance()) # First index some random vectors matrix = numpy.zeros((POINTS,DIM)) for i in range(POINTS): v = numpy.random.randn(DIM) matrix[i] = v engine_perm2.store_vector(v) t1 = time.time() print('Indexing took %f seconds' % (t1-t0)) # Get random query vector query = 
numpy.random.randn(DIM) # Do random query on engine 4 print('\nNeighbour distances with HashPermutationMapper:') print(' -> Candidate count is %d' % engine_perm2.candidate_count(query)) results = engine_perm2.neighbours(query) dists = [x[2] for x in results] print(dists) # Real neighbours print('\nReal neighbour distances:') query = query.reshape((DIM)) dists = CosineDistance().distance(matrix,query) dists = dists.reshape((-1,)) dists = sorted(dists) print(dists[:10]) ########################################################## print('\nPerforming indexing with multiple binary hashes...') t0 = time.time() hashes = [] for k in range(20): hashes.append(RandomBinaryProjections('rbp_%d' % k, 10)) # Create engine engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance()) # First index some random vectors matrix = numpy.zeros((POINTS,DIM)) for i in range(POINTS): v = numpy.random.randn(DIM) matrix[i] = v engine_rbps.store_vector(v) t1 = time.time() print('Indexing took %f seconds' % (t1-t0)) # Get random query vector query = numpy.random.randn(DIM) # Do random query on engine 4 print('\nNeighbour distances with multiple binary hashes:') print(' -> Candidate count is %d' % engine_rbps.candidate_count(query)) results = engine_rbps.neighbours(query) dists = [x[2] for x in results] print(dists) # Real neighbours print('\nReal neighbour distances:') query = query.reshape((DIM)) dists = CosineDistance().distance(matrix,query) dists = dists.reshape((-1,)) dists = sorted(dists) print(dists[:10]) ##########################################################
src/mem/slicc/ast/TypeDeclAST.py
qianlong4526888/haha
135
299
# Copyright (c) 1999-2008 <NAME> and <NAME> # Copyright (c) 2009 The Hewlett-Packard Development Company # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from slicc.ast.DeclAST import DeclAST from slicc.symbols.Type import Type class TypeDeclAST(DeclAST): def __init__(self, slicc, type_ast, pairs, field_asts): super(TypeDeclAST, self).__init__(slicc, pairs) self.type_ast = type_ast self.field_asts = field_asts def __repr__(self): return "[TypeDecl: %r]" % (self.type_ast) def files(self, parent=None): if "external" in self: return set() if parent: ident = "%s_%s" % (parent, self.type_ast.ident) else: ident = self.type_ast.ident return set(("%s.hh" % ident, "%s.cc" % ident)) def generate(self): ident = str(self.type_ast) machine = self.symtab.state_machine # Make the new type new_type = Type(self.symtab, ident, self.location, self.pairs, self.state_machine) if machine: machine.addType(new_type) self.symtab.newSymbol(new_type) self.symtab.pushFrame() # Add all of the fields of the type to it for field in self.field_asts: field.generate(new_type) self.symtab.popFrame()
src/biotite/file.py
danijoo/biotite
208
308
<reponame>danijoo/biotite # This source code is part of the Biotite package and is distributed # under the 3-Clause BSD License. Please see 'LICENSE.rst' for further # information. __name__ = "biotite" __author__ = "<NAME>" __all__ = ["File", "TextFile", "InvalidFileError"] import abc import io import warnings from .copyable import Copyable import copy class File(Copyable, metaclass=abc.ABCMeta): """ Base class for all file classes. The constructor creates an empty file, that can be filled with data using the class specific setter methods. Conversely, the class method :func:`read()` reads a file from disk (or a file-like object from other sources). In order to write the instance content into a file the :func:`write()` method is used. """ def __init__(self): # Support for deprecated instance method 'read()': # When creating an instance, the 'read()' class method is # replaced by the instance method, so that subsequent # 'read()' calls are delegated to the instance method self.read = self._deprecated_read @classmethod @abc.abstractmethod def read(cls, file): """ Parse a file (or file-like object). Parameters ---------- file : file-like object or str The file to be read. Alternatively a file path can be supplied. Returns ------- file_object : File An instance from the respective :class:`File` subclass representing the parsed file. """ pass def _deprecated_read(self, file, *args, **kwargs): """ Support for deprecated instance method :func:`read()`. Internally this calls the :func:`read()` class method and replaces the data in `self` with the data from the newly created :class:`File` object """ warnings.warn( "Instance method 'read()' is deprecated, " "use class method instead", DeprecationWarning ) cls = type(self) new_file = cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self, file): """ Write the contents of this :class:`File` object into a file. Parameters ---------- file_name : file-like object or str The file to be written to. Alternatively a file path can be supplied. """ pass class TextFile(File, metaclass=abc.ABCMeta): """ Base class for all line based text files. When reading a file, the text content is saved as list of strings, one for each line. When writing a file, this list is written into the file. Attributes ---------- lines : list List of string representing the lines in the text file. PROTECTED: Do not modify from outside. """ def __init__(self): super().__init__() self.lines = [] @classmethod def read(cls, file, *args, **kwargs): # File name if isinstance(file, str): with open(file, "r") as f: lines = f.read().splitlines() # File object else: if not is_text(file): raise TypeError("A file opened in 'text' mode is required") lines = file.read().splitlines() file_object = cls(*args, **kwargs) file_object.lines = lines return file_object @staticmethod def read_iter(file): """ Create an iterator over each line of the given text file. Parameters ---------- file : file-like object or str The file to be read. Alternatively a file path can be supplied. Yields ------ line : str The current line in the file. """ # File name if isinstance(file, str): with open(file, "r") as f: while True: line = f.readline() if not line: break yield line # File object else: if not is_text(file): raise TypeError("A file opened in 'text' mode is required") while True: line = file.readline() if not line: break yield line def write(self, file): """ Write the contents of this object into a file (or file-like object). 
Parameters ---------- file_name : file-like object or str The file to be written to. Alternatively a file path can be supplied. """ if isinstance(file, str): with open(file, "w") as f: f.write("\n".join(self.lines) + "\n") else: if not is_text(file): raise TypeError("A file opened in 'text' mode is required") file.write("\n".join(self.lines) + "\n") def __copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines = copy.copy(self.lines) def __str__(self): return("\n".join(self.lines)) class InvalidFileError(Exception): """ Indicates that the file is not suitable for the requested action, either because the file does not contain the required data or because the file is malformed. """ pass def wrap_string(text, width): """ A much simpler and hence much more efficient version of `textwrap.wrap()`. This function simply wraps the given `text` after `width` characters, ignoring sentences, whitespaces, etc. """ lines = [] for i in range(0, len(text), width): lines.append(text[i : i+width]) return lines def is_binary(file): if isinstance(file, io.BufferedIOBase): return True # for file wrappers, e.g. 'TemporaryFile' elif hasattr(file, "file") and isinstance(file.file, io.BufferedIOBase): return True else: return False def is_text(file): if isinstance(file, io.TextIOBase): return True # for file wrappers, e.g. 'TemporaryFile' elif hasattr(file, "file") and isinstance(file.file, io.TextIOBase): return True else: return False
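A minimal sketch of how the `TextFile` API above is meant to be used; the file names are hypothetical and `TextFile` is assumed to be importable from the package root.

from biotite import TextFile

text_file = TextFile.read("example.txt")          # parse from a path or file-like object
print(text_file.lines[:3])                        # lines are plain strings

text_file.lines.append("one more line")
text_file.write("example_out.txt")                # serialize the lines back to disk

for line in TextFile.read_iter("example.txt"):    # lazy, line-by-line alternative
    pass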
electrum/dnssec.py
Jesusown/electrum
5,905
321
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2015 <NAME> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Check DNSSEC trust chain. # Todo: verify expiration dates # # Based on # http://backreference.org/2010/11/17/dnssec-verification-with-dig/ # https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py import dns import dns.name import dns.query import dns.dnssec import dns.message import dns.resolver import dns.rdatatype import dns.rdtypes.ANY.NS import dns.rdtypes.ANY.CNAME import dns.rdtypes.ANY.DLV import dns.rdtypes.ANY.DNSKEY import dns.rdtypes.ANY.DS import dns.rdtypes.ANY.NSEC import dns.rdtypes.ANY.NSEC3 import dns.rdtypes.ANY.NSEC3PARAM import dns.rdtypes.ANY.RRSIG import dns.rdtypes.ANY.SOA import dns.rdtypes.ANY.TXT import dns.rdtypes.IN.A import dns.rdtypes.IN.AAAA from .logging import get_logger _logger = get_logger(__name__) # hard-coded trust anchors (root KSKs) trust_anchors = [ # KSK-2017: dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '<KEY>), # KSK-2010: dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W<KEY>S Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='), ] def _check_query(ns, sub, _type, keys): q = dns.message.make_query(sub, _type, want_dnssec=True) response = dns.query.tcp(q, ns, timeout=5) assert response.rcode() == 0, 'No answer' answer = response.answer assert len(answer) != 0, ('No DNS record found', sub, _type) assert len(answer) != 1, ('No DNSSEC record found', sub, _type) if answer[0].rdtype == dns.rdatatype.RRSIG: rrsig, rrset = answer elif answer[1].rdtype == dns.rdatatype.RRSIG: rrset, rrsig = answer else: raise Exception('No signature set in record') if keys is None: keys = {dns.name.from_text(sub):rrset} dns.dnssec.validate(rrset, rrsig, keys) return rrset def _get_and_validate(ns, url, _type): # get trusted root key root_rrset = None for dnskey_rr in trust_anchors: try: # Check if there is a valid signature for the root dnskey root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr}) break except dns.dnssec.ValidationFailure: # It's OK as long as one key validates continue if not root_rrset: raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS') keys = {dns.name.root: root_rrset} # top-down verification 
parts = url.split('.') for i in range(len(parts), 0, -1): sub = '.'.join(parts[i-1:]) name = dns.name.from_text(sub) # If server is authoritative, don't fetch DNSKEY query = dns.message.make_query(sub, dns.rdatatype.NS) response = dns.query.udp(query, ns, 3) assert response.rcode() == dns.rcode.NOERROR, "query error" rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0] rr = rrset[0] if rr.rdtype == dns.rdatatype.SOA: continue # get DNSKEY (self-signed) rrset = _check_query(ns, sub, dns.rdatatype.DNSKEY, None) # get DS (signed by parent) ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys) # verify that a signed DS validates DNSKEY for ds in ds_rrset: for dnskey in rrset: htype = 'SHA256' if ds.digest_type == 2 else 'SHA1' good_ds = dns.dnssec.make_ds(name, dnskey, htype) if ds == good_ds: break else: continue break else: raise Exception("DS does not match DNSKEY") # set key for next iteration keys = {name: rrset} # get TXT record (signed by zone) rrset = _check_query(ns, url, _type, keys) return rrset def query(url, rtype): # 8.8.8.8 is Google's public DNS server nameservers = ['8.8.8.8'] ns = nameservers[0] try: out = _get_and_validate(ns, url, rtype) validated = True except Exception as e: _logger.info(f"DNSSEC error: {repr(e)}") out = dns.resolver.resolve(url, rtype) validated = False return out, validated
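A short usage sketch of the `query()` helper above; the domain name is illustrative, network access is assumed, and the import path follows the file's location in the repository.

import dns.rdatatype
from electrum.dnssec import query

answer, validated = query('electrum.org', dns.rdatatype.TXT)
print("DNSSEC validated:", validated)
for rr in answer:
    print(rr.to_text())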
TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py
ckamtsikis/cmssw
852
340
import FWCore.ParameterSet.Config as cms

#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
    ## jet input
    jets = cms.InputTag("selectedPatJets"),
    ## lepton input
    leps = cms.InputTag("selectedPatMuons"),
    ## maximum number of jets to be considered
    maxNJets = cms.int32(4),
    ## nominal WMass parameter (in GeV)
    wMass = cms.double(80.4),
    ## use b-tagging to distinguish between light and b jets
    useBTagging = cms.bool(False),
    ## choose algorithm for b-tagging
    bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
    ## minimum b discriminator value required for b jets and
    ## maximum b discriminator value allowed for non-b jets
    minBDiscBJets = cms.double(1.0),
    maxBDiscLightJets = cms.double(3.0)
)
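In a downstream configuration one would typically not edit this cfi in place but clone the producer and override individual parameters; a hypothetical variant (the new module name and values are illustrative):

findTtSemiLepJetCombMaxSumPtWMassBTag = findTtSemiLepJetCombMaxSumPtWMass.clone(
    maxNJets = 5,         # consider one more jet
    useBTagging = True    # require b-tag information
)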
libsaas/services/twilio/applications.py
MidtownFellowship/libsaas
155
344
<gh_stars>100-1000 from libsaas import http, parsers from libsaas.services import base from libsaas.services.twilio import resource class ApplicationsBase(resource.TwilioResource): path = 'Applications' class Application(ApplicationsBase): def create(self, *args, **kwargs): raise base.MethodNotSupported() class Applications(ApplicationsBase): @base.apimethod def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None): """ Fetch the Applications belonging to an account. :var FriendlyName: Only return the Account resources with friendly names that exactly match this name. :vartype FriendlyName: str :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json def update(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class ConnectAppsBase(resource.TwilioResource): path = 'ConnectApps' def create(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class ConnectApp(ConnectAppsBase): pass class ConnectApps(ConnectAppsBase): @base.apimethod def get(self, Page=None, PageSize=None, AfterSid=None): """ Fetch the Connect Apps belonging to an account. :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json def update(self, *args, **kwargs): raise base.MethodNotSupported() class AuthorizedConnectAppsBase(resource.TwilioResource): path = 'AuthorizedConnectApps' def create(self, *args, **kwargs): raise base.MethodNotSupported() def update(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args, **kwargs): raise base.MethodNotSupported() class AuthorizedConnectApp(AuthorizedConnectAppsBase): pass class AuthorizedConnectApps(AuthorizedConnectAppsBase): @base.apimethod def get(self, Page=None, PageSize=None, AfterSid=None): """ Fetch the Authorized Connect Apps belonging to an account. :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """ params = resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json
tests/ast/nodes/test_from_node.py
upgradvisor/vyper
1,471
351
from vyper import ast as vy_ast


def test_output_class():
    old_node = vy_ast.parse_to_ast("foo = 42")
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert isinstance(new_node, vy_ast.Int)


def test_source():
    old_node = vy_ast.parse_to_ast("foo = 42")
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert old_node.src == new_node.src
    assert old_node.node_source_code == new_node.node_source_code


def test_kwargs():
    old_node = vy_ast.parse_to_ast("42").body[0].value
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert old_node.value == 42
    assert new_node.value == 666


def test_compare_nodes():
    old_node = vy_ast.parse_to_ast("foo = 42")
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert not vy_ast.compare_nodes(old_node, new_node)


def test_new_node_has_no_parent():
    old_node = vy_ast.parse_to_ast("foo = 42")
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert new_node._parent is None
    assert new_node._depth == 0
Python/Examples/Macros/SettingsAxesOptimization.py
archformco/RoboDK-API
161
425
# This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string. # You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings. # It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API. # # More information about the RoboDK API here: # https://robodk.com/doc/en/RoboDK-API.html # For more information visit: # https://robodk.com/doc/en/PythonAPI/robolink.html from robolink import * # RoboDK API # JSON tools import json # Start the RoboDK API RDK = Robolink() # Ask the user to select a robot arm (6 axis robot wich can have external axes) robot = RDK.ItemUserPick("Select a robot arm",ITEM_TYPE_ROBOT_ARM) # Default optimization settings test template AxesOptimSettings = { # Optimization parameters: "Active": 1, # Use generic axes optimization: 0=Disabled or 1=Enabled "Algorithm": 2, # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead "MaxIter": 650, # Max. number of iterations "Tol": 0.0016, # Tolerance to stop iterations # Absolute Reference joints (double): "AbsJnt_1": 104.17, "AbsJnt_2": 11.22, "AbsJnt_3": 15.97, "AbsJnt_4": -87.48, "AbsJnt_5": -75.36, "AbsJnt_6": 63.03, "AbsJnt_7": 174.13, "AbsJnt_8": 173.60, "AbsJnt_9": 0, # Using Absolute reference joints (0: No, 1: Yes): "AbsOn_1": 1, "AbsOn_2": 1, "AbsOn_3": 1, "AbsOn_4": 1, "AbsOn_5": 1, "AbsOn_6": 1, "AbsOn_7": 1, "AbsOn_8": 1, "AbsOn_9": 1, # Weight for absolute reference joints (double): "AbsW_1": 100, "AbsW_2": 100, "AbsW_3": 100, "AbsW_4": 89, "AbsW_5": 90, "AbsW_6": 92, "AbsW_7": 92, "AbsW_8": 96, "AbsW_9": 50, # Using for relative joint motion smoothing (0: No, 1: Yes): "RelOn_1": 1, "RelOn_2": 1, "RelOn_3": 1, "RelOn_4": 1, "RelOn_5": 1, "RelOn_6": 1, "RelOn_7": 1, "RelOn_8": 1, "RelOn_9": 1, # Weight for relative joint motion (double): "RelW_1": 5, "RelW_2": 47, "RelW_3": 44, "RelW_4": 43, "RelW_5": 36, "RelW_6": 47, "RelW_7": 53, "RelW_8": 59, "RelW_9": 0, } # Update one value, for example, make it active: ToUpdate = {} ToUpdate["Active"] = 1 json_str = json.dumps(json.dumps(ToUpdate)) status = robot.setParam("OptimAxes", json_str) print(status) # Example to make a partial or full update count = 1 while True: for i in range(7): # Partial update ToUpdate = {} ToUpdate["AbsJnt_" + str(i+1)] = (count+i)*4 ToUpdate["AbsOn_" + str(i+1)] = count % 2 ToUpdate["AbsW_" + str(i+1)] = (count+i) json_str = json.dumps(json.dumps(ToUpdate)) status = robot.setParam("OptimAxes", json_str) print(status) # Full update #OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4 #OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i) #OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2 # Full update #print(robot.setParam("OptimAxes", str(AxesOptimSettings))) count = count + 1 # Read settings json_data = robot.setParam("OptimAxes") json_object = json.loads(json_data) print(json.dumps(json_object, indent=4)) pause(0.2) # Example to read the current axes optimization settings: while True: json_data = robot.setParam("OptimAxes") json_object = json.loads(json_data) print(json.dumps(json_object, indent=4)) pause(0.2)
tests/test.py
kjanik70/tflearn
10,882
433
''' This file contains test cases for tflearn ''' import tensorflow.compat.v1 as tf import tflearn import unittest class TestActivations(unittest.TestCase): ''' This class contains test cases for the functions in tflearn/activations.py ''' PLACES = 4 # Number of places to match when testing floating point values def test_linear(self): f = tflearn.linear # Case 1 x = tf.placeholder(tf.float32, shape=()) self.assertEqual(f(x), x) # Case 2 x = tf.placeholder(tf.int64, shape=()) self.assertEqual(f(x), x) def test_tanh(self): f = tflearn.tanh x = tf.placeholder(tf.float32, shape=()) with tf.Session() as sess: # Case 1 self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0) # Case 2 self.assertAlmostEqual(sess.run(f(x), feed_dict={x:0.5}), 0.4621, places=TestActivations.PLACES) # Case 3 self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-0.25}), -0.2449, places=TestActivations.PLACES) def test_leaky_relu(self): f = lambda x: tflearn.leaky_relu(x, alpha=0.2) x = tf.placeholder(tf.float32, shape=()) with tf.Session() as sess: # Case 1 self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0) # Case 2 self.assertAlmostEqual(sess.run(f(x), feed_dict={x:1}), 1, places=TestActivations.PLACES) # Case 3 self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-1}), -0.2, places=TestActivations.PLACES) # Case 4 self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-5}), -1, places=TestActivations.PLACES) def test_apply_activation(self): lrelu_02 = lambda x: tflearn.leaky_relu(x, alpha=0.2) x = tf.constant(-0.25, tf.float32) with tf.Session() as sess: # Case 1: 'linear' self.assertEqual( sess.run(tflearn.activation(x, 'linear')), -0.25) # Case 2: 'relu' self.assertEqual( sess.run(tflearn.activation(x, 'relu')), 0) # Case 3: 'leaky_relu' self.assertAlmostEqual( sess.run(tflearn.activation(x, 'leaky_relu')), -0.025, places=TestActivations.PLACES) # Case 4: 'tanh' self.assertAlmostEqual( sess.run(tflearn.activation(x, 'tanh')), -0.2449, places=TestActivations.PLACES) # Case 5: lrelu_02 (callable) self.assertAlmostEqual( sess.run(tflearn.activation(x, lrelu_02)), -0.05, places=TestActivations.PLACES) if __name__ == "__main__": unittest.main()
venv/Lib/site-packages/patsy/test_regressions.py
EkremBayar/bayar
710
443
# This file is part of Patsy
# Copyright (C) 2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.

# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)

from patsy import (EvalEnvironment, dmatrix, build_design_matrices,
                   PatsyError, Origin)

def test_issue_11():
    # Give a sensible error message for level mismatches
    # (At some points we've failed to put an origin= on these errors)
    env = EvalEnvironment.capture()
    data = {"X" : [0, 1, 2, 3],
            "Y" : [1, 2, 3, 4]}
    formula = "C(X) + Y"
    new_data = {"X" : [0, 0, 1, 2, 3, 3, 4],
                "Y" : [1, 2, 3, 4, 5, 6, 7]}
    info = dmatrix(formula, data)
    try:
        build_design_matrices([info.design_info], new_data)
    except PatsyError as e:
        assert e.origin == Origin(formula, 0, 4)
    else:
        assert False
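For contrast, a sketch of the same round trip when the new data stays within the factor levels seen at fit time, so build_design_matrices succeeds instead of raising:

from patsy import dmatrix, build_design_matrices

train = {"X": [0, 1, 2, 3], "Y": [1, 2, 3, 4]}
info = dmatrix("C(X) + Y", train)

# New rows restricted to the levels 0-3 that C(X) learned from `train`
new_rows = {"X": [0, 2, 3], "Y": [10, 20, 30]}
(new_matrix,) = build_design_matrices([info.design_info], new_rows)
print(new_matrix)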
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
ciskoinch8/vimrc
463
447
<filename>vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
class Foo:
    pass


class Bar(Foo):
    def __init__(self):
        super(Bar, self).__init__()  # [super-with-arguments]


class Baz(Foo):
    def __init__(self):
        super().__init__()


class Qux(Foo):
    def __init__(self):
        super(Bar, self).__init__()


class NotSuperCall(Foo):
    def __init__(self):
        super.test(Bar, self).__init__()


class InvalidSuperCall(Foo):
    def __init__(self):
        super(InvalidSuperCall.__class__, self).__init__()


def method_accepting_cls(cls, self):
    # Using plain `super()` is not valid here, since there's no `__class__` cell found
    # (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
    # Instead, we expect to *not* see a warning about `super-with-arguments`.
    # Explicitly passing `cls`, and `self` to `super()` is what's required.
    super(cls, self).__init__()
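A sketch of how this functional test's expectation could be reproduced by hand, assuming pylint is installed and the file is saved locally (the path below is illustrative); per the `# [super-with-arguments]` marker, only Bar should be reported:

import subprocess

result = subprocess.run(
    ["pylint", "--disable=all", "--enable=super-with-arguments",
     "super_with_arguments.py"],  # hypothetical local copy of the file above
    capture_output=True, text=True)
print(result.stdout)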
examples/cmrc2018_example/main.trainer.py
fangd123/TextBrewer
1121
480
<filename>examples/cmrc2018_example/main.trainer.py<gh_stars>1000+ import logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%Y/%m/%d %H:%M:%S', level=logging.INFO, ) logger = logging.getLogger("Main") import os,random import numpy as np import torch from processing import convert_examples_to_features, read_squad_examples from processing import ChineseFullTokenizer from pytorch_pretrained_bert.my_modeling import BertConfig from optimization import BERTAdam import config from utils import read_and_convert, divide_parameters from modeling import BertForQASimple, BertForQASimpleAdaptorTraining from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer from torch.utils.data import TensorDataset, DataLoader, RandomSampler from functools import partial from train_eval import predict def args_check(args): if os.path.exists(args.output_dir) and os.listdir(args.output_dir): logger.warning("Output directory () already exists and is not empty.") if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) if not args.do_train and not args.do_predict: raise ValueError("At least one of `do_train` or `do_predict` must be True.") if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() if not args.no_cuda else 0 else: device = torch.device("cuda", args.local_rank) n_gpu = 1 torch.distributed.init_process_group(backend='nccl') logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1)) args.n_gpu = n_gpu args.device = device return device, n_gpu def main(): #parse arguments config.parse() args = config.args for k,v in vars(args).items(): logger.info(f"{k}:{v}") #set seeds torch.manual_seed(args.random_seed) torch.cuda.manual_seed_all(args.random_seed) np.random.seed(args.random_seed) random.seed(args.random_seed) #arguments check device, n_gpu = args_check(args) os.makedirs(args.output_dir, exist_ok=True) forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) args.forward_batch_size = forward_batch_size #load bert config bert_config_S = BertConfig.from_json_file(args.bert_config_file_S) assert args.max_seq_length <= bert_config_S.max_position_embeddings #read data train_examples = None train_features = None eval_examples = None eval_features = None num_train_steps = None tokenizer = ChineseFullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case) convert_fn = partial(convert_examples_to_features, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length) if args.do_train: train_examples,train_features = read_and_convert(args.train_file,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) if args.fake_file_1: fake_examples1,fake_features1 = read_and_convert(args.fake_file_1,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) train_examples += fake_examples1 train_features += fake_features1 if args.fake_file_2: fake_examples2, fake_features2 = read_and_convert(args.fake_file_2,is_training=True, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) train_examples += fake_examples2 train_features += fake_features2 num_train_steps = 
int(len(train_features)/args.train_batch_size) * args.num_train_epochs if args.do_predict: eval_examples,eval_features = read_and_convert(args.predict_file,is_training=False, do_lower_case=args.do_lower_case, read_fn=read_squad_examples,convert_fn=convert_fn) #Build Model and load checkpoint model_S = BertForQASimple(bert_config_S,args) #Load student if args.load_model_type=='bert': assert args.init_checkpoint_S is not None state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu') state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')} missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False) assert len(missing_keys)==0 elif args.load_model_type=='all': assert args.tuned_checkpoint_S is not None state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu') model_S.load_state_dict(state_dict_S) else: logger.info("Model is randomly initialized.") model_S.to(device) if args.local_rank != -1 or n_gpu > 1: if args.local_rank != -1: raise NotImplementedError elif n_gpu > 1: model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1) if args.do_train: #parameters params = list(model_S.named_parameters()) all_trainable_params = divide_parameters(params, lr=args.learning_rate) logger.info("Length of all_trainable_params: %d", len(all_trainable_params)) optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate, warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule, s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3) logger.info("***** Running training *****") logger.info(" Num orig examples = %d", len(train_examples)) logger.info(" Num split examples = %d", len(train_features)) logger.info(" Forward batch size = %d", forward_batch_size) logger.info(" Num backward steps = %d", num_train_steps) ########### DISTILLATION ########### train_config = TrainingConfig( gradient_accumulation_steps = args.gradient_accumulation_steps, ckpt_frequency = args.ckpt_frequency, log_dir = args.output_dir, output_dir = args.output_dir, device = args.device) distiller = BasicTrainer(train_config = train_config, model = model_S, adaptor = BertForQASimpleAdaptorTraining) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_doc_mask = torch.tensor([f.doc_mask for f in train_features], dtype=torch.float) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long) train_dataset = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_doc_mask, all_start_positions, all_end_positions) if args.local_rank == -1: train_sampler = RandomSampler(train_dataset) else: raise NotImplementedError train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True) callback_func = partial(predict, eval_examples=eval_examples, eval_features=eval_features, args=args) with distiller: distiller.train(optimizer, scheduler=None, dataloader=train_dataloader, num_epochs=args.num_train_epochs, callback=callback_func) if not args.do_train and args.do_predict: res = predict(model_S,eval_examples,eval_features,step=0,args=args) print (res) if __name__ == "__main__": main()
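The distillation wiring above reduces to a few TextBrewer calls; a minimal sketch, where model_S, optimizer and train_dataloader stand for the objects built earlier in the script and the paths, device string and epoch count are placeholders:

from textbrewer import TrainingConfig, BasicTrainer

train_config = TrainingConfig(
    gradient_accumulation_steps=1,
    ckpt_frequency=1,
    log_dir="output",     # illustrative
    output_dir="output",  # illustrative
    device="cuda")

trainer = BasicTrainer(train_config=train_config,
                       model=model_S,  # student model built in the script above
                       adaptor=BertForQASimpleAdaptorTraining)

with trainer:
    trainer.train(optimizer, scheduler=None, dataloader=train_dataloader,
                  num_epochs=2, callback=None)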
gn/gn_to_bp.py
despairblue/esy-skia
2151
485
<reponame>despairblue/esy-skia #!/usr/bin/env python # # Copyright 2016 Google Inc. # # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Generate Android.bp for Skia from GN configuration. import json import os import pprint import string import subprocess import tempfile import gn_to_bp_utils # First we start off with a template for Android.bp, # with holes for source lists and include directories. bp = string.Template('''// This file is autogenerated by gn_to_bp.py. cc_library_static { name: "libskia", cflags: [ $cflags ], cppflags:[ $cflags_cc ], export_include_dirs: [ $export_includes ], local_include_dirs: [ $local_includes ], srcs: [ $srcs ], arch: { arm: { srcs: [ $arm_srcs ], neon: { srcs: [ $arm_neon_srcs ], }, }, arm64: { srcs: [ $arm64_srcs ], }, mips: { srcs: [ $none_srcs ], }, mips64: { srcs: [ $none_srcs ], }, x86: { srcs: [ $x86_srcs ], cflags: [ // Clang seems to think new/malloc will only be 4-byte aligned // on x86 Android. We're pretty sure it's actually 8-byte // alignment. tests/OverAlignedTest.cpp has more information, // and should fail if we're wrong. "-Wno-over-aligned" ], }, x86_64: { srcs: [ $x86_srcs ], }, }, defaults: ["skia_deps", "skia_pgo", ], } // Build libskia with PGO by default. // Location of PGO profile data is defined in build/soong/cc/pgo.go // and is separate from skia. // To turn it off, set ANDROID_PGO_NO_PROFILE_USE environment variable // or set enable_profile_use property to false. cc_defaults { name: "skia_pgo", pgo: { instrumentation: true, profile_file: "hwui/hwui.profdata", benchmarks: ["hwui", "skia"], enable_profile_use: true, }, } // "defaults" property to disable profile use for Skia tools and benchmarks. cc_defaults { name: "skia_pgo_no_profile_use", defaults: [ "skia_pgo", ], pgo: { enable_profile_use: false, }, } cc_defaults { name: "skia_deps", shared_libs: [ "libEGL", "libGLESv2", "libdng_sdk", "libexpat", "libft2", "libheif", "libicui18n", "libicuuc", "libjpeg", "liblog", "libpiex", "libpng", "libvulkan", "libz", "libcutils", "libnativewindow", ], static_libs: [ "libarect", "libsfntly", "libwebp-decode", "libwebp-encode", ], group_static_libs: true, } cc_defaults { name: "skia_tool_deps", defaults: [ "skia_deps", "skia_pgo_no_profile_use" ], static_libs: [ "libjsoncpp", "libskia", ], cflags: [ "-Wno-unused-parameter", "-Wno-unused-variable", ], } cc_test { name: "skia_dm", defaults: [ "skia_tool_deps" ], local_include_dirs: [ $dm_includes ], srcs: [ $dm_srcs ], shared_libs: [ "libbinder", "libutils", ], } cc_test { name: "skia_nanobench", defaults: [ "skia_tool_deps" ], local_include_dirs: [ $nanobench_includes ], srcs: [ $nanobench_srcs ], data: [ "resources/*", ], }''') # We'll run GN to get the main source lists and include directories for Skia. 
gn_args = { 'is_official_build': 'true', 'skia_enable_tools': 'true', 'skia_enable_skottie': 'false', # requires rapidjson third-party 'skia_use_libheif': 'true', 'skia_use_vulkan': 'true', 'target_cpu': '"none"', 'target_os': '"android"', 'skia_vulkan_header': '"Skia_Vulkan_Android.h"', } js = gn_to_bp_utils.GenerateJSONFromGN(gn_args) def strip_slashes(lst): return {str(p.lstrip('/')) for p in lst} srcs = strip_slashes(js['targets']['//:skia']['sources']) cflags = strip_slashes(js['targets']['//:skia']['cflags']) cflags_cc = strip_slashes(js['targets']['//:skia']['cflags_cc']) local_includes = strip_slashes(js['targets']['//:skia']['include_dirs']) export_includes = strip_slashes(js['targets']['//:public']['include_dirs']) defines = [str(d) for d in js['targets']['//:skia']['defines']] dm_srcs = strip_slashes(js['targets']['//:dm']['sources']) dm_includes = strip_slashes(js['targets']['//:dm']['include_dirs']) nanobench_target = js['targets']['//:nanobench'] nanobench_srcs = strip_slashes(nanobench_target['sources']) nanobench_includes = strip_slashes(nanobench_target['include_dirs']) gn_to_bp_utils.GrabDependentValues(js, '//:skia', 'sources', srcs, None) gn_to_bp_utils.GrabDependentValues(js, '//:dm', 'sources', dm_srcs, 'skia') gn_to_bp_utils.GrabDependentValues(js, '//:nanobench', 'sources', nanobench_srcs, 'skia') # skcms is a little special, kind of a second-party library. srcs .add("third_party/skcms/skcms.c") local_includes.add("third_party/skcms") dm_includes .add("third_party/skcms") # No need to list headers. srcs = {s for s in srcs if not s.endswith('.h')} dm_srcs = {s for s in dm_srcs if not s.endswith('.h')} nanobench_srcs = {s for s in nanobench_srcs if not s.endswith('.h')} cflags = gn_to_bp_utils.CleanupCFlags(cflags) cflags_cc = gn_to_bp_utils.CleanupCCFlags(cflags_cc) # We need to add the include path to the vulkan defines and header file set in # then skia_vulkan_header gn arg that is used for framework builds. local_includes.add("platform_tools/android/vulkan") export_includes.add("platform_tools/android/vulkan") here = os.path.dirname(__file__) defs = gn_to_bp_utils.GetArchSources(os.path.join(here, 'opts.gni')) gn_to_bp_utils.WriteUserConfig('include/config/SkUserConfig.h', defines) # Turn a list of strings into the style bpfmt outputs. def bpfmt(indent, lst, sort=True): if sort: lst = sorted(lst) return ('\n' + ' '*indent).join('"%s",' % v for v in lst) # OK! We have everything to fill in Android.bp... with open('Android.bp', 'w') as f: print >>f, bp.substitute({ 'export_includes': bpfmt(8, export_includes), 'local_includes': bpfmt(8, local_includes), 'srcs': bpfmt(8, srcs), 'cflags': bpfmt(8, cflags, False), 'cflags_cc': bpfmt(8, cflags_cc), 'arm_srcs': bpfmt(16, defs['armv7']), 'arm_neon_srcs': bpfmt(20, defs['neon']), 'arm64_srcs': bpfmt(16, defs['arm64'] + defs['crc32']), 'none_srcs': bpfmt(16, defs['none']), 'x86_srcs': bpfmt(16, defs['sse2'] + defs['ssse3'] + defs['sse41'] + defs['sse42'] + defs['avx' ] + defs['hsw' ]), 'dm_includes' : bpfmt(8, dm_includes), 'dm_srcs' : bpfmt(8, dm_srcs), 'nanobench_includes' : bpfmt(8, nanobench_includes), 'nanobench_srcs' : bpfmt(8, nanobench_srcs), })
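The core mechanism of the script is just string.Template plus the bpfmt helper; a self-contained sketch of that substitution, with a made-up module name and source list:

import string

def bpfmt(indent, lst, sort=True):
    # Same formatting helper as above: one quoted entry per line at the given indent.
    if sort:
        lst = sorted(lst)
    return ('\n' + ' ' * indent).join('"%s",' % v for v in lst)

bp = string.Template('''cc_library_static {
    name: "libexample",
    srcs: [
        $srcs
    ],
}''')

print(bp.substitute({'srcs': bpfmt(8, {'src/b.cpp', 'src/a.cpp'})}))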
python/ray/autoscaler/tags.py
firebolt55439/ray
21382
486
"""The Ray autoscaler uses tags/labels to associate metadata with instances.""" # Tag for the name of the node TAG_RAY_NODE_NAME = "ray-node-name" # Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag # value says 'type' instead of 'kind'. TAG_RAY_NODE_KIND = "ray-node-type" NODE_KIND_HEAD = "head" NODE_KIND_WORKER = "worker" NODE_KIND_UNMANAGED = "unmanaged" # Tag for user defined node types (e.g., m4xl_spot). This is used for multi # node type clusters. TAG_RAY_USER_NODE_TYPE = "ray-user-node-type" # Tag for autofilled node types for legacy cluster yamls without multi # node type defined in the cluster configs. NODE_TYPE_LEGACY_HEAD = "ray-legacy-head-node-type" NODE_TYPE_LEGACY_WORKER = "ray-legacy-worker-node-type" # Tag that reports the current state of the node (e.g. Updating, Up-to-date) TAG_RAY_NODE_STATUS = "ray-node-status" STATUS_UNINITIALIZED = "uninitialized" STATUS_WAITING_FOR_SSH = "waiting-for-ssh" STATUS_SYNCING_FILES = "syncing-files" STATUS_SETTING_UP = "setting-up" STATUS_UPDATE_FAILED = "update-failed" STATUS_UP_TO_DATE = "up-to-date" # Tag uniquely identifying all nodes of a cluster TAG_RAY_CLUSTER_NAME = "ray-cluster-name" # Hash of the node launch config, used to identify out-of-date nodes TAG_RAY_LAUNCH_CONFIG = "ray-launch-config" # Hash of the node runtime config, used to determine if updates are needed TAG_RAY_RUNTIME_CONFIG = "ray-runtime-config" # Hash of the contents of the directories specified by the file_mounts config # if the node is a worker, this also hashes content of the directories # specified by the cluster_synced_files config TAG_RAY_FILE_MOUNTS_CONTENTS = "ray-file-mounts-contents"
third_party/google-endpoints/dogpile/cache/region.py
tingshao/catapult
2151
493
<gh_stars>1000+ from __future__ import with_statement from .. import Lock, NeedRegenerationException from ..util import NameRegistry from . import exception from ..util import PluginLoader, memoized_property, coerce_string_conf from .util import function_key_generator, function_multi_key_generator from .api import NO_VALUE, CachedValue from .proxy import ProxyBackend from ..util import compat import time import datetime from numbers import Number from functools import wraps import threading _backend_loader = PluginLoader("dogpile.cache") register_backend = _backend_loader.register from . import backends # noqa value_version = 1 """An integer placed in the :class:`.CachedValue` so that new versions of dogpile.cache can detect cached values from a previous, backwards-incompatible version. """ class RegionInvalidationStrategy(object): """Region invalidation strategy interface Implement this interface and pass implementation instance to :meth:`.CacheRegion.configure` to override default region invalidation. Example:: class CustomInvalidationStrategy(RegionInvalidationStrategy): def __init__(self): self._soft_invalidated = None self._hard_invalidated = None def invalidate(self, hard=None): if hard: self._soft_invalidated = None self._hard_invalidated = time.time() else: self._soft_invalidated = time.time() self._hard_invalidated = None def is_invalidated(self, timestamp): return ((self._soft_invalidated and timestamp < self._soft_invalidated) or (self._hard_invalidated and timestamp < self._hard_invalidated)) def was_hard_invalidated(self): return bool(self._hard_invalidated) def is_hard_invalidated(self, timestamp): return (self._hard_invalidated and timestamp < self._hard_invalidated) def was_soft_invalidated(self): return bool(self._soft_invalidated) def is_soft_invalidated(self, timestamp): return (self._soft_invalidated and timestamp < self._soft_invalidated) The custom implementation is injected into a :class:`.CacheRegion` at configure time using the :paramref:`.CacheRegion.configure.region_invalidator` parameter:: region = CacheRegion() region = region.configure(region_invalidator=CustomInvalidationStrategy()) Invalidation strategies that wish to have access to the :class:`.CacheRegion` itself should construct the invalidator given the region as an argument:: class MyInvalidator(RegionInvalidationStrategy): def __init__(self, region): self.region = region # ... # ... region = CacheRegion() region = region.configure(region_invalidator=MyInvalidator(region)) .. versionadded:: 0.6.2 .. seealso:: :paramref:`.CacheRegion.configure.region_invalidator` """ def invalidate(self, hard=True): """Region invalidation. :class:`.CacheRegion` propagated call. The default invalidation system works by setting a current timestamp (using ``time.time()``) to consider all older timestamps effectively invalidated. """ raise NotImplementedError() def is_hard_invalidated(self, timestamp): """Check timestamp to determine if it was hard invalidated. :return: Boolean. True if ``timestamp`` is older than the last region invalidation time and region is invalidated in hard mode. """ raise NotImplementedError() def is_soft_invalidated(self, timestamp): """Check timestamp to determine if it was soft invalidated. :return: Boolean. True if ``timestamp`` is older than the last region invalidation time and region is invalidated in soft mode. """ raise NotImplementedError() def is_invalidated(self, timestamp): """Check timestamp to determine if it was invalidated. :return: Boolean. 
True if ``timestamp`` is older than the last region invalidation time. """ raise NotImplementedError() def was_soft_invalidated(self): """Indicate the region was invalidated in soft mode. :return: Boolean. True if region was invalidated in soft mode. """ raise NotImplementedError() def was_hard_invalidated(self): """Indicate the region was invalidated in hard mode. :return: Boolean. True if region was invalidated in hard mode. """ raise NotImplementedError() class DefaultInvalidationStrategy(RegionInvalidationStrategy): def __init__(self): self._is_hard_invalidated = None self._invalidated = None def invalidate(self, hard=True): self._is_hard_invalidated = bool(hard) self._invalidated = time.time() def is_invalidated(self, timestamp): return (self._invalidated is not None and timestamp < self._invalidated) def was_hard_invalidated(self): return self._is_hard_invalidated is True def is_hard_invalidated(self, timestamp): return self.was_hard_invalidated() and self.is_invalidated(timestamp) def was_soft_invalidated(self): return self._is_hard_invalidated is False def is_soft_invalidated(self, timestamp): return self.was_soft_invalidated() and self.is_invalidated(timestamp) class CacheRegion(object): """A front end to a particular cache backend. :param name: Optional, a string name for the region. This isn't used internally but can be accessed via the ``.name`` parameter, helpful for configuring a region from a config file. :param function_key_generator: Optional. A function that will produce a "cache key" given a data creation function and arguments, when using the :meth:`.CacheRegion.cache_on_arguments` method. The structure of this function should be two levels: given the data creation function, return a new function that generates the key based on the given arguments. Such as:: def my_key_generator(namespace, fn, **kw): fname = fn.__name__ def generate_key(*arg): return namespace + "_" + fname + "_".join(str(s) for s in arg) return generate_key region = make_region( function_key_generator = my_key_generator ).configure( "dogpile.cache.dbm", expiration_time=300, arguments={ "filename":"file.dbm" } ) The ``namespace`` is that passed to :meth:`.CacheRegion.cache_on_arguments`. It's not consulted outside this function, so in fact can be of any form. For example, it can be passed as a tuple, used to specify arguments to pluck from \**kw:: def my_key_generator(namespace, fn): def generate_key(*arg, **kw): return ":".join( [kw[k] for k in namespace] + [str(x) for x in arg] ) return generate_key Where the decorator might be used as:: @my_region.cache_on_arguments(namespace=('x', 'y')) def my_function(a, b, **kw): return my_data() .. seealso:: :func:`.function_key_generator` - default key generator :func:`.kwarg_function_key_generator` - optional gen that also uses keyword arguments :param function_multi_key_generator: Optional. Similar to ``function_key_generator`` parameter, but it's used in :meth:`.CacheRegion.cache_multi_on_arguments`. Generated function should return list of keys. For example:: def my_multi_key_generator(namespace, fn, **kw): namespace = fn.__name__ + (namespace or '') def generate_keys(*args): return [namespace + ':' + str(a) for a in args] return generate_keys :param key_mangler: Function which will be used on all incoming keys before passing to the backend. Defaults to ``None``, in which case the key mangling function recommended by the cache backend will be used. 
A typical mangler is the SHA1 mangler found at :func:`.sha1_mangle_key` which coerces keys into a SHA1 hash, so that the string length is fixed. To disable all key mangling, set to ``False``. Another typical mangler is the built-in Python function ``str``, which can be used to convert non-string or Unicode keys to bytestrings, which is needed when using a backend such as bsddb or dbm under Python 2.x in conjunction with Unicode keys. :param async_creation_runner: A callable that, when specified, will be passed to and called by dogpile.lock when there is a stale value present in the cache. It will be passed the mutex and is responsible releasing that mutex when finished. This can be used to defer the computation of expensive creator functions to later points in the future by way of, for example, a background thread, a long-running queue, or a task manager system like Celery. For a specific example using async_creation_runner, new values can be created in a background thread like so:: import threading def async_creation_runner(cache, somekey, creator, mutex): ''' Used by dogpile.core:Lock when appropriate ''' def runner(): try: value = creator() cache.set(somekey, value) finally: mutex.release() thread = threading.Thread(target=runner) thread.start() region = make_region( async_creation_runner=async_creation_runner, ).configure( 'dogpile.cache.memcached', expiration_time=5, arguments={ 'url': '127.0.0.1:11211', 'distributed_lock': True, } ) Remember that the first request for a key with no associated value will always block; async_creator will not be invoked. However, subsequent requests for cached-but-expired values will still return promptly. They will be refreshed by whatever asynchronous means the provided async_creation_runner callable implements. By default the async_creation_runner is disabled and is set to ``None``. .. versionadded:: 0.4.2 added the async_creation_runner feature. """ def __init__( self, name=None, function_key_generator=function_key_generator, function_multi_key_generator=function_multi_key_generator, key_mangler=None, async_creation_runner=None, ): """Construct a new :class:`.CacheRegion`.""" self.name = name self.function_key_generator = function_key_generator self.function_multi_key_generator = function_multi_key_generator self.key_mangler = self._user_defined_key_mangler = key_mangler self.async_creation_runner = async_creation_runner self.region_invalidator = DefaultInvalidationStrategy() def configure( self, backend, expiration_time=None, arguments=None, _config_argument_dict=None, _config_prefix=None, wrap=None, replace_existing_backend=False, region_invalidator=None ): """Configure a :class:`.CacheRegion`. The :class:`.CacheRegion` itself is returned. :param backend: Required. This is the name of the :class:`.CacheBackend` to use, and is resolved by loading the class from the ``dogpile.cache`` entrypoint. :param expiration_time: Optional. The expiration time passed to the dogpile system. May be passed as an integer number of seconds, or as a ``datetime.timedelta`` value. .. versionadded 0.5.0 ``expiration_time`` may be optionally passed as a ``datetime.timedelta`` value. The :meth:`.CacheRegion.get_or_create` method as well as the :meth:`.CacheRegion.cache_on_arguments` decorator (though note: **not** the :meth:`.CacheRegion.get` method) will call upon the value creation function after this time period has passed since the last generation. :param arguments: Optional. 
The structure here is passed directly to the constructor of the :class:`.CacheBackend` in use, though is typically a dictionary. :param wrap: Optional. A list of :class:`.ProxyBackend` classes and/or instances, each of which will be applied in a chain to ultimately wrap the original backend, so that custom functionality augmentation can be applied. .. versionadded:: 0.5.0 .. seealso:: :ref:`changing_backend_behavior` :param replace_existing_backend: if True, the existing cache backend will be replaced. Without this flag, an exception is raised if a backend is already configured. .. versionadded:: 0.5.7 :param region_invalidator: Optional. Override default invalidation strategy with custom implementation of :class:`.RegionInvalidationStrategy`. .. versionadded:: 0.6.2 """ if "backend" in self.__dict__ and not replace_existing_backend: raise exception.RegionAlreadyConfigured( "This region is already " "configured with backend: %s. " "Specify replace_existing_backend=True to replace." % self.backend) backend_cls = _backend_loader.load(backend) if _config_argument_dict: self.backend = backend_cls.from_config_dict( _config_argument_dict, _config_prefix ) else: self.backend = backend_cls(arguments or {}) if not expiration_time or isinstance(expiration_time, Number): self.expiration_time = expiration_time elif isinstance(expiration_time, datetime.timedelta): self.expiration_time = int( compat.timedelta_total_seconds(expiration_time)) else: raise exception.ValidationError( 'expiration_time is not a number or timedelta.') if not self._user_defined_key_mangler: self.key_mangler = self.backend.key_mangler self._lock_registry = NameRegistry(self._create_mutex) if getattr(wrap, '__iter__', False): for wrapper in reversed(wrap): self.wrap(wrapper) if region_invalidator: self.region_invalidator = region_invalidator return self def wrap(self, proxy): ''' Takes a ProxyBackend instance or class and wraps the attached backend. ''' # if we were passed a type rather than an instance then # initialize it. if type(proxy) == type: proxy = proxy() if not issubclass(type(proxy), ProxyBackend): raise TypeError("Type %s is not a valid ProxyBackend" % type(proxy)) self.backend = proxy.wrap(self.backend) def _mutex(self, key): return self._lock_registry.get(key) class _LockWrapper(object): """weakref-capable wrapper for threading.Lock""" def __init__(self): self.lock = threading.Lock() def acquire(self, wait=True): return self.lock.acquire(wait) def release(self): self.lock.release() def _create_mutex(self, key): mutex = self.backend.get_mutex(key) if mutex is not None: return mutex else: return self._LockWrapper() def invalidate(self, hard=True): """Invalidate this :class:`.CacheRegion`. The default invalidation system works by setting a current timestamp (using ``time.time()``) representing the "minimum creation time" for a value. Any retrieved value whose creation time is prior to this timestamp is considered to be stale. It does not affect the data in the cache in any way, and is also local to this instance of :class:`.CacheRegion`. Once set, the invalidation time is honored by the :meth:`.CacheRegion.get_or_create`, :meth:`.CacheRegion.get_or_create_multi` and :meth:`.CacheRegion.get` methods. The method supports both "hard" and "soft" invalidation options. With "hard" invalidation, :meth:`.CacheRegion.get_or_create` will force an immediate regeneration of the value which all getters will wait for. With "soft" invalidation, subsequent getters will return the "old" value until the new one is available. 
Usage of "soft" invalidation requires that the region or the method is given a non-None expiration time. .. versionadded:: 0.3.0 :param hard: if True, cache values will all require immediate regeneration; dogpile logic won't be used. If False, the creation time of existing values will be pushed back before the expiration time so that a return+regen will be invoked. .. versionadded:: 0.5.1 """ self.region_invalidator.invalidate(hard) def configure_from_config(self, config_dict, prefix): """Configure from a configuration dictionary and a prefix. Example:: local_region = make_region() memcached_region = make_region() # regions are ready to use for function # decorators, but not yet for actual caching # later, when config is available myconfig = { "cache.local.backend":"dogpile.cache.dbm", "cache.local.arguments.filename":"/path/to/dbmfile.dbm", "cache.memcached.backend":"dogpile.cache.pylibmc", "cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1", } local_region.configure_from_config(myconfig, "cache.local.") memcached_region.configure_from_config(myconfig, "cache.memcached.") """ config_dict = coerce_string_conf(config_dict) return self.configure( config_dict["%sbackend" % prefix], expiration_time=config_dict.get( "%sexpiration_time" % prefix, None), _config_argument_dict=config_dict, _config_prefix="%sarguments." % prefix, wrap=config_dict.get( "%swrap" % prefix, None), ) @memoized_property def backend(self): raise exception.RegionNotConfigured( "No backend is configured on this region.") @property def is_configured(self): """Return True if the backend has been configured via the :meth:`.CacheRegion.configure` method already. .. versionadded:: 0.5.1 """ return 'backend' in self.__dict__ def get(self, key, expiration_time=None, ignore_expiration=False): """Return a value from the cache, based on the given key. If the value is not present, the method returns the token ``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from ``None`` to distinguish between a cached value of ``None``. By default, the configured expiration time of the :class:`.CacheRegion`, or alternatively the expiration time supplied by the ``expiration_time`` argument, is tested against the creation time of the retrieved value versus the current time (as reported by ``time.time()``). If stale, the cached value is ignored and the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the expiration time check. .. versionchanged:: 0.3.0 :meth:`.CacheRegion.get` now checks the value's creation time against the expiration time, rather than returning the value unconditionally. The method also interprets the cached value in terms of the current "invalidation" time as set by the :meth:`.invalidate` method. If a value is present, but its creation time is older than the current invalidation time, the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the invalidation time check. .. versionadded:: 0.3.0 Support for the :meth:`.CacheRegion.invalidate` method. :param key: Key to be retrieved. While it's typical for a key to be a string, it is ultimately passed directly down to the cache backend, before being optionally processed by the key_mangler function, so can be of any type recognized by the backend or by the key_mangler function, if present. :param expiration_time: Optional expiration time value which will supersede that configured on the :class:`.CacheRegion` itself. .. 
versionadded:: 0.3.0 :param ignore_expiration: if ``True``, the value is returned from the cache if present, regardless of configured expiration times or whether or not :meth:`.invalidate` was called. .. versionadded:: 0.3.0 """ if self.key_mangler: key = self.key_mangler(key) value = self.backend.get(key) value = self._unexpired_value_fn( expiration_time, ignore_expiration)(value) return value.payload def _unexpired_value_fn(self, expiration_time, ignore_expiration): if ignore_expiration: return lambda value: value else: if expiration_time is None: expiration_time = self.expiration_time current_time = time.time() def value_fn(value): if value is NO_VALUE: return value elif expiration_time is not None and \ current_time - value.metadata["ct"] > expiration_time: return NO_VALUE elif self.region_invalidator.is_invalidated( value.metadata["ct"]): return NO_VALUE else: return value return value_fn def get_multi(self, keys, expiration_time=None, ignore_expiration=False): """Return multiple values from the cache, based on the given keys. Returns values as a list matching the keys given. E.g.:: values = region.get_multi(["one", "two", "three"]) To convert values to a dictionary, use ``zip()``:: keys = ["one", "two", "three"] values = region.get_multi(keys) dictionary = dict(zip(keys, values)) Keys which aren't present in the list are returned as the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False, but is separate from ``None`` to distinguish between a cached value of ``None``. By default, the configured expiration time of the :class:`.CacheRegion`, or alternatively the expiration time supplied by the ``expiration_time`` argument, is tested against the creation time of the retrieved value versus the current time (as reported by ``time.time()``). If stale, the cached value is ignored and the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the expiration time check. .. versionadded:: 0.5.0 """ if not keys: return [] if self.key_mangler: keys = list(map(lambda key: self.key_mangler(key), keys)) backend_values = self.backend.get_multi(keys) _unexpired_value_fn = self._unexpired_value_fn( expiration_time, ignore_expiration) return [ value.payload if value is not NO_VALUE else value for value in ( _unexpired_value_fn(value) for value in backend_values ) ] def get_or_create( self, key, creator, expiration_time=None, should_cache_fn=None): """Return a cached value based on the given key. If the value does not exist or is considered to be expired based on its creation time, the given creation function may or may not be used to recreate the value and persist the newly generated value in the cache. Whether or not the function is used depends on if the *dogpile lock* can be acquired or not. If it can't, it means a different thread or process is already running a creation function for this key against the cache. When the dogpile lock cannot be acquired, the method will block if no previous value is available, until the lock is released and a new value available. If a previous value is available, that value is returned immediately without blocking. If the :meth:`.invalidate` method has been called, and the retrieved value's timestamp is older than the invalidation timestamp, the value is unconditionally prevented from being returned. The method will attempt to acquire the dogpile lock to generate a new value, or will wait until the lock is released to return the new value. .. 
versionchanged:: 0.3.0 The value is unconditionally regenerated if the creation time is older than the last call to :meth:`.invalidate`. :param key: Key to be retrieved. While it's typical for a key to be a string, it is ultimately passed directly down to the cache backend, before being optionally processed by the key_mangler function, so can be of any type recognized by the backend or by the key_mangler function, if present. :param creator: function which creates a new value. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. :param should_cache_fn: optional callable function which will receive the value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. E.g.:: def dont_cache_none(value): return value is not None value = region.get_or_create("some key", create_value, should_cache_fn=dont_cache_none) Above, the function returns the value of create_value() if the cache is invalid, however if the return value is None, it won't be cached. .. versionadded:: 0.4.3 .. seealso:: :meth:`.CacheRegion.cache_on_arguments` - applies :meth:`.get_or_create` to any function using a decorator. :meth:`.CacheRegion.get_or_create_multi` - multiple key/value version """ orig_key = key if self.key_mangler: key = self.key_mangler(key) def get_value(): value = self.backend.get(key) if (value is NO_VALUE or value.metadata['v'] != value_version or self.region_invalidator.is_hard_invalidated( value.metadata["ct"])): raise NeedRegenerationException() ct = value.metadata["ct"] if self.region_invalidator.is_soft_invalidated(ct): ct = time.time() - expiration_time - .0001 return value.payload, ct def gen_value(): created_value = creator() value = self._value(created_value) if not should_cache_fn or \ should_cache_fn(created_value): self.backend.set(key, value) return value.payload, value.metadata["ct"] if expiration_time is None: expiration_time = self.expiration_time if (expiration_time is None and self.region_invalidator.was_soft_invalidated()): raise exception.DogpileCacheException( "Non-None expiration time required " "for soft invalidation") if expiration_time == -1: expiration_time = None if self.async_creation_runner: def async_creator(mutex): return self.async_creation_runner( self, orig_key, creator, mutex) else: async_creator = None with Lock( self._mutex(key), gen_value, get_value, expiration_time, async_creator) as value: return value def get_or_create_multi( self, keys, creator, expiration_time=None, should_cache_fn=None): """Return a sequence of cached values based on a sequence of keys. The behavior for generation of values based on keys corresponds to that of :meth:`.Region.get_or_create`, with the exception that the ``creator()`` function may be asked to generate any subset of the given keys. The list of keys to be generated is passed to ``creator()``, and ``creator()`` should return the generated values as a sequence corresponding to the order of the keys. The method uses the same approach as :meth:`.Region.get_multi` and :meth:`.Region.set_multi` to get and set values from the backend. If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend` that modifies values, take note this function invokes ``.set_multi()`` for newly generated values using the same values it returns to the calling function. 
A correct implementation of ``.set_multi()`` will not modify values in-place on the submitted ``mapping`` dict. :param keys: Sequence of keys to be retrieved. :param creator: function which accepts a sequence of keys and returns a sequence of new values. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. :param should_cache_fn: optional callable function which will receive each value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. .. versionadded:: 0.5.0 .. seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` """ def get_value(key): value = values.get(key, NO_VALUE) if (value is NO_VALUE or value.metadata['v'] != value_version or self.region_invalidator.is_hard_invalidated( value.metadata['v'])): # dogpile.core understands a 0 here as # "the value is not available", e.g. # _has_value() will return False. return value.payload, 0 else: ct = value.metadata["ct"] if self.region_invalidator.is_soft_invalidated(ct): ct = time.time() - expiration_time - .0001 return value.payload, ct def gen_value(): raise NotImplementedError() def async_creator(key, mutex): mutexes[key] = mutex if expiration_time is None: expiration_time = self.expiration_time if (expiration_time is None and self.region_invalidator.was_soft_invalidated()): raise exception.DogpileCacheException( "Non-None expiration time required " "for soft invalidation") if expiration_time == -1: expiration_time = None mutexes = {} sorted_unique_keys = sorted(set(keys)) if self.key_mangler: mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys] else: mangled_keys = sorted_unique_keys orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys)) values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys))) for orig_key, mangled_key in orig_to_mangled.items(): with Lock( self._mutex(mangled_key), gen_value, lambda: get_value(mangled_key), expiration_time, async_creator=lambda mutex: async_creator(orig_key, mutex) ): pass try: if mutexes: # sort the keys, the idea is to prevent deadlocks. # though haven't been able to simulate one anyway. keys_to_get = sorted(mutexes) new_values = creator(*keys_to_get) values_w_created = dict( (orig_to_mangled[k], self._value(v)) for k, v in zip(keys_to_get, new_values) ) if not should_cache_fn: self.backend.set_multi(values_w_created) else: self.backend.set_multi(dict( (k, v) for k, v in values_w_created.items() if should_cache_fn(v[0]) )) values.update(values_w_created) return [values[orig_to_mangled[k]].payload for k in keys] finally: for mutex in mutexes.values(): mutex.release() def _value(self, value): """Return a :class:`.CachedValue` given a value.""" return CachedValue( value, { "ct": time.time(), "v": value_version }) def set(self, key, value): """Place a new value in the cache under the given key.""" if self.key_mangler: key = self.key_mangler(key) self.backend.set(key, self._value(value)) def set_multi(self, mapping): """Place new values in the cache under the given keys. .. 
versionadded:: 0.5.0 """ if not mapping: return if self.key_mangler: mapping = dict(( self.key_mangler(k), self._value(v)) for k, v in mapping.items()) else: mapping = dict((k, self._value(v)) for k, v in mapping.items()) self.backend.set_multi(mapping) def delete(self, key): """Remove a value from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) """ if self.key_mangler: key = self.key_mangler(key) self.backend.delete(key) def delete_multi(self, keys): """Remove multiple values from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) .. versionadded:: 0.5.0 """ if self.key_mangler: keys = list(map(lambda key: self.key_mangler(key), keys)) self.backend.delete_multi(keys) def cache_on_arguments( self, namespace=None, expiration_time=None, should_cache_fn=None, to_str=compat.string_type, function_key_generator=None): """A function decorator that will cache the return value of the function using a key derived from the function itself and its arguments. The decorator internally makes use of the :meth:`.CacheRegion.get_or_create` method to access the cache and conditionally call the function. See that method for additional behavioral details. E.g.:: @someregion.cache_on_arguments() def generate_something(x, y): return somedatabase.query(x, y) The decorated function can then be called normally, where data will be pulled from the cache region unless a new value is needed:: result = generate_something(5, 6) The function is also given an attribute ``invalidate()``, which provides for invalidation of the value. Pass to ``invalidate()`` the same arguments you'd pass to the function itself to represent a particular value:: generate_something.invalidate(5, 6) Another attribute ``set()`` is added to provide extra caching possibilities relative to the function. This is a convenience method for :meth:`.CacheRegion.set` which will store a given value directly without calling the decorated function. The value to be cached is passed as the first argument, and the arguments which would normally be passed to the function should follow:: generate_something.set(3, 5, 6) The above example is equivalent to calling ``generate_something(5, 6)``, if the function were to produce the value ``3`` as the value to be cached. .. versionadded:: 0.4.1 Added ``set()`` method to decorated function. Similar to ``set()`` is ``refresh()``. This attribute will invoke the decorated function and populate a new value into the cache with the new value, as well as returning that value:: newvalue = generate_something.refresh(5, 6) .. versionadded:: 0.5.0 Added ``refresh()`` method to decorated function. Lastly, the ``get()`` method returns either the value cached for the given key, or the token ``NO_VALUE`` if no such key exists:: value = generate_something.get(5, 6) .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. The default key generation will use the name of the function, the module name for the function, the arguments passed, as well as an optional "namespace" parameter in order to generate a cache key. Given a function ``one`` inside the module ``myapp.tools``:: @region.cache_on_arguments(namespace="foo") def one(a, b): return a + b Above, calling ``one(3, 4)`` will produce a cache key as follows:: myapp.tools:one|foo|3 4 The key generator will ignore an initial argument of ``self`` or ``cls``, making the decorator suitable (with caveats) for use with instance or class methods. 
Given the example:: class MyClass(object): @region.cache_on_arguments(namespace="foo") def one(self, a, b): return a + b The cache key above for ``MyClass().one(3, 4)`` will again produce the same cache key of ``myapp.tools:one|foo|3 4`` - the name ``self`` is skipped. The ``namespace`` parameter is optional, and is used normally to disambiguate two functions of the same name within the same module, as can occur when decorating instance or class methods as below:: class MyClass(object): @region.cache_on_arguments(namespace='MC') def somemethod(self, x, y): "" class MyOtherClass(object): @region.cache_on_arguments(namespace='MOC') def somemethod(self, x, y): "" Above, the ``namespace`` parameter disambiguates between ``somemethod`` on ``MyClass`` and ``MyOtherClass``. Python class declaration mechanics otherwise prevent the decorator from having awareness of the ``MyClass`` and ``MyOtherClass`` names, as the function is received by the decorator before it becomes an instance method. The function key generation can be entirely replaced on a per-region basis using the ``function_key_generator`` argument present on :func:`.make_region` and :class:`.CacheRegion`. If defaults to :func:`.function_key_generator`. :param namespace: optional string argument which will be established as part of the cache key. This may be needed to disambiguate functions of the same name within the same source file, such as those associated with classes - note that the decorator itself can't see the parent class on a function as the class is being declared. :param expiration_time: if not None, will override the normal expiration time. May be specified as a callable, taking no arguments, that returns a value to be used as the ``expiration_time``. This callable will be called whenever the decorated function itself is called, in caching or retrieving. Thus, this can be used to determine a *dynamic* expiration time for the cached function result. Example use cases include "cache the result until the end of the day, week or time period" and "cache until a certain date or time passes". .. versionchanged:: 0.5.0 ``expiration_time`` may be passed as a callable to :meth:`.CacheRegion.cache_on_arguments`. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`. .. versionadded:: 0.4.3 :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_key_generator: a function that will produce a "cache key". This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. 
seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` """ expiration_time_is_callable = compat.callable(expiration_time) if function_key_generator is None: function_key_generator = self.function_key_generator def decorator(fn): if to_str is compat.string_type: # backwards compatible key_generator = function_key_generator(namespace, fn) else: key_generator = function_key_generator( namespace, fn, to_str=to_str) @wraps(fn) def decorate(*arg, **kw): key = key_generator(*arg, **kw) @wraps(fn) def creator(): return fn(*arg, **kw) timeout = expiration_time() if expiration_time_is_callable \ else expiration_time return self.get_or_create(key, creator, timeout, should_cache_fn) def invalidate(*arg, **kw): key = key_generator(*arg, **kw) self.delete(key) def set_(value, *arg, **kw): key = key_generator(*arg, **kw) self.set(key, value) def get(*arg, **kw): key = key_generator(*arg, **kw) return self.get(key) def refresh(*arg, **kw): key = key_generator(*arg, **kw) value = fn(*arg, **kw) self.set(key, value) return value decorate.set = set_ decorate.invalidate = invalidate decorate.refresh = refresh decorate.get = get decorate.original = fn return decorate return decorator def cache_multi_on_arguments( self, namespace=None, expiration_time=None, should_cache_fn=None, asdict=False, to_str=compat.string_type, function_multi_key_generator=None): """A function decorator that will cache multiple return values from the function using a sequence of keys derived from the function itself and the arguments passed to it. This method is the "multiple key" analogue to the :meth:`.CacheRegion.cache_on_arguments` method. Example:: @someregion.cache_multi_on_arguments() def generate_something(*keys): return [ somedatabase.query(key) for key in keys ] The decorated function can be called normally. The decorator will produce a list of cache keys using a mechanism similar to that of :meth:`.CacheRegion.cache_on_arguments`, combining the name of the function with the optional namespace and with the string form of each key. It will then consult the cache using the same mechanism as that of :meth:`.CacheRegion.get_multi` to retrieve all current values; the originally passed keys corresponding to those values which aren't generated or need regeneration will be assembled into a new argument list, and the decorated function is then called with that subset of arguments. The returned result is a list:: result = generate_something("key1", "key2", "key3") The decorator internally makes use of the :meth:`.CacheRegion.get_or_create_multi` method to access the cache and conditionally call the function. See that method for additional behavioral details. Unlike the :meth:`.CacheRegion.cache_on_arguments` method, :meth:`.CacheRegion.cache_multi_on_arguments` works only with a single function signature, one which takes a simple list of keys as arguments. 
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function is also provided with a ``set()`` method, which here accepts a mapping of keys and values to set in the cache:: generate_something.set({"k1": "value1", "k2": "value2", "k3": "value3"}) ...an ``invalidate()`` method, which has the effect of deleting the given sequence of keys using the same mechanism as that of :meth:`.CacheRegion.delete_multi`:: generate_something.invalidate("k1", "k2", "k3") ...a ``refresh()`` method, which will call the creation function, cache the new values, and return them:: values = generate_something.refresh("k1", "k2", "k3") ...and a ``get()`` method, which will return values based on the given arguments:: values = generate_something.get("k1", "k2", "k3") .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments` have the same meaning as those passed to :meth:`.CacheRegion.cache_on_arguments`. :param namespace: optional string argument which will be established as part of each cache key. :param expiration_time: if not None, will override the normal expiration time. May be passed as an integer or a callable. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create_multi`. This function is given a value as returned by the creator, and only if it returns True will that value be placed in the cache. :param asdict: if ``True``, the decorated function should return its result as a dictionary of keys->values, and the final result of calling the decorated function will also be a dictionary. If left at its default value of ``False``, the decorated function should return its result as a list of values, and the final result of calling the decorated function will also be a list. When ``asdict==True`` if the dictionary returned by the decorated function is missing keys, those keys will not be cached. :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_multi_key_generator: a function that will produce a list of keys. This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. 
seealso:: :meth:`.CacheRegion.cache_on_arguments` :meth:`.CacheRegion.get_or_create_multi` """ expiration_time_is_callable = compat.callable(expiration_time) if function_multi_key_generator is None: function_multi_key_generator = self.function_multi_key_generator def decorator(fn): key_generator = function_multi_key_generator( namespace, fn, to_str=to_str) @wraps(fn) def decorate(*arg, **kw): cache_keys = arg keys = key_generator(*arg, **kw) key_lookup = dict(zip(keys, cache_keys)) @wraps(fn) def creator(*keys_to_create): return fn(*[key_lookup[k] for k in keys_to_create]) timeout = expiration_time() if expiration_time_is_callable \ else expiration_time if asdict: def dict_create(*keys): d_values = creator(*keys) return [ d_values.get(key_lookup[k], NO_VALUE) for k in keys] def wrap_cache_fn(value): if value is NO_VALUE: return False elif not should_cache_fn: return True else: return should_cache_fn(value) result = self.get_or_create_multi( keys, dict_create, timeout, wrap_cache_fn) result = dict( (k, v) for k, v in zip(cache_keys, result) if v is not NO_VALUE) else: result = self.get_or_create_multi( keys, creator, timeout, should_cache_fn) return result def invalidate(*arg): keys = key_generator(*arg) self.delete_multi(keys) def set_(mapping): keys = list(mapping) gen_keys = key_generator(*keys) self.set_multi(dict( (gen_key, mapping[key]) for gen_key, key in zip(gen_keys, keys)) ) def get(*arg): keys = key_generator(*arg) return self.get_multi(keys) def refresh(*arg): keys = key_generator(*arg) values = fn(*arg) if asdict: self.set_multi( dict(zip(keys, [values[a] for a in arg])) ) return values else: self.set_multi( dict(zip(keys, values)) ) return values decorate.set = set_ decorate.invalidate = invalidate decorate.refresh = refresh decorate.get = get return decorate return decorator def make_region(*arg, **kw): """Instantiate a new :class:`.CacheRegion`. Currently, :func:`.make_region` is a passthrough to :class:`.CacheRegion`. See that class for constructor arguments. """ return CacheRegion(*arg, **kw)
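A minimal end-to-end sketch assembled from the docstrings above, using the built-in memory backend; the namespace and expiration time are arbitrary:

from dogpile.cache import make_region

region = make_region().configure(
    "dogpile.cache.memory",
    expiration_time=300,
)

@region.cache_on_arguments(namespace="demo")
def expensive_lookup(x, y):
    print("computing", x, y)  # printed only on a cache miss
    return x + y

expensive_lookup(3, 4)             # miss: runs the function, caches 7
expensive_lookup(3, 4)             # hit: served from the cache
expensive_lookup.invalidate(3, 4)  # removes the cached entry
expensive_lookup.refresh(3, 4)     # recomputes and re-caches the value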
tests/optims/distributed_adamw_test.py
AswinRetnakumar/Machina
302
522
import os
import unittest

import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn

from machina.optims import DistributedAdamW


def init_processes(rank, world_size, function, backend='tcp'):
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=world_size)
    function(rank, world_size)


class TestDistributedAdamW(unittest.TestCase):
    def test_step(self):
        def _run(rank, world_size):
            model = nn.Linear(10, 1)
            optimizer = DistributedAdamW(model.parameters())
            optimizer.zero_grad()
            loss = model(torch.ones(10).float())
            loss.backward()
            optimizer.step()

        processes = []
        world_size = 4
        for rank in range(world_size):
            p = Process(target=init_processes, args=(rank, world_size, _run))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
tests/functional/test_soft_round_inverse.py
tallamjr/NeuralCompression
233
543
<gh_stars>100-1000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from neuralcompression.functional import soft_round, soft_round_inverse


def test_soft_round_inverse():
    x = torch.linspace(-2.0, 2.0, 50)
    torch.testing.assert_close(
        x,
        soft_round_inverse(x, alpha=1e-13),
    )

    x = torch.tensor([-1.25, -0.75, 0.75, 1.25])
    torch.testing.assert_close(
        x,
        soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0),
    )

    for offset in range(-5, 5):
        x = torch.linspace(offset + 0.001, offset + 0.999, 100)
        torch.testing.assert_close(
            torch.ceil(x) - 0.5,
            soft_round_inverse(x, alpha=5000.0),
            atol=0.001,
            rtol=0.002,
        )
lingvo/tasks/car/car_layers_test.py
Harshs27/lingvo
2,611
559
# Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for car_layers.""" from lingvo import compat as tf from lingvo.core import py_utils from lingvo.core import test_utils from lingvo.tasks.car import car_layers class CarLayersTest(test_utils.TestCase): def _testNestedOutShape(self, p, input_shape, expected_shape): batch_size, num_points, _ = input_shape g = tf.Graph() with g.as_default(): net = p.Instantiate() input_data = py_utils.NestedMap( points=tf.random.uniform(input_shape[:-1] + (3,)), features=tf.random.uniform(input_shape), padding=tf.zeros((batch_size, num_points), dtype=tf.float32), label=tf.random.uniform((batch_size,), minval=0, maxval=16, dtype=tf.int32)) result = net.FPropDefaultTheta(input_data) with self.session(graph=g): self.evaluate(tf.global_variables_initializer()) np_result = self.evaluate(result) grouped_points_result = np_result.grouped_points self.assertEqual(grouped_points_result.features.shape, expected_shape.grouped_points.features) self.assertEqual(grouped_points_result.points.shape, expected_shape.grouped_points.points) self.assertEqual(grouped_points_result.padding.shape, expected_shape.grouped_points.padding) query_points_result = np_result.query_points self.assertEqual(query_points_result.points.shape, expected_shape.query_points.points) self.assertEqual(query_points_result.padding.shape, expected_shape.query_points.padding) def testSamplingAndGrouping(self): for num_points in [1024, 256]: for input_dims in [3, 6, 9]: for group_size in [32, 64]: p = car_layers.SamplingAndGroupingLayer.Params().Set( name='SampleGroupTest', num_samples=256, ball_radius=0.2, group_size=group_size, sample_neighbors_uniformly=True) grouped_points_shape = py_utils.NestedMap( features=(8, 256, group_size, input_dims), points=(8, 256, group_size, 3), padding=(8, 256, group_size)) query_points_shape = py_utils.NestedMap( points=(8, 256, 3), padding=(8, 256)) expected_shape = py_utils.NestedMap({ 'grouped_points': grouped_points_shape, 'query_points': query_points_shape }) self._testNestedOutShape(p, (8, num_points, input_dims), expected_shape) if __name__ == '__main__': tf.test.main()
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
rodluger/starry
116
560
<reponame>rodluger/starry<filename>starry/_core/ops/lib/include/oblate/tests/test_derivs.py
import oblate
import numpy as np
import pytest

# TODO!
Packs/Pwned/Integrations/PwnedV2/PwnedV2.py
diCagri/content
799
571
from CommonServerPython import * ''' IMPORTS ''' import re import requests # Disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS ''' VENDOR = 'Have I Been Pwned? V2' MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1) API_KEY = demisto.params().get('api_key') USE_SSL = not demisto.params().get('insecure', False) BASE_URL = 'https://haveibeenpwned.com/api/v3' HEADERS = { 'hibp-api-key': API_KEY, 'user-agent': 'DBOT-API', 'Content-Type': 'application/json', 'Accept': 'application/json' } DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3 DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3 SUFFIXES = { "email": '/breachedaccount/', "domain": '/breaches?domain=', "username": '/breachedaccount/', "paste": '/pasteaccount/', "email_truncate_verified": '?truncateResponse=false&includeUnverified=true', "domain_truncate_verified": '&truncateResponse=false&includeUnverified=true', "username_truncate_verified": '?truncateResponse=false&includeUnverified=true' } RETRIES_END_TIME = datetime.min ''' HELPER FUNCTIONS ''' def http_request(method, url_suffix, params=None, data=None): while True: res = requests.request( method, BASE_URL + url_suffix, verify=USE_SSL, params=params, data=data, headers=HEADERS ) if res.status_code != 429: # Rate limit response code break if datetime.now() > RETRIES_END_TIME: return_error('Max retry time has exceeded.') wait_regex = re.search(r'\d+', res.json()['message']) if wait_regex: wait_amount = wait_regex.group() else: demisto.error('failed extracting wait time will use default (5). Res body: {}'.format(res.text)) wait_amount = 5 if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME: return_error('Max retry time has exceeded.') time.sleep(int(wait_amount)) if res.status_code == 404: return None if not res.status_code == 200: if not res.status_code == 401: demisto.error( 'Error in API call to Pwned Integration [%d]. 
Full text: %s' % (res.status_code, res.text)) return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason)) return None return res.json() def html_description_to_human_readable(breach_description): """ Converting from html description to hr :param breach_description: Description of breach from API response :return: Description string that altered HTML urls to clickable urls for better readability in war-room """ html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>') patterns_found = html_link_pattern.findall(breach_description) for link in patterns_found: html_actual_address = link[0] html_readable_name = link[2] link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')' breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1) return breach_description def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None): records_found = False md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n' if api_res: records_found = True for breach in api_res: verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified' md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \ ' records breached [' + verified_breach + ' breach]\n' md += 'Date: **' + breach['BreachDate'] + '**\n\n' md += html_description_to_human_readable(breach['Description']) + '\n' md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n' if api_paste_res: records_found = True pastes_list = [] for paste_breach in api_paste_res: paste_entry = \ { 'Source': paste_breach['Source'], 'Title': paste_breach['Title'], 'ID': paste_breach['Id'], 'Date': '', 'Amount of emails in paste': str(paste_breach['EmailCount']) } if paste_breach['Date']: paste_entry['Date'] = paste_breach['Date'].split('T')[0] pastes_list.append(paste_entry) md += tableToMarkdown('The email address was found in the following "Pastes":', pastes_list, ['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste']) if not records_found: md += 'No records found' return md def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score): return { 'Indicator': indicator_value, 'Type': indicator_type, 'Vendor': VENDOR, 'Score': dbot_score } def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score): context_dict = dict() # dict if context_type == 'email': context_dict['Address'] = context_main_value else: context_dict['Name'] = context_main_value context_dict['Pwned-V2'] = { 'Compromised': { 'Vendor': VENDOR, 'Reporters': ', '.join(comp_sites + comp_pastes) } } if malicious_score == 3: context_dict['Malicious'] = add_malicious_to_context(context_type) return context_dict def add_malicious_to_context(malicious_type): return { 'Vendor': VENDOR, 'Description': 'The ' + malicious_type + ' has been compromised' } def email_to_entry_context(email, api_email_res, api_paste_res): dbot_score = 0 comp_email = dict() # type: dict comp_sites = sorted([item['Title'] for item in api_email_res]) comp_pastes = sorted(set(item['Source'] for item in api_paste_res)) if len(comp_sites) > 0: dbot_score = DEFAULT_DBOT_SCORE_EMAIL email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL) comp_email[outputPaths['email']] = email_context comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score) return comp_email def domain_to_entry_context(domain, api_res): comp_sites = 
[item['Title'] for item in api_res] comp_sites = sorted(comp_sites) comp_domain = dict() # type: dict dbot_score = 0 if len(comp_sites) > 0: dbot_score = DEFAULT_DBOT_SCORE_DOMAIN domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN) comp_domain[outputPaths['domain']] = domain_context comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score) return comp_domain def set_retry_end_time(): global RETRIES_END_TIME if MAX_RETRY_ALLOWED != -1: RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED)) ''' COMMANDS + REQUESTS FUNCTIONS ''' def test_module(args_dict): """ If the http request was successful the test will return OK :return: 3 arrays of outputs """ http_request('GET', SUFFIXES.get("username", '') + 'test') return ['ok'], [None], [None] def pwned_email_command(args_dict): """ Executing the pwned request for emails list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the email list is needed :return: 3 arrays of outputs """ email_list = argToList(args_dict.get('email', '')) api_email_res_list, api_paste_res_list = pwned_email(email_list) md_list = [] ec_list = [] for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list): md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res)) ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or [])) return md_list, ec_list, api_email_res_list def pwned_email(email_list): """ Executing the http requests :param email_list: the email list that needed for the http requests :return: 2 arrays of http requests outputs """ api_email_res_list = [] api_paste_res_list = [] for email in email_list: email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified") paste_suffix = SUFFIXES.get("paste") + email api_email_res_list.append(http_request('GET', url_suffix=email_suffix)) api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix)) return api_email_res_list, api_paste_res_list def pwned_domain_command(args_dict): """ Executing the pwned request for domains list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the domain list is needed :return: 3 arrays of outputs """ domain_list = argToList(args_dict.get('domain', '')) api_res_list = pwned_domain(domain_list) md_list = [] ec_list = [] for domain, api_res in zip(domain_list, api_res_list): md_list.append(data_to_markdown('Domain', domain, api_res)) ec_list.append(domain_to_entry_context(domain, api_res or [])) return md_list, ec_list, api_res_list def pwned_domain(domain_list): """ Executing the http request :param domain_list: the domains list that needed for the http requests :return: an array of http requests outputs """ api_res_list = [] for domain in domain_list: suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified") api_res_list.append(http_request('GET', url_suffix=suffix)) return api_res_list def pwned_username_command(args_dict): """ Executing the pwned request for usernames list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the username list is needed :return: 3 arrays of outputs """ username_list = argToList(args_dict.get('username', '')) api_res_list = pwned_username(username_list) md_list = [] ec_list = [] for username, 
api_res in zip(username_list, api_res_list): md_list.append(data_to_markdown('Username', username, api_res)) ec_list.append(domain_to_entry_context(username, api_res or [])) return md_list, ec_list, api_res_list def pwned_username(username_list): """ Executing the http request :param username_list: the username list that needed for the http requests :return: an array of http requests outputs """ api_res_list = [] for username in username_list: suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified") api_res_list.append(http_request('GET', url_suffix=suffix)) return api_res_list command = demisto.command() LOG('Command being called is: {}'.format(command)) try: handle_proxy() set_retry_end_time() commands = { 'test-module': test_module, 'email': pwned_email_command, 'pwned-email': pwned_email_command, 'domain': pwned_domain_command, 'pwned-domain': pwned_domain_command, 'pwned-username': pwned_username_command } if command in commands: md_list, ec_list, api_email_res_list = commands[command](demisto.args()) for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list): return_outputs(md, ec, api_paste_res) # Log exceptions except Exception as e: return_error(str(e))
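As a side note, the link rewriting done by `html_description_to_human_readable` above can be illustrated with a standalone sketch; the helper name and the sample description string are invented for the demo, while the real strings come from the Have I Been Pwned API:

import re

html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')

def links_to_markdown(description):
    # same rewrite as html_description_to_human_readable: each HTML anchor
    # becomes a [name](url) markdown link, one substitution per match
    for href, _attrs, name in html_link_pattern.findall(description):
        description = re.sub(html_link_pattern, '[' + name + '](' + href + ')', description, count=1)
    return description

print(links_to_markdown('Data was exposed; see <a href="https://example.com" target="_blank">Example</a> for details.'))
# -> 'Data was exposed; see [Example](https://example.com) for details.'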
moshmosh/extensions/pipelines.py
Aloxaf/moshmosh
114
572
<reponame>Aloxaf/moshmosh<gh_stars>100-1000
from moshmosh.extension import Extension
from moshmosh.ast_compat import ast


class PipelineVisitor(ast.NodeTransformer):
    """
    `a | f -> f(a)`, recursively
    """

    def __init__(self, activation):
        self.activation = activation

    def visit_BinOp(self, n: ast.BinOp):
        if n.lineno in self.activation and isinstance(n.op, ast.BitOr):
            return ast.Call(
                self.visit(n.right),
                [self.visit(n.left)],
                [],
                lineno=n.lineno,
                col_offset=n.col_offset
            )
        return self.generic_visit(n)


class Pipeline(Extension):
    identifier = "pipeline"

    def __init__(self):
        self.visitor = PipelineVisitor(self.activation)

    def rewrite_ast(self, node):
        return self.visitor.visit(node)
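To make the `a | f -> f(a)` rewrite above concrete, here is a self-contained sketch that applies the same BinOp-to-Call transformation to a snippet; the activation bookkeeping is dropped for the demo (an assumption), whereas the real extension only rewrites lines it was activated on:

import ast

source = "result = [1, 2, 3] | sum | print"
tree = ast.parse(source)

class DemoPipelineVisitor(ast.NodeTransformer):
    def visit_BinOp(self, n):
        if isinstance(n.op, ast.BitOr):
            # `left | right`  ->  `right(left)`, applied recursively
            call = ast.Call(self.visit(n.right), [self.visit(n.left)], [])
            return ast.copy_location(call, n)
        return self.generic_visit(n)

tree = ast.fix_missing_locations(DemoPipelineVisitor().visit(tree))
exec(compile(tree, "<demo>", "exec"))  # prints 6: the line becomes print(sum([1, 2, 3]))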
alibi_detect/utils/tests/test_discretize.py
Clusks/alibi-detect
1,227
596
from itertools import product import numpy as np import pytest from alibi_detect.utils.discretizer import Discretizer x = np.random.rand(10, 4) n_features = x.shape[1] feature_names = [str(_) for _ in range(n_features)] categorical_features = [[], [1, 3]] percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))] tests = list(product(categorical_features, percentiles)) n_tests = len(tests) @pytest.fixture def cats_and_percentiles(request): cat, perc = tests[request.param] return cat, perc @pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True) def test_discretizer(cats_and_percentiles): cat, perc = cats_and_percentiles disc = Discretizer(x, cat, feature_names, perc) to_disc = list(disc.names.keys()) assert len(to_disc) == (x.shape[1] - len(cat)) x_disc = disc.discretize(x) for k, v in disc.names.items(): assert len(v) <= len(perc) + 1 assert callable(disc.lambdas[k]) assert (x_disc[:, k].min() == 0).all() assert (x_disc[:, k].max() == len(perc)).all() for i in range(x.shape[1]): if i not in to_disc: assert (x_disc[:, i] == x[:, i]).all()
nuplan/planning/simulation/observation/idm/test/test_profile_idm_observation.py
motional/nuplan-devkit
128
608
import logging import unittest from pyinstrument import Profiler from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer from nuplan.planning.simulation.observation.idm_agents import IDMAgents from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) class TestProfileIDM(unittest.TestCase): """ Profiling test for IDM agents. """ def setUp(self) -> None: """ Inherited, see super class. """ self.n_repeat_trials = 1 self.display_results = True self.scenario = get_test_nuplan_scenario() def test_profile_idm_agent_observation(self) -> None: """Profile IDMAgents.""" profiler = Profiler(interval=0.0001) profiler.start() # How many times to repeat runtime test for _ in range(self.n_repeat_trials): observation = IDMAgents( target_velocity=10, min_gap_to_lead_agent=0.5, headway_time=1.5, accel_max=1.0, decel_max=2.0, scenario=self.scenario, ) for step in range(self.scenario.get_number_of_iterations() - 1): iteration = SimulationIteration(time_point=self.scenario.get_time_point(step), index=step) next_iteration = SimulationIteration(time_point=self.scenario.get_time_point(step + 1), index=step + 1) buffer = SimulationHistoryBuffer.initialize_from_list( 1, [self.scenario.get_ego_state_at_iteration(step)], [self.scenario.get_tracked_objects_at_iteration(step)], next_iteration.time_point.time_s - iteration.time_point.time_s, ) observation.update_observation(iteration, next_iteration, buffer) profiler.stop() if self.display_results: logger.info(profiler.output_text(unicode=True, color=True)) if __name__ == "__main__": unittest.main()
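The pyinstrument pattern used above (start/stop around the code under test, then render a text report) works the same way outside the simulator; a minimal sketch with a stand-in workload:

from pyinstrument import Profiler

profiler = Profiler(interval=0.0001)
profiler.start()
total = sum(i * i for i in range(200000))  # stand-in for the simulation loop
profiler.stop()
print(profiler.output_text(unicode=True, color=False))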
pypy/interpreter/test/test_generator.py
m4sterchain/mesapy
381
625
<filename>pypy/interpreter/test/test_generator.py class AppTestGenerator: def test_generator(self): def f(): yield 1 assert f().next() == 1 def test_generator2(self): def f(): yield 1 g = f() assert g.next() == 1 raises(StopIteration, g.next) def test_attributes(self): def f(): yield 1 assert g.gi_running g = f() assert g.gi_code is f.__code__ assert g.__name__ == 'f' assert g.gi_frame is not None assert not g.gi_running g.next() assert not g.gi_running raises(StopIteration, g.next) assert not g.gi_running assert g.gi_frame is None assert g.gi_code is f.__code__ assert g.__name__ == 'f' def test_generator3(self): def f(): yield 1 g = f() assert list(g) == [1] def test_generator4(self): def f(): yield 1 g = f() assert [x for x in g] == [1] def test_generator5(self): d = {} exec """if 1: def f(): v = (yield ) yield v g = f() g.next() """ in d g = d['g'] assert g.send(42) == 42 def test_throw1(self): def f(): yield 2 g = f() # two arguments version raises(NameError, g.throw, NameError, "Error") def test_throw2(self): def f(): yield 2 g = f() # single argument version raises(NameError, g.throw, NameError("Error")) def test_throw3(self): def f(): try: yield 1 yield 2 except: yield 3 g = f() assert g.next() == 1 assert g.throw(NameError("Error")) == 3 raises(StopIteration, g.next) def test_throw4(self): d = {} exec """if 1: def f(): try: yield 1 v = (yield 2) except: yield 3 g = f() """ in d g = d['g'] assert g.next() == 1 assert g.next() == 2 assert g.throw(NameError("Error")) == 3 raises(StopIteration, g.next) def test_throw5(self): def f(): try: yield 1 except: x = 3 try: yield x except: pass g = f() g.next() # String exceptions are not allowed anymore raises(TypeError, g.throw, "Error") assert g.throw(Exception) == 3 raises(StopIteration, g.throw, Exception) def test_throw6(self): def f(): yield 2 g = f() raises(NameError, g.throw, NameError, "Error", None) def test_throw_fail(self): def f(): yield 1 g = f() raises(TypeError, g.throw, NameError("Error"), "error") def test_throw_fail2(self): def f(): yield 1 g = f() raises(TypeError, g.throw, list()) def test_throw_fail3(self): def f(): yield 1 g = f() raises(TypeError, g.throw, NameError("Error"), None, "not tb object") def test_throw_finishes_generator(self): def f(): yield 1 g = f() assert g.gi_frame is not None raises(ValueError, g.throw, ValueError) assert g.gi_frame is None def test_throw_bug(self): def f(): try: x.throw(IndexError) # => "generator already executing" except ValueError: yield 1 x = f() res = list(x) assert res == [1] def test_throw_on_finished_generator(self): def f(): yield 1 g = f() res = g.next() assert res == 1 raises(StopIteration, g.next) raises(NameError, g.throw, NameError) def test_close(self): def f(): yield 1 g = f() assert g.close() is None def test_close2(self): def f(): try: yield 1 except GeneratorExit: raise StopIteration g = f() g.next() assert g.close() is None def test_close3(self): def f(): try: yield 1 except GeneratorExit: raise NameError g = f() g.next() raises(NameError, g.close) def test_close_fail(self): def f(): try: yield 1 except GeneratorExit: yield 2 g = f() g.next() raises(RuntimeError, g.close) def test_close_on_collect(self): ## we need to exec it, else it won't run on python2.4 d = {} exec """ def f(): try: yield finally: f.x = 42 """.strip() in d g = d['f']() g.next() del g import gc gc.collect() assert d['f'].x == 42 def test_generator_raises_typeerror(self): def f(): yield 1 g = f() raises(TypeError, g.send) # one argument required raises(TypeError, g.send, 1) # not started, must 
send None def test_generator_explicit_stopiteration(self): def f(): yield 1 raise StopIteration g = f() assert [x for x in g] == [1] def test_generator_propagate_stopiteration(self): def f(): it = iter([1]) while 1: yield it.next() g = f() assert [x for x in g] == [1] def test_generator_restart(self): def g(): i = me.next() yield i me = g() raises(ValueError, me.next) def test_generator_expression(self): exec "res = sum(i*i for i in range(5))" assert res == 30 def test_generator_expression_2(self): d = {} exec """ def f(): total = sum(i for i in [x for x in z]) return total, x z = [1, 2, 7] res = f() """ in d assert d['res'] == (10, 7) def test_repr(self): def myFunc(): yield 1 g = myFunc() r = repr(g) assert r.startswith("<generator object myFunc at 0x") assert list(g) == [1] assert repr(g) == r def test_unpackiterable_gen(self): g = (i*i for i in range(-5, 3)) assert set(g) == set([0, 1, 4, 9, 16, 25]) assert set(g) == set() assert set(i for i in range(0)) == set() def test_explicit_stop_iteration_unpackiterable(self): def f(): yield 1 raise StopIteration assert tuple(f()) == (1,) def test_exception_is_cleared_by_yield(self): def f(): try: foobar except NameError: yield 5 raise # should raise "no active exception to re-raise" gen = f() next(gen) # --> 5 try: next(gen) except TypeError: pass def test_multiple_invalid_sends(self): def mygen(): yield 42 g = mygen() raises(TypeError, g.send, 2) raises(TypeError, g.send, 2) def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline w_co = space.appexec([], '''(): def g(x): yield x + 5 return g.__code__ ''') assert should_not_inline(w_co) == False w_co = space.appexec([], '''(): def g(x): yield x + 5 yield x + 6 return g.__code__ ''') assert should_not_inline(w_co) == True
test/test_data_processor/test_condition_generation_dataset.py
puraminy/OpenPrompt
979
627
<reponame>puraminy/OpenPrompt import os, sys from os.path import dirname as d from os.path import abspath, join root_dir = d(d(d(abspath(__file__)))) sys.path.append(root_dir) from openprompt.data_utils.conditional_generation_dataset import PROCESSORS base_path = os.path.join(root_dir, "datasets/CondGen") def test_WebNLGProcessor(): dataset_name = "webnlg_2017" dataset_path = os.path.join(base_path, dataset_name) processor = PROCESSORS[dataset_name.lower()]() train_dataset = processor.get_train_examples(dataset_path) valid_dataset = processor.get_train_examples(dataset_path) test_dataset = processor.get_test_examples(dataset_path) assert len(train_dataset) == 18025 assert len(valid_dataset) == 18025 assert len(test_dataset) == 4928 assert test_dataset[0].text_a == " | Abilene_Regional_Airport : cityServed : Abilene,_Texas" assert test_dataset[0].text_b == "" assert test_dataset[0].tgt_text == "Abilene, Texas is served by the Abilene regional airport."
PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py
ckamtsikis/cmssw
852
643
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle from PhysicsTools.Heppy.physicsobjects.Tau import Tau from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3 import PhysicsTools.HeppyCore.framework.config as cfg class TauAnalyzer( Analyzer ): def __init__(self, cfg_ana, cfg_comp, looperName ): super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName) #---------------------------------------- # DECLARATION OF HANDLES OF LEPTONS STUFF #---------------------------------------- def declareHandles(self): super(TauAnalyzer, self).declareHandles() self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>') def beginLoop(self, setup): super(TauAnalyzer,self).beginLoop(setup) self.counters.addCounter('events') count = self.counters.counter('events') count.register('all events') count.register('has >=1 tau at preselection') count.register('has >=1 selected taus') count.register('has >=1 other taus') #------------------ # MAKE LEPTON LISTS #------------------ def makeTaus(self, event): event.inclusiveTaus = [] event.selectedTaus = [] event.otherTaus = [] #get all alltaus = map( Tau, self.handles['taus'].product() ) #make inclusive taus for tau in alltaus: tau.associatedVertex = event.goodVertices[0] if len(event.goodVertices)>0 else event.vertices[0] tau.lepVeto = False tau.idDecayMode = tau.tauID("decayModeFinding") tau.idDecayModeNewDMs = tau.tauID("decayModeFindingNewDMs") if hasattr(self.cfg_ana, 'inclusive_decayModeID') and self.cfg_ana.inclusive_decayModeID and not tau.tauID(self.cfg_ana.inclusive_decayModeID): continue tau.inclusive_lepVeto = False if self.cfg_ana.inclusive_vetoLeptons: for lep in event.selectedLeptons: if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.inclusive_leptonVetoDR: tau.inclusive_lepVeto = True if tau.inclusive_lepVeto: continue if self.cfg_ana.inclusive_vetoLeptonsPOG: if not tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID): tau.inclusive_lepVeto = True if not tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID): tau.inclusive_lepVeto = True if tau.inclusive_lepVeto: continue if tau.pt() < self.cfg_ana.inclusive_ptMin: continue if abs(tau.eta()) > self.cfg_ana.inclusive_etaMax: continue if abs(tau.dxy()) > self.cfg_ana.inclusive_dxyMax or abs(tau.dz()) > self.cfg_ana.inclusive_dzMax: continue def id3(tau,X): """Create an integer equal to 1-2-3 for (loose,medium,tight)""" return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight") def id5(tau,X): """Create an integer equal to 1-2-3-4-5 for (very loose, loose, medium, tight, very tight)""" return id3(tau, X) + tau.tauID(X%"VLoose") + tau.tauID(X%"VTight") def id6(tau,X): """Create an integer equal to 1-2-3-4-5-6 for (very loose, loose, medium, tight, very tight, very very tight)""" return id5(tau, X) + tau.tauID(X%"VVTight") tau.idMVA = id6(tau, "by%sIsolationMVArun2v1DBoldDMwLT") tau.idMVANewDM = id6(tau, "by%sIsolationMVArun2v1DBnewDMwLT") tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits") tau.idAntiMu = tau.tauID("againstMuonLoose3") + tau.tauID("againstMuonTight3") tau.idAntiE = id5(tau, "againstElectron%sMVA6") #print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID)) if tau.tauID(self.cfg_ana.inclusive_tauID): event.inclusiveTaus.append(tau) for tau in event.inclusiveTaus: tau.loose_lepVeto = False if self.cfg_ana.loose_vetoLeptons: for 
lep in event.selectedLeptons: if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.loose_leptonVetoDR: tau.loose_lepVeto = True if self.cfg_ana.loose_vetoLeptonsPOG: if not tau.tauID(self.cfg_ana.loose_tauAntiMuonID): tau.loose_lepVeto = True if not tau.tauID(self.cfg_ana.loose_tauAntiElectronID): tau.loose_lepVeto = True if tau.tauID(self.cfg_ana.loose_decayModeID) and \ tau.pt() > self.cfg_ana.loose_ptMin and abs(tau.eta()) < self.cfg_ana.loose_etaMax and \ abs(tau.dxy()) < self.cfg_ana.loose_dxyMax and abs(tau.dz()) < self.cfg_ana.loose_dzMax and \ tau.tauID(self.cfg_ana.loose_tauID) and not tau.loose_lepVeto: event.selectedTaus.append(tau) else: event.otherTaus.append(tau) event.inclusiveTaus.sort(key = lambda l : l.pt(), reverse = True) event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True) event.otherTaus.sort(key = lambda l : l.pt(), reverse = True) self.counters.counter('events').inc('all events') if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 tau at preselection') if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus') if len(event.otherTaus): self.counters.counter('events').inc('has >=1 other taus') def matchTaus(self, event): match = matchObjectCollection3(event.inclusiveTaus, event.gentaus, deltaRMax = 0.5) for lep in event.inclusiveTaus: gen = match[lep] lep.mcMatchId = 1 if gen else 0 lep.genp = gen def process(self, event): self.readCollections( event.input ) self.makeTaus(event) if not self.cfg_comp.isMC: return True if hasattr(event, 'gentaus'): self.matchTaus(event) return True # Find the definitions of the tau ID strings here: # http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer( class_object = TauAnalyzer, # inclusive very loose hadronic tau selection inclusive_ptMin = 18, inclusive_etaMax = 9999, inclusive_dxyMax = 1000., inclusive_dzMax = 0.4, inclusive_vetoLeptons = False, inclusive_leptonVetoDR = 0.4, inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or "" inclusive_tauID = "decayModeFindingNewDMs", inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required inclusive_tauAntiMuonID = "", inclusive_tauAntiElectronID = "", # loose hadronic tau selection loose_ptMin = 18, loose_etaMax = 9999, loose_dxyMax = 1000., loose_dzMax = 0.2, loose_vetoLeptons = True, loose_leptonVetoDR = 0.4, loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or "" loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits", loose_vetoLeptonsPOG = False, # If True, the following two IDs are required loose_tauAntiMuonID = "againstMuonLoose3", loose_tauAntiElectronID = "againstElectronLooseMVA5" ) )
tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py
Purple-PI/rlstructures
281
652
<gh_stars>100-1000 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from rlstructures import logging from rlstructures.env_wrappers import GymEnv, GymEnvInf from rlstructures.tools import weight_init import torch.nn as nn import copy import torch import time import numpy as np import torch.nn.functional as F from tutorial.tutorial_recurrent_policy.agent import RecurrentAgent from tutorial.tutorial_recurrent_policy.a2c import A2C import gym from gym.wrappers import TimeLimit # We write the 'create_env' and 'create_agent' function in the main file to allow these functions to be used with pickle when creating the batcher processes def create_gym_env(env_name): return gym.make(env_name) def create_env(n_envs, env_name=None, max_episode_steps=None, seed=None): envs = [] for k in range(n_envs): e = create_gym_env(env_name) e = TimeLimit(e, max_episode_steps=max_episode_steps) envs.append(e) return GymEnv(envs, seed) def create_train_env(n_envs, env_name=None, max_episode_steps=None, seed=None): envs = [] for k in range(n_envs): e = create_gym_env(env_name) e = TimeLimit(e, max_episode_steps=max_episode_steps) envs.append(e) return GymEnvInf(envs, seed) def create_agent(model, n_actions=1): return RecurrentAgent(model=model, n_actions=n_actions) class Experiment(A2C): def __init__(self, config, create_env, create_train_env, create_agent): super().__init__(config, create_env, create_train_env, create_agent) if __name__ == "__main__": # We use spawn mode such that most of the environment will run in multiple processes import torch.multiprocessing as mp mp.set_start_method("spawn") config = { "env_name": "CartPole-v0", "a2c_timesteps": 3, "n_envs": 4, "max_episode_steps": 100, "env_seed": 42, "n_threads": 4, "n_evaluation_threads": 2, "n_evaluation_episodes": 256, "time_limit": 3600, "lr": 0.001, "discount_factor": 0.95, "critic_coef": 1.0, "entropy_coef": 0.01, "a2c_coef": 1.0, "logdir": "./results", } exp = Experiment(config, create_env, create_train_env, create_agent) exp.run()
desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
kokosing/hue
5,079
662
<filename>desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
from openid.consumer.discover import OpenIDServiceEndpoint
import datadriven


class BadLinksTestCase(datadriven.DataDrivenTestCase):
    cases = [
        '',
        "http://not.in.a.link.tag/",
        '<link rel="openid.server" href="not.in.html.or.head" />',
    ]

    def __init__(self, data):
        datadriven.DataDrivenTestCase.__init__(self, data)
        self.data = data

    def runOneTest(self):
        actual = OpenIDServiceEndpoint.fromHTML('http://unused.url/', self.data)
        expected = []
        self.failUnlessEqual(expected, actual)


def pyUnitTests():
    return datadriven.loadTests(__name__)
fightchurn/listings/chap9/listing_9_4_regression_cparam.py
guy4261/fight-churn
151
677
from sklearn.linear_model import LogisticRegression
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions


def regression_cparam(data_set_path, C_param):
    X, y = prepare_data(data_set_path)
    retain_reg = LogisticRegression(C=C_param, penalty='l1', solver='liblinear', fit_intercept=True)
    retain_reg.fit(X, y)
    c_ext = '_c{:.3f}'.format(C_param)
    save_regression_summary(data_set_path, retain_reg, ext=c_ext)
    save_regression_model(data_set_path, retain_reg, ext=c_ext)
    save_dataset_predictions(data_set_path, retain_reg, X, ext=c_ext)
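A hedged usage sketch of the listing above: sweep a few values of the inverse regularization strength C, producing one saved summary, model and prediction set per value. The dataset path and the particular C values are placeholders, not taken from the listings:

if __name__ == '__main__':
    data_set_path = '/path/to/churn_analysis_dataset.csv'
    for c_param in [0.64, 0.32, 0.16, 0.08, 0.04, 0.02]:
        regression_cparam(data_set_path, c_param)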
homeassistant/components/zamg/weather.py
MrDelik/core
30,023
733
"""Sensor for data from Austrian Zentralanstalt für Meteorologie.""" from __future__ import annotations import logging import voluptuous as vol from homeassistant.components.weather import ( ATTR_WEATHER_HUMIDITY, ATTR_WEATHER_PRESSURE, ATTR_WEATHER_TEMPERATURE, ATTR_WEATHER_WIND_BEARING, ATTR_WEATHER_WIND_SPEED, PLATFORM_SCHEMA, WeatherEntity, ) from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS from homeassistant.core import HomeAssistant from homeassistant.helpers import config_validation as cv from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType # Reuse data and API logic from the sensor implementation from .sensor import ( ATTRIBUTION, CONF_STATION_ID, ZamgData, closest_station, zamg_stations, ) _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_STATION_ID): cv.string, vol.Inclusive( CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together" ): cv.latitude, vol.Inclusive( CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together" ): cv.longitude, } ) def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the ZAMG weather platform.""" name = config.get(CONF_NAME) latitude = config.get(CONF_LATITUDE, hass.config.latitude) longitude = config.get(CONF_LONGITUDE, hass.config.longitude) station_id = config.get(CONF_STATION_ID) or closest_station( latitude, longitude, hass.config.config_dir ) if station_id not in zamg_stations(hass.config.config_dir): _LOGGER.error( "Configured ZAMG %s (%s) is not a known station", CONF_STATION_ID, station_id, ) return probe = ZamgData(station_id=station_id) try: probe.update() except (ValueError, TypeError) as err: _LOGGER.error("Received error from ZAMG: %s", err) return add_entities([ZamgWeather(probe, name)], True) class ZamgWeather(WeatherEntity): """Representation of a weather condition.""" def __init__(self, zamg_data, stationname=None): """Initialise the platform with a data instance and station name.""" self.zamg_data = zamg_data self.stationname = stationname @property def name(self): """Return the name of the sensor.""" return ( self.stationname or f"ZAMG {self.zamg_data.data.get('Name') or '(unknown station)'}" ) @property def condition(self): """Return the current condition.""" return None @property def attribution(self): """Return the attribution.""" return ATTRIBUTION @property def temperature(self): """Return the platform temperature.""" return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE) @property def temperature_unit(self): """Return the unit of measurement.""" return TEMP_CELSIUS @property def pressure(self): """Return the pressure.""" return self.zamg_data.get_data(ATTR_WEATHER_PRESSURE) @property def humidity(self): """Return the humidity.""" return self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY) @property def wind_speed(self): """Return the wind speed.""" return self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED) @property def wind_bearing(self): """Return the wind bearing.""" return self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING) def update(self): """Update current conditions.""" self.zamg_data.update()
tf-2-data-parallelism/src/utils.py
Amirosimani/amazon-sagemaker-script-mode
144
747
import os

import numpy as np
import tensorflow as tf


def get_train_data(train_dir, batch_size):
    train_images = np.load(os.path.join(train_dir, 'train_images.npy'))
    train_labels = np.load(os.path.join(train_dir, 'train_labels.npy'))
    print('train_images', train_images.shape, 'train_labels', train_labels.shape)
    dataset_train = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
    dataset_train = dataset_train.repeat().shuffle(10000).batch(batch_size)
    return dataset_train


def get_val_data(val_dir):
    test_images = np.load(os.path.join(val_dir, 'validation_images.npy'))
    test_labels = np.load(os.path.join(val_dir, 'validation_labels.npy'))
    print('validation_images', test_images.shape, 'validation_labels', test_labels.shape)
    dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
    return dataset_test
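One practical note on the loaders above: `get_train_data()` returns an endlessly repeating dataset, so a training loop needs an explicit `steps_per_epoch`. A hedged sketch follows; the model, input shapes and directory layout are illustrative assumptions:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')

train_ds = get_train_data('data/train', batch_size=64)
val_ds = get_val_data('data/validation').batch(64)
model.fit(train_ds, steps_per_epoch=100, epochs=2, validation_data=val_ds)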
streams/readers/arff_reader.py
JanSurft/tornado
103
761
""" The Tornado Framework By <NAME> University of Ottawa, Ontario, Canada E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com """ import re from data_structures.attribute import Attribute from dictionary.tornado_dictionary import TornadoDic class ARFFReader: """This class is used to read a .arff file.""" @staticmethod def read(file_path): labels = [] attributes = [] attributes_min_max = [] records = [] data_flag = False reader = open(file_path, "r") for line in reader: if line.strip() == '': continue if line.startswith("@attribute") or line.startswith("@ATTRIBUTE"): line = line.strip('\n\r\t') line = line.split(' ') attribute_name = line[1] attribute_value_range = line[2] attribute = Attribute() attribute.set_name(attribute_name) if attribute_value_range.lower() in ['numeric', 'real', 'integer']: attribute_type = TornadoDic.NUMERIC_ATTRIBUTE attribute_value_range = [] attributes_min_max.append([0, 0]) else: attribute_type = TornadoDic.NOMINAL_ATTRIBUTE attribute_value_range = attribute_value_range.strip('{}').replace("'", "") attribute_value_range = attribute_value_range.split(',') attributes_min_max.append([None, None]) attribute.set_type(attribute_type) attribute.set_possible_values(attribute_value_range) attributes.append(attribute) elif line.startswith("@data") or line.startswith("@DATA"): data_flag = True labels = attributes[len(attributes) - 1].POSSIBLE_VALUES attributes.pop(len(attributes) - 1) continue elif data_flag is True: line = re.sub('\s+', '', line) elements = line.split(',') for i in range(0, len(elements) - 1): if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE: elements[i] = float(elements[i]) min_value = attributes_min_max[i][0] max_value = attributes_min_max[i][1] if elements[i] < min_value: min_value = elements[i] elif elements[i] > max_value: max_value = elements[i] attributes_min_max[i] = [min_value, max_value] records.append(elements) for i in range(0, len(attributes)): if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE: attributes[i].set_bounds_values(attributes_min_max[i][0], attributes_min_max[i][1]) return labels, attributes, records
smarts/zoo/worker.py
idsc-frazzoli/SMARTS
554
768
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Run an agent in it's own (independent) process. What Agent code does is out of our direct control, we want to avoid any interactions with global state that might be present in the SMARTS process. To protect and isolate Agents from any pollution of global state in the main SMARTS process, we spawn Agents in their fresh and independent python process. This script is called from within SMARTS to instantiate a remote agent. The protocal is as follows: 1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent 2. worker.py will begin listening on port 5467. 3. SMARTS connects to (ip, 5467) as a client. 4. SMARTS calls `build()` rpc with `AgentSpec` as input. 5. worker.py recieves the `AgentSpec` instances and builds the Agent. 6. SMARTS calls `act()` rpc with observation as input and receives the actions as response from worker.py. """ import argparse import importlib import logging import os import signal import sys from concurrent import futures import grpc from smarts.zoo import worker_pb2_grpc, worker_servicer # Front-load some expensive imports as to not block the simulation modules = [ "smarts.core.utils.pybullet", "smarts.core.utils.sumo", "smarts.core.sumo_road_network", "numpy", "sklearn", "shapely", "scipy", "trimesh", "panda3d", "gym", "ray", ] for mod in modules: try: importlib.import_module(mod) except ImportError: if mod == "ray": print( "You need to install the ray dependency using pip install -e .[train] first" ) if mod == "panda3d": print( "You need to install the panda3d dependency using pip install -e .[camera-obs] first" ) pass # End front-loaded imports logging.basicConfig(level=logging.INFO) log = logging.getLogger(f"worker.py - pid({os.getpid()})") def serve(port): ip = "[::]" server = grpc.server(futures.ThreadPoolExecutor(max_workers=1)) worker_pb2_grpc.add_WorkerServicer_to_server( worker_servicer.WorkerServicer(), server ) server.add_insecure_port(f"{ip}:{port}") server.start() log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Started serving.") def stop_server(unused_signum, unused_frame): server.stop(0) log.debug( f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Received interrupt signal." 
) # Catch keyboard interrupt and terminate signal signal.signal(signal.SIGINT, stop_server) signal.signal(signal.SIGTERM, stop_server) # Wait to receive server termination signal server.wait_for_termination() log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Server exited") if __name__ == "__main__": parser = argparse.ArgumentParser("Run an agent in an independent process.") parser.add_argument( "--port", type=int, required=True, help="Port to listen for remote client connections.", ) args = parser.parse_args() serve(args.port)
configs/mmdet/detection/detection_tensorrt_static-300x300.py
zhiqwang/mmdeploy
746
774
<reponame>zhiqwang/mmdeploy
_base_ = ['../_base_/base_tensorrt_static-300x300.py']
tencentcloud/dbbrain/v20210527/models.py
lleiyyang/tencentcloud-sdk-python
465
794
# -*- coding: utf8 -*- # Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from tencentcloud.common.abstract_model import AbstractModel class AddUserContactRequest(AbstractModel): """AddUserContact请求参数结构体 """ def __init__(self): r""" :param Name: 联系人姓名,由中英文、数字、空格、!@#$%^&*()_+-=()组成,不能以下划线开头,长度在20以内。 :type Name: str :param ContactInfo: 邮箱地址,支持大小写字母、数字、下划线及@字符, 不能以下划线开头,邮箱地址不可重复。 :type ContactInfo: str :param Product: 服务产品类型,固定值:"mysql"。 :type Product: str """ self.Name = None self.ContactInfo = None self.Product = None def _deserialize(self, params): self.Name = params.get("Name") self.ContactInfo = params.get("ContactInfo") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AddUserContactResponse(AbstractModel): """AddUserContact返回参数结构体 """ def __init__(self): r""" :param Id: 添加成功的联系人id。 :type Id: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Id = None self.RequestId = None def _deserialize(self, params): self.Id = params.get("Id") self.RequestId = params.get("RequestId") class ContactItem(AbstractModel): """联系人contact描述。 """ def __init__(self): r""" :param Id: 联系人id。 :type Id: int :param Name: 联系人姓名。 :type Name: str :param Mail: 联系人绑定的邮箱。 :type Mail: str """ self.Id = None self.Name = None self.Mail = None def _deserialize(self, params): self.Id = params.get("Id") self.Name = params.get("Name") self.Mail = params.get("Mail") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateDBDiagReportTaskRequest(AbstractModel): """CreateDBDiagReportTask请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param StartTime: 开始时间,如“2020-11-08T14:00:00+08:00”。 :type StartTime: str :param EndTime: 结束时间,如“2020-11-09T14:00:00+08:00”。 :type EndTime: str :param SendMailFlag: 是否发送邮件: 0 - 否,1 - 是。 :type SendMailFlag: int :param ContactPerson: 接收邮件的联系人ID数组。 :type ContactPerson: list of int :param ContactGroup: 接收邮件的联系组ID数组。 :type ContactGroup: list of int :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认值为"mysql"。 :type Product: str """ self.InstanceId = None self.StartTime = None self.EndTime = None self.SendMailFlag = None self.ContactPerson = None self.ContactGroup = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.SendMailFlag = params.get("SendMailFlag") self.ContactPerson = params.get("ContactPerson") self.ContactGroup = params.get("ContactGroup") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateDBDiagReportTaskResponse(AbstractModel): """CreateDBDiagReportTask返回参数结构体 """ def __init__(self): r""" :param AsyncRequestId: 异步任务的请求 ID,可使用此 ID 查询异步任务的执行结果。 注意:此字段可能返回 null,表示取不到有效值。 :type AsyncRequestId: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.AsyncRequestId = None self.RequestId = None def _deserialize(self, params): self.AsyncRequestId = params.get("AsyncRequestId") self.RequestId = params.get("RequestId") class CreateDBDiagReportUrlRequest(AbstractModel): """CreateDBDiagReportUrl请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param AsyncRequestId: 健康报告相应的任务ID,可通过DescribeDBDiagReportTasks查询。 :type AsyncRequestId: int :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.AsyncRequestId = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.AsyncRequestId = params.get("AsyncRequestId") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateDBDiagReportUrlResponse(AbstractModel): """CreateDBDiagReportUrl返回参数结构体 """ def __init__(self): r""" :param ReportUrl: 健康报告浏览地址。 :type ReportUrl: str :param ExpireTime: 健康报告浏览地址到期时间戳(秒)。 :type ExpireTime: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.ReportUrl = None self.ExpireTime = None self.RequestId = None def _deserialize(self, params): self.ReportUrl = params.get("ReportUrl") self.ExpireTime = params.get("ExpireTime") self.RequestId = params.get("RequestId") class CreateMailProfileRequest(AbstractModel): """CreateMailProfile请求参数结构体 """ def __init__(self): r""" :param ProfileInfo: 邮件配置内容。 :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo` :param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。 :type ProfileLevel: str :param ProfileName: 配置名称,需要保持唯一性,数据库巡检邮件配置名称自拟;定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。 :type ProfileName: str :param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。 :type ProfileType: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。 :type Product: str :param BindInstanceIds: 配置绑定的实例ID,当配置级别为"Instance"时需要传入且只能为一个实例;当配置级别为“User”时,此参数不填。 :type BindInstanceIds: list of str """ self.ProfileInfo = None self.ProfileLevel = None self.ProfileName = None self.ProfileType = None self.Product = None self.BindInstanceIds = None def _deserialize(self, params): if params.get("ProfileInfo") is not None: self.ProfileInfo = ProfileInfo() self.ProfileInfo._deserialize(params.get("ProfileInfo")) self.ProfileLevel = params.get("ProfileLevel") self.ProfileName = params.get("ProfileName") self.ProfileType = params.get("ProfileType") self.Product = params.get("Product") self.BindInstanceIds = params.get("BindInstanceIds") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateMailProfileResponse(AbstractModel): """CreateMailProfile返回参数结构体 """ def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None def _deserialize(self, params): self.RequestId = params.get("RequestId") class CreateSchedulerMailProfileRequest(AbstractModel): """CreateSchedulerMailProfile请求参数结构体 """ def __init__(self): r""" :param WeekConfiguration: 取值范围1-7,分别代表周一至周日。 :type WeekConfiguration: list of int :param ProfileInfo: 邮件配置内容。 :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo` :param ProfileName: 配置名称,需要保持唯一性,定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。 :type ProfileName: str :param BindInstanceId: 配置订阅的实例ID。 :type BindInstanceId: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """ self.WeekConfiguration = None self.ProfileInfo = None self.ProfileName = None self.BindInstanceId = None self.Product = None def _deserialize(self, params): self.WeekConfiguration = params.get("WeekConfiguration") if params.get("ProfileInfo") is not None: self.ProfileInfo = ProfileInfo() self.ProfileInfo._deserialize(params.get("ProfileInfo")) self.ProfileName = params.get("ProfileName") self.BindInstanceId = params.get("BindInstanceId") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateSchedulerMailProfileResponse(AbstractModel): """CreateSchedulerMailProfile返回参数结构体 """ def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None def _deserialize(self, params): self.RequestId = params.get("RequestId") class CreateSecurityAuditLogExportTaskRequest(AbstractModel): """CreateSecurityAuditLogExportTask请求参数结构体 """ def __init__(self): r""" :param SecAuditGroupId: 安全审计组Id。 :type SecAuditGroupId: str :param StartTime: 导出日志开始时间,例如2020-12-28 00:00:00。 :type StartTime: str :param EndTime: 导出日志结束时间,例如2020-12-28 01:00:00。 :type EndTime: str :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。 :type Product: str :param DangerLevels: 日志风险等级列表,支持值包括:0 无风险;1 低风险;2 中风险;3 高风险。 :type DangerLevels: list of int """ self.SecAuditGroupId = None self.StartTime = None self.EndTime = None self.Product = None self.DangerLevels = None def _deserialize(self, params): self.SecAuditGroupId = params.get("SecAuditGroupId") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.Product = params.get("Product") self.DangerLevels = params.get("DangerLevels") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateSecurityAuditLogExportTaskResponse(AbstractModel): """CreateSecurityAuditLogExportTask返回参数结构体 """ def __init__(self): r""" :param AsyncRequestId: 日志导出任务Id。 :type AsyncRequestId: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.AsyncRequestId = None self.RequestId = None def _deserialize(self, params): self.AsyncRequestId = params.get("AsyncRequestId") self.RequestId = params.get("RequestId") class DeleteSecurityAuditLogExportTasksRequest(AbstractModel): """DeleteSecurityAuditLogExportTasks请求参数结构体 """ def __init__(self): r""" :param SecAuditGroupId: 安全审计组Id。 :type SecAuditGroupId: str :param AsyncRequestIds: 日志导出任务Id列表,接口会忽略不存在或已删除的任务Id。 :type AsyncRequestIds: list of int non-negative :param Product: 服务产品类型,支持值: "mysql" - 云数据库 MySQL。 :type Product: str """ self.SecAuditGroupId = None self.AsyncRequestIds = None self.Product = None def _deserialize(self, params): self.SecAuditGroupId = params.get("SecAuditGroupId") self.AsyncRequestIds = params.get("AsyncRequestIds") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteSecurityAuditLogExportTasksResponse(AbstractModel): """DeleteSecurityAuditLogExportTasks返回参数结构体 """ def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None def _deserialize(self, params): self.RequestId = params.get("RequestId") class DescribeAllUserContactRequest(AbstractModel): """DescribeAllUserContact请求参数结构体 """ def __init__(self): r""" :param Product: 服务产品类型,固定值:mysql。 :type Product: str :param Names: 联系人名数组,支持模糊搜索。 :type Names: list of str """ self.Product = None self.Names = None def _deserialize(self, params): self.Product = params.get("Product") self.Names = params.get("Names") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeAllUserContactResponse(AbstractModel): """DescribeAllUserContact返回参数结构体 """ def __init__(self): r""" :param TotalCount: 联系人的总数量。 :type TotalCount: int :param Contacts: 联系人的信息。 注意:此字段可能返回 null,表示取不到有效值。 :type Contacts: list of ContactItem :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TotalCount = None self.Contacts = None self.RequestId = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("Contacts") is not None: self.Contacts = [] for item in params.get("Contacts"): obj = ContactItem() obj._deserialize(item) self.Contacts.append(obj) self.RequestId = params.get("RequestId") class DescribeAllUserGroupRequest(AbstractModel): """DescribeAllUserGroup请求参数结构体 """ def __init__(self): r""" :param Product: 服务产品类型,固定值:mysql。 :type Product: str :param Names: 联系组名称数组,支持模糊搜索。 :type Names: list of str """ self.Product = None self.Names = None def _deserialize(self, params): self.Product = params.get("Product") self.Names = params.get("Names") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeAllUserGroupResponse(AbstractModel): """DescribeAllUserGroup返回参数结构体 """ def __init__(self): r""" :param TotalCount: 组总数。 :type TotalCount: int :param Groups: 组信息。 注意:此字段可能返回 null,表示取不到有效值。 :type Groups: list of GroupItem :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TotalCount = None self.Groups = None self.RequestId = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("Groups") is not None: self.Groups = [] for item in params.get("Groups"): obj = GroupItem() obj._deserialize(item) self.Groups.append(obj) self.RequestId = params.get("RequestId") class DescribeDBDiagEventRequest(AbstractModel): """DescribeDBDiagEvent请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param EventId: 事件 ID 。通过“获取实例诊断历史DescribeDBDiagHistory”获取。 :type EventId: int :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.EventId = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.EventId = params.get("EventId") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeDBDiagEventResponse(AbstractModel): """DescribeDBDiagEvent返回参数结构体 """ def __init__(self): r""" :param DiagItem: 诊断项。 :type DiagItem: str :param DiagType: 诊断类型。 :type DiagType: str :param EventId: 事件 ID 。 :type EventId: int :param Explanation: 诊断事件详情,若无附加解释信息则输出为空。 :type Explanation: str :param Outline: 诊断概要。 :type Outline: str :param Problem: 诊断出的问题。 :type Problem: str :param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。 :type Severity: int :param StartTime: 开始时间 :type StartTime: str :param Suggestions: 诊断建议,若无建议则输出为空。 :type Suggestions: str :param Metric: 保留字段。 注意:此字段可能返回 null,表示取不到有效值。 :type Metric: str :param EndTime: 结束时间。 :type EndTime: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.DiagItem = None self.DiagType = None self.EventId = None self.Explanation = None self.Outline = None self.Problem = None self.Severity = None self.StartTime = None self.Suggestions = None self.Metric = None self.EndTime = None self.RequestId = None def _deserialize(self, params): self.DiagItem = params.get("DiagItem") self.DiagType = params.get("DiagType") self.EventId = params.get("EventId") self.Explanation = params.get("Explanation") self.Outline = params.get("Outline") self.Problem = params.get("Problem") self.Severity = params.get("Severity") self.StartTime = params.get("StartTime") self.Suggestions = params.get("Suggestions") self.Metric = params.get("Metric") self.EndTime = params.get("EndTime") self.RequestId = params.get("RequestId") class DescribeDBDiagHistoryRequest(AbstractModel): """DescribeDBDiagHistory请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param StartTime: 开始时间,如“2019-09-10 12:13:14”。 :type StartTime: str :param EndTime: 结束时间,如“2019-09-11 12:13:14”,结束时间与开始时间的间隔最大可为2天。 :type EndTime: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.StartTime = None self.EndTime = None self.Product = None def _deserialize(self, params): self.InstanceId 
= params.get("InstanceId") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeDBDiagHistoryResponse(AbstractModel): """DescribeDBDiagHistory返回参数结构体 """ def __init__(self): r""" :param Events: 事件描述。 :type Events: list of DiagHistoryEventItem :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Events = None self.RequestId = None def _deserialize(self, params): if params.get("Events") is not None: self.Events = [] for item in params.get("Events"): obj = DiagHistoryEventItem() obj._deserialize(item) self.Events.append(obj) self.RequestId = params.get("RequestId") class DescribeDBDiagReportTasksRequest(AbstractModel): """DescribeDBDiagReportTasks请求参数结构体 """ def __init__(self): r""" :param StartTime: 第一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。 :type StartTime: str :param EndTime: 最后一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。 :type EndTime: str :param InstanceIds: 实例ID数组,用于筛选指定实例的任务列表。 :type InstanceIds: list of str :param Sources: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。 :type Sources: list of str :param HealthLevels: 报告的健康等级,支持的取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK" - 危险;"HIGH_RISK" - 高危。 :type HealthLevels: str :param TaskStatuses: 任务的状态,支持的取值包括:"created" - 新建;"chosen" - 待执行; "running" - 执行中;"failed" - 失败;"finished" - 已完成。 :type TaskStatuses: str :param Offset: 偏移量,默认0。 :type Offset: int :param Limit: 返回数量,默认20,最大值为100。 :type Limit: int :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """ self.StartTime = None self.EndTime = None self.InstanceIds = None self.Sources = None self.HealthLevels = None self.TaskStatuses = None self.Offset = None self.Limit = None self.Product = None def _deserialize(self, params): self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.InstanceIds = params.get("InstanceIds") self.Sources = params.get("Sources") self.HealthLevels = params.get("HealthLevels") self.TaskStatuses = params.get("TaskStatuses") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeDBDiagReportTasksResponse(AbstractModel): """DescribeDBDiagReportTasks返回参数结构体 """ def __init__(self): r""" :param TotalCount: 任务总数目。 :type TotalCount: int :param Tasks: 任务列表。 :type Tasks: list of HealthReportTask :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TotalCount = None self.Tasks = None self.RequestId = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("Tasks") is not None: self.Tasks = [] for item in params.get("Tasks"): obj = HealthReportTask() obj._deserialize(item) self.Tasks.append(obj) self.RequestId = params.get("RequestId") class DescribeDBSpaceStatusRequest(AbstractModel): """DescribeDBSpaceStatus请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param RangeDays: 时间段天数,截止日期为当日,默认为7天。 :type RangeDays: int :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.RangeDays = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.RangeDays = params.get("RangeDays") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeDBSpaceStatusResponse(AbstractModel): """DescribeDBSpaceStatus返回参数结构体 """ def __init__(self): r""" :param Growth: 磁盘增长量(MB)。 :type Growth: int :param Remain: 磁盘剩余(MB)。 :type Remain: int :param Total: 磁盘总量(MB)。 :type Total: int :param AvailableDays: 预计可用天数。 :type AvailableDays: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Growth = None self.Remain = None self.Total = None self.AvailableDays = None self.RequestId = None def _deserialize(self, params): self.Growth = params.get("Growth") self.Remain = params.get("Remain") self.Total = params.get("Total") self.AvailableDays = params.get("AvailableDays") self.RequestId = params.get("RequestId") class DescribeDiagDBInstancesRequest(AbstractModel): """DescribeDiagDBInstances请求参数结构体 """ def __init__(self): r""" :param IsSupported: 是否是DBbrain支持的实例,固定传 true。 :type IsSupported: bool :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str :param Offset: 分页参数,偏移量。 :type Offset: int :param Limit: 分页参数,分页值,最大值为100。 :type Limit: int :param InstanceNames: 根据实例名称条件查询。 :type InstanceNames: list of str :param InstanceIds: 根据实例ID条件查询。 :type InstanceIds: list of str :param Regions: 根据地域条件查询。 :type Regions: list of str """ self.IsSupported = None self.Product = None self.Offset = None self.Limit = None self.InstanceNames = None self.InstanceIds = None self.Regions = None def _deserialize(self, params): self.IsSupported = params.get("IsSupported") self.Product = params.get("Product") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.InstanceNames = params.get("InstanceNames") self.InstanceIds = params.get("InstanceIds") self.Regions = params.get("Regions") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeDiagDBInstancesResponse(AbstractModel): """DescribeDiagDBInstances返回参数结构体 """ def __init__(self): r""" :param TotalCount: 实例总数。 :type TotalCount: int :param DbScanStatus: 全实例巡检状态:0:开启全实例巡检;1:未开启全实例巡检。 :type DbScanStatus: int :param Items: 实例相关信息。 :type Items: list of InstanceInfo :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TotalCount = None self.DbScanStatus = None self.Items = None self.RequestId = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") self.DbScanStatus = params.get("DbScanStatus") if params.get("Items") is not None: self.Items = [] for item in params.get("Items"): obj = InstanceInfo() obj._deserialize(item) self.Items.append(obj) self.RequestId = params.get("RequestId") class DescribeHealthScoreRequest(AbstractModel): """DescribeHealthScore请求参数结构体 """ def __init__(self): r""" :param InstanceId: 需要获取健康得分的实例ID。 :type InstanceId: str :param Time: 获取健康得分的时间,时间格式如:2019-09-10 12:13:14。 :type Time: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.Time = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Time = params.get("Time") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeHealthScoreResponse(AbstractModel): """DescribeHealthScore返回参数结构体 """ def __init__(self): r""" :param Data: 健康得分以及异常扣分项。 :type Data: :class:`tencentcloud.dbbrain.v20210527.models.HealthScoreInfo` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Data = None self.RequestId = None def _deserialize(self, params): if params.get("Data") is not None: self.Data = HealthScoreInfo() self.Data._deserialize(params.get("Data")) self.RequestId = params.get("RequestId") class DescribeMailProfileRequest(AbstractModel): """DescribeMailProfile请求参数结构体 """ def __init__(self): r""" :param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。 :type ProfileType: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str :param Offset: 分页偏移量。 :type Offset: int :param Limit: 分页单位,最大支持50。 :type Limit: int :param ProfileName: 根据邮件配置名称查询,定期发送的邮件配置名称遵循:"scheduler_"+{instanceId}的规则。 :type ProfileName: str """ self.ProfileType = None self.Product = None self.Offset = None self.Limit = None self.ProfileName = None def _deserialize(self, params): self.ProfileType = params.get("ProfileType") self.Product = params.get("Product") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.ProfileName = params.get("ProfileName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeMailProfileResponse(AbstractModel): """DescribeMailProfile返回参数结构体 """ def __init__(self): r""" :param ProfileList: 邮件配置详情。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileList: list of UserProfile :param TotalCount: 邮件模版总数。 注意:此字段可能返回 null,表示取不到有效值。 :type TotalCount: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.ProfileList = None self.TotalCount = None self.RequestId = None def _deserialize(self, params): if params.get("ProfileList") is not None: self.ProfileList = [] for item in params.get("ProfileList"): obj = UserProfile() obj._deserialize(item) self.ProfileList.append(obj) self.TotalCount = params.get("TotalCount") self.RequestId = params.get("RequestId") class DescribeMySqlProcessListRequest(AbstractModel): """DescribeMySqlProcessList请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param ID: 线程的ID,用于筛选线程列表。 :type ID: int :param User: 线程的操作账号名,用于筛选线程列表。 :type User: str :param Host: 线程的操作主机地址,用于筛选线程列表。 :type Host: str :param DB: 线程的操作数据库,用于筛选线程列表。 :type DB: str :param State: 线程的操作状态,用于筛选线程列表。 :type State: str :param Command: 线程的执行类型,用于筛选线程列表。 :type Command: str :param Time: 线程的操作时长最小值,单位秒,用于筛选操作时长大于该值的线程列表。 :type Time: int :param Info: 线程的操作语句,用于筛选线程列表。 :type Info: str :param Limit: 返回数量,默认20。 :type Limit: int :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.ID = None self.User = None self.Host = None self.DB = None self.State = None self.Command = None self.Time = None self.Info = None self.Limit = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.ID = params.get("ID") self.User = params.get("User") self.Host = params.get("Host") self.DB = params.get("DB") self.State = params.get("State") self.Command = params.get("Command") self.Time = params.get("Time") self.Info = params.get("Info") self.Limit = params.get("Limit") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeMySqlProcessListResponse(AbstractModel): """DescribeMySqlProcessList返回参数结构体 """ def __init__(self): r""" :param ProcessList: 实时线程列表。 :type ProcessList: list of MySqlProcess :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.ProcessList = None self.RequestId = None def _deserialize(self, params): if params.get("ProcessList") is not None: self.ProcessList = [] for item in params.get("ProcessList"): obj = MySqlProcess() obj._deserialize(item) self.ProcessList.append(obj) self.RequestId = params.get("RequestId") class DescribeSecurityAuditLogDownloadUrlsRequest(AbstractModel): """DescribeSecurityAuditLogDownloadUrls请求参数结构体 """ def __init__(self): r""" :param SecAuditGroupId: 安全审计组Id。 :type SecAuditGroupId: str :param AsyncRequestId: 异步任务Id。 :type AsyncRequestId: int :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。 :type Product: str """ self.SecAuditGroupId = None self.AsyncRequestId = None self.Product = None def _deserialize(self, params): self.SecAuditGroupId = params.get("SecAuditGroupId") self.AsyncRequestId = params.get("AsyncRequestId") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeSecurityAuditLogDownloadUrlsResponse(AbstractModel): """DescribeSecurityAuditLogDownloadUrls返回参数结构体 """ def __init__(self): r""" :param Urls: 导出结果的COS链接列表。当结果集很大时,可能会切分为多个url下载。 :type Urls: list of str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Urls = None self.RequestId = None def _deserialize(self, params): self.Urls = params.get("Urls") self.RequestId = params.get("RequestId") class DescribeSecurityAuditLogExportTasksRequest(AbstractModel): """DescribeSecurityAuditLogExportTasks请求参数结构体 """ def __init__(self): r""" :param SecAuditGroupId: 安全审计组Id。 :type SecAuditGroupId: str :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。 :type Product: str :param AsyncRequestIds: 日志导出任务Id列表。 :type AsyncRequestIds: list of int non-negative :param Offset: 偏移量,默认0。 :type Offset: int :param Limit: 返回数量,默认20,最大值为100。 :type Limit: int """ self.SecAuditGroupId = None self.Product = None self.AsyncRequestIds = None self.Offset = None self.Limit = None def _deserialize(self, params): self.SecAuditGroupId = params.get("SecAuditGroupId") self.Product = params.get("Product") self.AsyncRequestIds = params.get("AsyncRequestIds") self.Offset = params.get("Offset") self.Limit = params.get("Limit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeSecurityAuditLogExportTasksResponse(AbstractModel): """DescribeSecurityAuditLogExportTasks返回参数结构体 """ def __init__(self): r""" :param Tasks: 安全审计日志导出任务列表。 :type Tasks: list of SecLogExportTaskInfo :param TotalCount: 安全审计日志导出任务总数。 :type TotalCount: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Tasks = None self.TotalCount = None self.RequestId = None def _deserialize(self, params): if params.get("Tasks") is not None: self.Tasks = [] for item in params.get("Tasks"): obj = SecLogExportTaskInfo() obj._deserialize(item) self.Tasks.append(obj) self.TotalCount = params.get("TotalCount") self.RequestId = params.get("RequestId") class DescribeSlowLogTimeSeriesStatsRequest(AbstractModel): """DescribeSlowLogTimeSeriesStats请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param StartTime: 开始时间,如“2019-09-10 12:13:14”。 :type StartTime: str :param EndTime: 结束时间,如“2019-09-10 12:13:14”,结束时间与开始时间的间隔最大可为7天。 :type EndTime: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.StartTime = None self.EndTime = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeSlowLogTimeSeriesStatsResponse(AbstractModel): """DescribeSlowLogTimeSeriesStats返回参数结构体 """ def __init__(self): r""" :param Period: 柱间单位时间间隔,单位为秒。 :type Period: int :param TimeSeries: 单位时间间隔内慢日志数量统计。 :type TimeSeries: list of TimeSlice :param SeriesData: 单位时间间隔内的实例 cpu 利用率监控数据。 :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Period = None self.TimeSeries = None self.SeriesData = None self.RequestId = None def _deserialize(self, params): self.Period = params.get("Period") if params.get("TimeSeries") is not None: self.TimeSeries = [] for item in params.get("TimeSeries"): obj = TimeSlice() obj._deserialize(item) self.TimeSeries.append(obj) if params.get("SeriesData") is not None: self.SeriesData = MonitorMetricSeriesData() self.SeriesData._deserialize(params.get("SeriesData")) self.RequestId = params.get("RequestId") class DescribeSlowLogTopSqlsRequest(AbstractModel): """DescribeSlowLogTopSqls请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param StartTime: 开始时间,如“2019-09-10 12:13:14”。 :type StartTime: str :param EndTime: 截止时间,如“2019-09-11 10:13:14”,截止时间与开始时间的间隔小于7天。 :type EndTime: str :param SortBy: 排序键,目前支持 QueryTime,ExecTimes,RowsSent,LockTime以及RowsExamined 等排序键,默认为QueryTime。 :type SortBy: str :param OrderBy: 排序方式,支持ASC(升序)以及DESC(降序),默认为DESC。 :type OrderBy: str :param Limit: 返回数量,默认为20,最大值为100。 :type Limit: int :param Offset: 偏移量,默认为0。 :type Offset: int :param SchemaList: 数据库名称数组。 :type SchemaList: list of SchemaItem :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.StartTime = None self.EndTime = None self.SortBy = None self.OrderBy = None self.Limit = None self.Offset = None 
self.SchemaList = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.SortBy = params.get("SortBy") self.OrderBy = params.get("OrderBy") self.Limit = params.get("Limit") self.Offset = params.get("Offset") if params.get("SchemaList") is not None: self.SchemaList = [] for item in params.get("SchemaList"): obj = SchemaItem() obj._deserialize(item) self.SchemaList.append(obj) self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeSlowLogTopSqlsResponse(AbstractModel): """DescribeSlowLogTopSqls返回参数结构体 """ def __init__(self): r""" :param TotalCount: 符合条件的记录总数。 :type TotalCount: int :param Rows: 慢日志 top sql 列表 :type Rows: list of SlowLogTopSqlItem :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TotalCount = None self.Rows = None self.RequestId = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("Rows") is not None: self.Rows = [] for item in params.get("Rows"): obj = SlowLogTopSqlItem() obj._deserialize(item) self.Rows.append(obj) self.RequestId = params.get("RequestId") class DescribeSlowLogUserHostStatsRequest(AbstractModel): """DescribeSlowLogUserHostStats请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param StartTime: 查询范围的开始时间,时间格式如:2019-09-10 12:13:14。 :type StartTime: str :param EndTime: 查询范围的结束时间,时间格式如:2019-09-10 12:13:14。 :type EndTime: str :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str :param Md5: SOL模板的MD5值 :type Md5: str """ self.InstanceId = None self.StartTime = None self.EndTime = None self.Product = None self.Md5 = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.Product = params.get("Product") self.Md5 = params.get("Md5") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
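# --- Illustrative sketch (hypothetical values, not part of the generated module) ---
# Requesting the top slow-log SQL statements for one database with the
# DescribeSlowLogTopSqlsRequest defined above, ordered by total query time.
# SchemaItem is the small wrapper model declared later in this module.
#
#     schema = SchemaItem()
#     schema.Schema = "test_db"
#
#     req = DescribeSlowLogTopSqlsRequest()
#     req.InstanceId = "cdb-example"
#     req.StartTime = "2019-09-10 12:13:14"
#     req.EndTime = "2019-09-11 10:13:14"   # must stay within 7 days of StartTime
#     req.SortBy = "QueryTime"
#     req.OrderBy = "DESC"
#     req.SchemaList = [schema]
#     req.Product = "mysql"
# --- end sketch ---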
% ",".join(memeber_set)) class DescribeSlowLogUserHostStatsResponse(AbstractModel): """DescribeSlowLogUserHostStats返回参数结构体 """ def __init__(self): r""" :param TotalCount: 来源地址数目。 :type TotalCount: int :param Items: 各来源地址的慢日志占比详情列表。 :type Items: list of SlowLogHost :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TotalCount = None self.Items = None self.RequestId = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("Items") is not None: self.Items = [] for item in params.get("Items"): obj = SlowLogHost() obj._deserialize(item) self.Items.append(obj) self.RequestId = params.get("RequestId") class DescribeTopSpaceSchemaTimeSeriesRequest(AbstractModel): """DescribeTopSpaceSchemaTimeSeries请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param Limit: 返回的Top库数量,最大值为100,默认为20。 :type Limit: int :param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。 :type SortBy: str :param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。 :type StartDate: str :param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。 :type EndDate: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.Limit = None self.SortBy = None self.StartDate = None self.EndDate = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Limit = params.get("Limit") self.SortBy = params.get("SortBy") self.StartDate = params.get("StartDate") self.EndDate = params.get("EndDate") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeTopSpaceSchemaTimeSeriesResponse(AbstractModel): """DescribeTopSpaceSchemaTimeSeries返回参数结构体 """ def __init__(self): r""" :param TopSpaceSchemaTimeSeries: 返回的Top库空间统计信息的时序数据列表。 :type TopSpaceSchemaTimeSeries: list of SchemaSpaceTimeSeries :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TopSpaceSchemaTimeSeries = None self.RequestId = None def _deserialize(self, params): if params.get("TopSpaceSchemaTimeSeries") is not None: self.TopSpaceSchemaTimeSeries = [] for item in params.get("TopSpaceSchemaTimeSeries"): obj = SchemaSpaceTimeSeries() obj._deserialize(item) self.TopSpaceSchemaTimeSeries.append(obj) self.RequestId = params.get("RequestId") class DescribeTopSpaceSchemasRequest(AbstractModel): """DescribeTopSpaceSchemas请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param Limit: 返回的Top库数量,最大值为100,默认为20。 :type Limit: int :param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。 :type SortBy: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.Limit = None self.SortBy = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Limit = params.get("Limit") self.SortBy = params.get("SortBy") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeTopSpaceSchemasResponse(AbstractModel): """DescribeTopSpaceSchemas返回参数结构体 """ def __init__(self): r""" :param TopSpaceSchemas: 返回的Top库空间统计信息列表。 :type TopSpaceSchemas: list of SchemaSpaceData :param Timestamp: 采集库空间数据的时间戳(秒)。 :type Timestamp: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TopSpaceSchemas = None self.Timestamp = None self.RequestId = None def _deserialize(self, params): if params.get("TopSpaceSchemas") is not None: self.TopSpaceSchemas = [] for item in params.get("TopSpaceSchemas"): obj = SchemaSpaceData() obj._deserialize(item) self.TopSpaceSchemas.append(obj) self.Timestamp = params.get("Timestamp") self.RequestId = params.get("RequestId") class DescribeTopSpaceTableTimeSeriesRequest(AbstractModel): """DescribeTopSpaceTableTimeSeries请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param Limit: 返回的Top表数量,最大值为100,默认为20。 :type Limit: int :param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize,默认为 PhysicalFileSize。 :type SortBy: str :param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。 :type StartDate: str :param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。 :type EndDate: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.Limit = None self.SortBy = None self.StartDate = None self.EndDate = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Limit = params.get("Limit") self.SortBy = params.get("SortBy") self.StartDate = params.get("StartDate") self.EndDate = params.get("EndDate") self.Product = params.get("Product") memeber_set = 
set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeTopSpaceTableTimeSeriesResponse(AbstractModel): """DescribeTopSpaceTableTimeSeries返回参数结构体 """ def __init__(self): r""" :param TopSpaceTableTimeSeries: 返回的Top表空间统计信息的时序数据列表。 :type TopSpaceTableTimeSeries: list of TableSpaceTimeSeries :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TopSpaceTableTimeSeries = None self.RequestId = None def _deserialize(self, params): if params.get("TopSpaceTableTimeSeries") is not None: self.TopSpaceTableTimeSeries = [] for item in params.get("TopSpaceTableTimeSeries"): obj = TableSpaceTimeSeries() obj._deserialize(item) self.TopSpaceTableTimeSeries.append(obj) self.RequestId = params.get("RequestId") class DescribeTopSpaceTablesRequest(AbstractModel): """DescribeTopSpaceTables请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param Limit: 返回的Top表数量,最大值为100,默认为20。 :type Limit: int :param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。 :type SortBy: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.Limit = None self.SortBy = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Limit = params.get("Limit") self.SortBy = params.get("SortBy") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeTopSpaceTablesResponse(AbstractModel): """DescribeTopSpaceTables返回参数结构体 """ def __init__(self): r""" :param TopSpaceTables: 返回的Top表空间统计信息列表。 :type TopSpaceTables: list of TableSpaceData :param Timestamp: 采集表空间数据的时间戳(秒)。 :type Timestamp: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.TopSpaceTables = None self.Timestamp = None self.RequestId = None def _deserialize(self, params): if params.get("TopSpaceTables") is not None: self.TopSpaceTables = [] for item in params.get("TopSpaceTables"): obj = TableSpaceData() obj._deserialize(item) self.TopSpaceTables.append(obj) self.Timestamp = params.get("Timestamp") self.RequestId = params.get("RequestId") class DescribeUserSqlAdviceRequest(AbstractModel): """DescribeUserSqlAdvice请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param SqlText: SQL语句。 :type SqlText: str :param Schema: 库名。 :type Schema: str """ self.InstanceId = None self.SqlText = None self.Schema = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.SqlText = params.get("SqlText") self.Schema = params.get("Schema") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
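# --- Illustrative sketch (hypothetical payload, not part of the generated module) ---
# The DescribeUserSqlAdviceResponse declared just below documents Advices, SqlPlan
# and Cost as JSON-encoded strings, so callers typically run them through
# json.loads after deserializing:
#
#     import json
#     resp = DescribeUserSqlAdviceResponse()
#     resp._deserialize(api_payload)   # api_payload: dict decoded from the API reply
#     advices = json.loads(resp.Advices) if resp.Advices else []
#     plan = json.loads(resp.SqlPlan) if resp.SqlPlan else {}
# --- end sketch ---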
% ",".join(memeber_set)) class DescribeUserSqlAdviceResponse(AbstractModel): """DescribeUserSqlAdvice返回参数结构体 """ def __init__(self): r""" :param Advices: SQL优化建议,可解析为JSON数组,无需优化时输出为空。 :type Advices: str :param Comments: SQL优化建议备注,可解析为String数组,无需优化时输出为空。 :type Comments: str :param SqlText: SQL语句。 :type SqlText: str :param Schema: 库名。 :type Schema: str :param Tables: 相关表的DDL信息,可解析为JSON数组。 :type Tables: str :param SqlPlan: SQL执行计划,可解析为JSON,无需优化时输出为空。 :type SqlPlan: str :param Cost: SQL优化后的成本节约详情,可解析为JSON,无需优化时输出为空。 :type Cost: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Advices = None self.Comments = None self.SqlText = None self.Schema = None self.Tables = None self.SqlPlan = None self.Cost = None self.RequestId = None def _deserialize(self, params): self.Advices = params.get("Advices") self.Comments = params.get("Comments") self.SqlText = params.get("SqlText") self.Schema = params.get("Schema") self.Tables = params.get("Tables") self.SqlPlan = params.get("SqlPlan") self.Cost = params.get("Cost") self.RequestId = params.get("RequestId") class DiagHistoryEventItem(AbstractModel): """实例诊断历史事件 """ def __init__(self): r""" :param DiagType: 诊断类型。 :type DiagType: str :param EndTime: 结束时间。 :type EndTime: str :param StartTime: 开始时间。 :type StartTime: str :param EventId: 事件唯一ID 。 :type EventId: int :param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。 :type Severity: int :param Outline: 诊断概要。 :type Outline: str :param DiagItem: 诊断项说明。 :type DiagItem: str :param InstanceId: 实例 ID 。 :type InstanceId: str :param Metric: 保留字段。 注意:此字段可能返回 null,表示取不到有效值。 :type Metric: str :param Region: 地域。 :type Region: str """ self.DiagType = None self.EndTime = None self.StartTime = None self.EventId = None self.Severity = None self.Outline = None self.DiagItem = None self.InstanceId = None self.Metric = None self.Region = None def _deserialize(self, params): self.DiagType = params.get("DiagType") self.EndTime = params.get("EndTime") self.StartTime = params.get("StartTime") self.EventId = params.get("EventId") self.Severity = params.get("Severity") self.Outline = params.get("Outline") self.DiagItem = params.get("DiagItem") self.InstanceId = params.get("InstanceId") self.Metric = params.get("Metric") self.Region = params.get("Region") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class EventInfo(AbstractModel): """异常事件信息。 """ def __init__(self): r""" :param EventId: 事件 ID 。 :type EventId: int :param DiagType: 诊断类型。 :type DiagType: str :param StartTime: 开始时间。 :type StartTime: str :param EndTime: 结束时间。 :type EndTime: str :param Outline: 概要。 :type Outline: str :param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。 :type Severity: int :param ScoreLost: 扣分。 :type ScoreLost: int :param Metric: 保留字段。 :type Metric: str :param Count: 告警数目。 :type Count: int """ self.EventId = None self.DiagType = None self.StartTime = None self.EndTime = None self.Outline = None self.Severity = None self.ScoreLost = None self.Metric = None self.Count = None def _deserialize(self, params): self.EventId = params.get("EventId") self.DiagType = params.get("DiagType") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.Outline = params.get("Outline") self.Severity = params.get("Severity") self.ScoreLost = params.get("ScoreLost") self.Metric = params.get("Metric") self.Count = params.get("Count") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class GroupItem(AbstractModel): """描述组信息。 """ def __init__(self): r""" :param Id: 组id。 :type Id: int :param Name: 组名称。 :type Name: str :param MemberCount: 组成员数量。 :type MemberCount: int """ self.Id = None self.Name = None self.MemberCount = None def _deserialize(self, params): self.Id = params.get("Id") self.Name = params.get("Name") self.MemberCount = params.get("MemberCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class HealthReportTask(AbstractModel): """健康报告任务详情。 """ def __init__(self): r""" :param AsyncRequestId: 异步任务请求 ID。 :type AsyncRequestId: int :param Source: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。 :type Source: str :param Progress: 任务完成进度,单位%。 :type Progress: int :param CreateTime: 任务创建时间。 :type CreateTime: str :param StartTime: 任务开始执行时间。 :type StartTime: str :param EndTime: 任务完成执行时间。 :type EndTime: str :param InstanceInfo: 任务所属实例的基础信息。 :type InstanceInfo: :class:`tencentcloud.dbbrain.v20210527.models.InstanceBasicInfo` :param HealthStatus: 健康报告中的健康信息。 :type HealthStatus: :class:`tencentcloud.dbbrain.v20210527.models.HealthStatus` """ self.AsyncRequestId = None self.Source = None self.Progress = None self.CreateTime = None self.StartTime = None self.EndTime = None self.InstanceInfo = None self.HealthStatus = None def _deserialize(self, params): self.AsyncRequestId = params.get("AsyncRequestId") self.Source = params.get("Source") self.Progress = params.get("Progress") self.CreateTime = params.get("CreateTime") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") if params.get("InstanceInfo") is not None: self.InstanceInfo = InstanceBasicInfo() self.InstanceInfo._deserialize(params.get("InstanceInfo")) if params.get("HealthStatus") is not None: self.HealthStatus = HealthStatus() self.HealthStatus._deserialize(params.get("HealthStatus")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class HealthScoreInfo(AbstractModel): """获取健康得分返回的详情。 """ def __init__(self): r""" :param IssueTypes: 异常详情。 :type IssueTypes: list of IssueTypeInfo :param EventsTotalCount: 异常事件总数。 :type EventsTotalCount: int :param HealthScore: 健康得分。 :type HealthScore: int :param HealthLevel: 健康等级, 如:"HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"。 :type HealthLevel: str """ self.IssueTypes = None self.EventsTotalCount = None self.HealthScore = None self.HealthLevel = None def _deserialize(self, params): if params.get("IssueTypes") is not None: self.IssueTypes = [] for item in params.get("IssueTypes"): obj = IssueTypeInfo() obj._deserialize(item) self.IssueTypes.append(obj) self.EventsTotalCount = params.get("EventsTotalCount") self.HealthScore = params.get("HealthScore") self.HealthLevel = params.get("HealthLevel") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class HealthStatus(AbstractModel): """实例健康详情。 """ def __init__(self): r""" :param HealthScore: 健康分数,满分100。 :type HealthScore: int :param HealthLevel: 健康等级,取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK"- 危险;"HIGH_RISK" - 高危。 :type HealthLevel: str :param ScoreLost: 总扣分分数。 :type ScoreLost: int :param ScoreDetails: 扣分详情。 注意:此字段可能返回 null,表示取不到有效值。 :type ScoreDetails: list of ScoreDetail """ self.HealthScore = None self.HealthLevel = None self.ScoreLost = None self.ScoreDetails = None def _deserialize(self, params): self.HealthScore = params.get("HealthScore") self.HealthLevel = params.get("HealthLevel") self.ScoreLost = params.get("ScoreLost") if params.get("ScoreDetails") is not None: self.ScoreDetails = [] for item in params.get("ScoreDetails"): obj = ScoreDetail() obj._deserialize(item) self.ScoreDetails.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class InstanceBasicInfo(AbstractModel): """实例基础信息。 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param InstanceName: 实例名称。 :type InstanceName: str :param Vip: 实例内网IP。 :type Vip: str :param Vport: 实例内网Port。 :type Vport: int :param Product: 实例产品。 :type Product: str :param EngineVersion: 实例引擎版本。 :type EngineVersion: str """ self.InstanceId = None self.InstanceName = None self.Vip = None self.Vport = None self.Product = None self.EngineVersion = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.InstanceName = params.get("InstanceName") self.Vip = params.get("Vip") self.Vport = params.get("Vport") self.Product = params.get("Product") self.EngineVersion = params.get("EngineVersion") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class InstanceConfs(AbstractModel): """实例配置。 """ def __init__(self): r""" :param DailyInspection: 数据库巡检开关, Yes/No。 :type DailyInspection: str :param OverviewDisplay: 实例概览开关,Yes/No。 :type OverviewDisplay: str """ self.DailyInspection = None self.OverviewDisplay = None def _deserialize(self, params): self.DailyInspection = params.get("DailyInspection") self.OverviewDisplay = params.get("OverviewDisplay") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class InstanceInfo(AbstractModel): """查询实例列表,返回实例的相关信息的对象。 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param InstanceName: 实例名称。 :type InstanceName: str :param Region: 实例所属地域。 :type Region: str :param HealthScore: 健康得分。 :type HealthScore: int :param Product: 所属产品。 :type Product: str :param EventCount: 异常事件数量。 :type EventCount: int :param InstanceType: 实例类型:1:MASTER;2:DR,3:RO,4:SDR。 :type InstanceType: int :param Cpu: 核心数。 :type Cpu: int :param Memory: 内存,单位MB。 :type Memory: int :param Volume: 硬盘存储,单位GB。 :type Volume: int :param EngineVersion: 数据库版本。 :type EngineVersion: str :param Vip: 内网地址。 :type Vip: str :param Vport: 内网端口。 :type Vport: int :param Source: 接入来源。 :type Source: str :param GroupId: 分组ID。 :type GroupId: str :param GroupName: 分组组名。 :type GroupName: str :param Status: 实例状态:0:发货中;1:运行正常;4:销毁中;5:隔离中。 :type Status: int :param UniqSubnetId: 子网统一ID。 :type UniqSubnetId: str :param DeployMode: cdb类型。 :type DeployMode: str :param InitFlag: cdb实例初始化标志:0:未初始化;1:已初始化。 :type InitFlag: int :param TaskStatus: 任务状态。 :type TaskStatus: int :param UniqVpcId: 私有网络统一ID。 :type UniqVpcId: str :param InstanceConf: 实例巡检/概览的状态。 :type InstanceConf: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs` :param DeadlineTime: 资源到期时间。 :type DeadlineTime: str :param IsSupported: 是否是DBbrain支持的实例。 :type IsSupported: bool :param SecAuditStatus: 实例安全审计日志开启状态:ON: 安全审计开启;OFF: 未开启安全审计。 :type SecAuditStatus: str :param AuditPolicyStatus: 实例审计日志开启状态,ALL_AUDIT: 开启全审计;RULE_AUDIT: 开启规则审计;UNBOUND: 未开启审计。 :type AuditPolicyStatus: str :param AuditRunningStatus: 实例审计日志运行状态:normal: 运行中; paused: 欠费暂停。 :type AuditRunningStatus: str """ self.InstanceId = None self.InstanceName = None self.Region = None self.HealthScore = None self.Product = None self.EventCount = None self.InstanceType = None self.Cpu = None self.Memory = None self.Volume = None self.EngineVersion = None self.Vip = None self.Vport = None self.Source = None self.GroupId = None self.GroupName = None self.Status = None self.UniqSubnetId = None self.DeployMode = None self.InitFlag = None self.TaskStatus = None self.UniqVpcId = None self.InstanceConf = None self.DeadlineTime = None self.IsSupported = None self.SecAuditStatus = None self.AuditPolicyStatus = None self.AuditRunningStatus = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.InstanceName = params.get("InstanceName") self.Region = params.get("Region") self.HealthScore = params.get("HealthScore") self.Product = params.get("Product") self.EventCount = params.get("EventCount") self.InstanceType = params.get("InstanceType") self.Cpu = params.get("Cpu") self.Memory = params.get("Memory") self.Volume = params.get("Volume") self.EngineVersion = params.get("EngineVersion") self.Vip = params.get("Vip") self.Vport = params.get("Vport") self.Source = params.get("Source") self.GroupId = params.get("GroupId") 
self.GroupName = params.get("GroupName") self.Status = params.get("Status") self.UniqSubnetId = params.get("UniqSubnetId") self.DeployMode = params.get("DeployMode") self.InitFlag = params.get("InitFlag") self.TaskStatus = params.get("TaskStatus") self.UniqVpcId = params.get("UniqVpcId") if params.get("InstanceConf") is not None: self.InstanceConf = InstanceConfs() self.InstanceConf._deserialize(params.get("InstanceConf")) self.DeadlineTime = params.get("DeadlineTime") self.IsSupported = params.get("IsSupported") self.SecAuditStatus = params.get("SecAuditStatus") self.AuditPolicyStatus = params.get("AuditPolicyStatus") self.AuditRunningStatus = params.get("AuditRunningStatus") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class IssueTypeInfo(AbstractModel): """指标信息。 """ def __init__(self): r""" :param IssueType: 指标分类:AVAILABILITY:可用性,MAINTAINABILITY:可维护性,PERFORMANCE,性能,RELIABILITY可靠性。 :type IssueType: str :param Events: 异常事件。 :type Events: list of EventInfo :param TotalCount: 异常事件总数。 :type TotalCount: int """ self.IssueType = None self.Events = None self.TotalCount = None def _deserialize(self, params): self.IssueType = params.get("IssueType") if params.get("Events") is not None: self.Events = [] for item in params.get("Events"): obj = EventInfo() obj._deserialize(item) self.Events.append(obj) self.TotalCount = params.get("TotalCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class KillMySqlThreadsRequest(AbstractModel): """KillMySqlThreads请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID。 :type InstanceId: str :param Stage: kill会话任务的阶段,取值包括:"Prepare"-准备阶段,"Commit"-提交阶段。 :type Stage: str :param Threads: 需要kill的sql会话ID列表,此参数用于Prepare阶段。 :type Threads: list of int :param SqlExecId: 执行ID,此参数用于Commit阶段。 :type SqlExecId: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """ self.InstanceId = None self.Stage = None self.Threads = None self.SqlExecId = None self.Product = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Stage = params.get("Stage") self.Threads = params.get("Threads") self.SqlExecId = params.get("SqlExecId") self.Product = params.get("Product") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
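# --- Illustrative sketch (hypothetical flow, not part of the generated module) ---
# Killing sessions with the KillMySqlThreads models above and just below is a
# two-step protocol: a "Prepare" call registers the thread IDs and returns a
# SqlExecId, which a second "Commit" call then executes. `client` stands for a
# DBbrain client object built with the usual SDK pattern, not shown in this module.
#
#     prep = KillMySqlThreadsRequest()
#     prep.InstanceId = "cdb-example"
#     prep.Stage = "Prepare"
#     prep.Threads = [1234, 5678]
#     sql_exec_id = client.KillMySqlThreads(prep).SqlExecId
#
#     commit = KillMySqlThreadsRequest()
#     commit.InstanceId = "cdb-example"
#     commit.Stage = "Commit"
#     commit.SqlExecId = sql_exec_id
#     killed = client.KillMySqlThreads(commit).Threads
# --- end sketch ---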
% ",".join(memeber_set)) class KillMySqlThreadsResponse(AbstractModel): """KillMySqlThreads返回参数结构体 """ def __init__(self): r""" :param Threads: kill完成的sql会话ID列表。 :type Threads: list of int :param SqlExecId: 执行ID, Prepare阶段的任务输出,用于Commit阶段中指定执行kill操作的会话ID。 注意:此字段可能返回 null,表示取不到有效值。 :type SqlExecId: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Threads = None self.SqlExecId = None self.RequestId = None def _deserialize(self, params): self.Threads = params.get("Threads") self.SqlExecId = params.get("SqlExecId") self.RequestId = params.get("RequestId") class MailConfiguration(AbstractModel): """邮件发送配置 """ def __init__(self): r""" :param SendMail: 是否开启邮件发送: 0, 否; 1, 是。 :type SendMail: int :param Region: 地域配置, 如["ap-guangzhou", "ap-shanghai"]。巡检的邮件发送模版,配置需要发送巡检邮件的地域;订阅的邮件发送模版,配置当前订阅实例的所属地域。 :type Region: list of str :param HealthStatus: 发送指定的健康等级的报告, 如["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"]。 :type HealthStatus: list of str :param ContactPerson: 联系人id, 联系人/联系组不能都为空。 :type ContactPerson: list of int :param ContactGroup: 联系组id, 联系人/联系组不能都为空。 :type ContactGroup: list of int """ self.SendMail = None self.Region = None self.HealthStatus = None self.ContactPerson = None self.ContactGroup = None def _deserialize(self, params): self.SendMail = params.get("SendMail") self.Region = params.get("Region") self.HealthStatus = params.get("HealthStatus") self.ContactPerson = params.get("ContactPerson") self.ContactGroup = params.get("ContactGroup") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ModifyDiagDBInstanceConfRequest(AbstractModel): """ModifyDiagDBInstanceConf请求参数结构体 """ def __init__(self): r""" :param InstanceConfs: 实例配置,包括巡检、概览开关等。 :type InstanceConfs: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs` :param Regions: 生效实例地域,取值为"All",代表全地域。 :type Regions: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。 :type Product: str :param InstanceIds: 指定更改巡检状态的实例ID。 :type InstanceIds: list of str """ self.InstanceConfs = None self.Regions = None self.Product = None self.InstanceIds = None def _deserialize(self, params): if params.get("InstanceConfs") is not None: self.InstanceConfs = InstanceConfs() self.InstanceConfs._deserialize(params.get("InstanceConfs")) self.Regions = params.get("Regions") self.Product = params.get("Product") self.InstanceIds = params.get("InstanceIds") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ModifyDiagDBInstanceConfResponse(AbstractModel): """ModifyDiagDBInstanceConf返回参数结构体 """ def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None def _deserialize(self, params): self.RequestId = params.get("RequestId") class MonitorFloatMetric(AbstractModel): """监控数据(浮点型) """ def __init__(self): r""" :param Metric: 指标名称。 :type Metric: str :param Unit: 指标单位。 :type Unit: str :param Values: 指标值。 注意:此字段可能返回 null,表示取不到有效值。 :type Values: list of float """ self.Metric = None self.Unit = None self.Values = None def _deserialize(self, params): self.Metric = params.get("Metric") self.Unit = params.get("Unit") self.Values = params.get("Values") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MonitorFloatMetricSeriesData(AbstractModel): """单位时间间隔内的监控指标数据(浮点型) """ def __init__(self): r""" :param Series: 监控指标。 :type Series: list of MonitorFloatMetric :param Timestamp: 监控指标对应的时间戳。 :type Timestamp: list of int """ self.Series = None self.Timestamp = None def _deserialize(self, params): if params.get("Series") is not None: self.Series = [] for item in params.get("Series"): obj = MonitorFloatMetric() obj._deserialize(item) self.Series.append(obj) self.Timestamp = params.get("Timestamp") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MonitorMetric(AbstractModel): """监控数据 """ def __init__(self): r""" :param Metric: 指标名称。 :type Metric: str :param Unit: 指标单位。 :type Unit: str :param Values: 指标值。 注意:此字段可能返回 null,表示取不到有效值。 :type Values: list of float """ self.Metric = None self.Unit = None self.Values = None def _deserialize(self, params): self.Metric = params.get("Metric") self.Unit = params.get("Unit") self.Values = params.get("Values") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MonitorMetricSeriesData(AbstractModel): """单位时间间隔内的监控指标数据 """ def __init__(self): r""" :param Series: 监控指标。 :type Series: list of MonitorMetric :param Timestamp: 监控指标对应的时间戳。 :type Timestamp: list of int """ self.Series = None self.Timestamp = None def _deserialize(self, params): if params.get("Series") is not None: self.Series = [] for item in params.get("Series"): obj = MonitorMetric() obj._deserialize(item) self.Series.append(obj) self.Timestamp = params.get("Timestamp") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MySqlProcess(AbstractModel): """关系型数据库线程 """ def __init__(self): r""" :param ID: 线程ID。 :type ID: str :param User: 线程的操作账号名。 :type User: str :param Host: 线程的操作主机地址。 :type Host: str :param DB: 线程的操作数据库。 :type DB: str :param State: 线程的操作状态。 :type State: str :param Command: 线程的执行类型。 :type Command: str :param Time: 线程的操作时长,单位秒。 :type Time: str :param Info: 线程的操作语句。 :type Info: str """ self.ID = None self.User = None self.Host = None self.DB = None self.State = None self.Command = None self.Time = None self.Info = None def _deserialize(self, params): self.ID = params.get("ID") self.User = params.get("User") self.Host = params.get("Host") self.DB = params.get("DB") self.State = params.get("State") self.Command = params.get("Command") self.Time = params.get("Time") self.Info = params.get("Info") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ProfileInfo(AbstractModel): """用户配置的信息 """ def __init__(self): r""" :param Language: 语言, 如"zh"。 :type Language: str :param MailConfiguration: 邮件模板的内容。 :type MailConfiguration: :class:`tencentcloud.dbbrain.v20210527.models.MailConfiguration` """ self.Language = None self.MailConfiguration = None def _deserialize(self, params): self.Language = params.get("Language") if params.get("MailConfiguration") is not None: self.MailConfiguration = MailConfiguration() self.MailConfiguration._deserialize(params.get("MailConfiguration")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SchemaItem(AbstractModel): """SchemaItem数组 """ def __init__(self): r""" :param Schema: 数据库名称 :type Schema: str """ self.Schema = None def _deserialize(self, params): self.Schema = params.get("Schema") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SchemaSpaceData(AbstractModel): """库空间统计数据。 """ def __init__(self): r""" :param TableSchema: 库名。 :type TableSchema: str :param DataLength: 数据空间(MB)。 :type DataLength: float :param IndexLength: 索引空间(MB)。 :type IndexLength: float :param DataFree: 碎片空间(MB)。 :type DataFree: float :param TotalLength: 总使用空间(MB)。 :type TotalLength: float :param FragRatio: 碎片率(%)。 :type FragRatio: float :param TableRows: 行数。 :type TableRows: int :param PhysicalFileSize: 库中所有表对应的独立物理文件大小加和(MB)。 注意:此字段可能返回 null,表示取不到有效值。 :type PhysicalFileSize: float """ self.TableSchema = None self.DataLength = None self.IndexLength = None self.DataFree = None self.TotalLength = None self.FragRatio = None self.TableRows = None self.PhysicalFileSize = None def _deserialize(self, params): self.TableSchema = params.get("TableSchema") self.DataLength = params.get("DataLength") self.IndexLength = params.get("IndexLength") self.DataFree = params.get("DataFree") self.TotalLength = params.get("TotalLength") self.FragRatio = params.get("FragRatio") self.TableRows = params.get("TableRows") self.PhysicalFileSize = params.get("PhysicalFileSize") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SchemaSpaceTimeSeries(AbstractModel): """库空间时序数据 """ def __init__(self): r""" :param TableSchema: 库名 :type TableSchema: str :param SeriesData: 单位时间间隔内的空间指标数据。 :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData` """ self.TableSchema = None self.SeriesData = None def _deserialize(self, params): self.TableSchema = params.get("TableSchema") if params.get("SeriesData") is not None: self.SeriesData = MonitorMetricSeriesData() self.SeriesData._deserialize(params.get("SeriesData")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ScoreDetail(AbstractModel): """扣分详情。 """ def __init__(self): r""" :param IssueType: 扣分项分类,取值包括:可用性、可维护性、性能及可靠性。 :type IssueType: str :param ScoreLost: 扣分总分。 :type ScoreLost: int :param ScoreLostMax: 扣分总分上限。 :type ScoreLostMax: int :param Items: 扣分项列表。 注意:此字段可能返回 null,表示取不到有效值。 :type Items: list of ScoreItem """ self.IssueType = None self.ScoreLost = None self.ScoreLostMax = None self.Items = None def _deserialize(self, params): self.IssueType = params.get("IssueType") self.ScoreLost = params.get("ScoreLost") self.ScoreLostMax = params.get("ScoreLostMax") if params.get("Items") is not None: self.Items = [] for item in params.get("Items"): obj = ScoreItem() obj._deserialize(item) self.Items.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ScoreItem(AbstractModel): """诊断扣分项。 """ def __init__(self): r""" :param DiagItem: 异常诊断项名称。 :type DiagItem: str :param IssueType: 诊断项分类,取值包括:可用性、可维护性、性能及可靠性。 :type IssueType: str :param TopSeverity: 健康等级,取值包括:信息、提示、告警、严重、致命。 :type TopSeverity: str :param Count: 该异常诊断项出现次数。 :type Count: int :param ScoreLost: 扣分分数。 :type ScoreLost: int """ self.DiagItem = None self.IssueType = None self.TopSeverity = None self.Count = None self.ScoreLost = None def _deserialize(self, params): self.DiagItem = params.get("DiagItem") self.IssueType = params.get("IssueType") self.TopSeverity = params.get("TopSeverity") self.Count = params.get("Count") self.ScoreLost = params.get("ScoreLost") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SecLogExportTaskInfo(AbstractModel): """安全审计日志导出任务信息 """ def __init__(self): r""" :param AsyncRequestId: 异步任务Id。 :type AsyncRequestId: int :param StartTime: 任务开始时间。 注意:此字段可能返回 null,表示取不到有效值。 :type StartTime: str :param EndTime: 任务结束时间。 注意:此字段可能返回 null,表示取不到有效值。 :type EndTime: str :param CreateTime: 任务创建时间。 :type CreateTime: str :param Status: 任务状态。 :type Status: str :param Progress: 任务执行进度。 :type Progress: int :param LogStartTime: 导出日志开始时间。 注意:此字段可能返回 null,表示取不到有效值。 :type LogStartTime: str :param LogEndTime: 导出日志结束时间。 注意:此字段可能返回 null,表示取不到有效值。 :type LogEndTime: str :param TotalSize: 日志文件总大小,单位KB。 注意:此字段可能返回 null,表示取不到有效值。 :type TotalSize: int :param DangerLevels: 风险等级列表。0 无风险;1 低风险;2 中风险;3 高风险。 注意:此字段可能返回 null,表示取不到有效值。 :type DangerLevels: list of int non-negative """ self.AsyncRequestId = None self.StartTime = None self.EndTime = None self.CreateTime = None self.Status = None self.Progress = None self.LogStartTime = None self.LogEndTime = None self.TotalSize = None self.DangerLevels = None def _deserialize(self, params): self.AsyncRequestId = params.get("AsyncRequestId") self.StartTime = params.get("StartTime") self.EndTime = params.get("EndTime") self.CreateTime = params.get("CreateTime") self.Status = params.get("Status") self.Progress = params.get("Progress") self.LogStartTime = params.get("LogStartTime") self.LogEndTime = params.get("LogEndTime") self.TotalSize = params.get("TotalSize") self.DangerLevels = params.get("DangerLevels") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SlowLogHost(AbstractModel): """慢日志来源地址详情。 """ def __init__(self): r""" :param UserHost: 来源地址。 :type UserHost: str :param Ratio: 该来源地址的慢日志数目占总数目的比例,单位%。 :type Ratio: float :param Count: 该来源地址的慢日志数目。 :type Count: int """ self.UserHost = None self.Ratio = None self.Count = None def _deserialize(self, params): self.UserHost = params.get("UserHost") self.Ratio = params.get("Ratio") self.Count = params.get("Count") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SlowLogTopSqlItem(AbstractModel): """慢日志TopSql """ def __init__(self): r""" :param LockTime: sql总锁等待时间,单位秒 :type LockTime: float :param LockTimeMax: 最大锁等待时间,单位秒 :type LockTimeMax: float :param LockTimeMin: 最小锁等待时间,单位秒 :type LockTimeMin: float :param RowsExamined: 总扫描行数 :type RowsExamined: int :param RowsExaminedMax: 最大扫描行数 :type RowsExaminedMax: int :param RowsExaminedMin: 最小扫描行数 :type RowsExaminedMin: int :param QueryTime: 总耗时,单位秒 :type QueryTime: float :param QueryTimeMax: 最大执行时间,单位秒 :type QueryTimeMax: float :param QueryTimeMin: 最小执行时间,单位秒 :type QueryTimeMin: float :param RowsSent: 总返回行数 :type RowsSent: int :param RowsSentMax: 最大返回行数 :type RowsSentMax: int :param RowsSentMin: 最小返回行数 :type RowsSentMin: int :param ExecTimes: 执行次数 :type ExecTimes: int :param SqlTemplate: sql模板 :type SqlTemplate: str :param SqlText: 带参数SQL(随机) :type SqlText: str :param Schema: 数据库名 :type Schema: str :param QueryTimeRatio: 总耗时占比,单位% :type QueryTimeRatio: float :param LockTimeRatio: sql总锁等待时间占比,单位% :type LockTimeRatio: float :param RowsExaminedRatio: 总扫描行数占比,单位% :type RowsExaminedRatio: float :param RowsSentRatio: 总返回行数占比,单位% :type RowsSentRatio: float :param QueryTimeAvg: 平均执行时间,单位秒 :type QueryTimeAvg: float :param RowsSentAvg: 平均返回行数 :type RowsSentAvg: float :param LockTimeAvg: 平均锁等待时间,单位秒 :type LockTimeAvg: float :param RowsExaminedAvg: 平均扫描行数 :type RowsExaminedAvg: float :param Md5: SOL模板的MD5值 :type Md5: str """ self.LockTime = None self.LockTimeMax = None self.LockTimeMin = None self.RowsExamined = None self.RowsExaminedMax = None self.RowsExaminedMin = None self.QueryTime = None self.QueryTimeMax = None self.QueryTimeMin = None self.RowsSent = None self.RowsSentMax = None self.RowsSentMin = None self.ExecTimes = None self.SqlTemplate = None self.SqlText = None self.Schema = None self.QueryTimeRatio = None self.LockTimeRatio = None self.RowsExaminedRatio = None self.RowsSentRatio = None self.QueryTimeAvg = None self.RowsSentAvg = None self.LockTimeAvg = None self.RowsExaminedAvg = None self.Md5 = None def _deserialize(self, params): self.LockTime = params.get("LockTime") self.LockTimeMax = params.get("LockTimeMax") self.LockTimeMin = params.get("LockTimeMin") self.RowsExamined = params.get("RowsExamined") self.RowsExaminedMax = params.get("RowsExaminedMax") self.RowsExaminedMin = params.get("RowsExaminedMin") self.QueryTime = params.get("QueryTime") self.QueryTimeMax = params.get("QueryTimeMax") self.QueryTimeMin = params.get("QueryTimeMin") self.RowsSent = params.get("RowsSent") self.RowsSentMax = params.get("RowsSentMax") self.RowsSentMin = params.get("RowsSentMin") self.ExecTimes = params.get("ExecTimes") self.SqlTemplate = params.get("SqlTemplate") self.SqlText = params.get("SqlText") self.Schema = params.get("Schema") self.QueryTimeRatio = params.get("QueryTimeRatio") self.LockTimeRatio = params.get("LockTimeRatio") self.RowsExaminedRatio = params.get("RowsExaminedRatio") self.RowsSentRatio = params.get("RowsSentRatio") self.QueryTimeAvg = params.get("QueryTimeAvg") self.RowsSentAvg = params.get("RowsSentAvg") self.LockTimeAvg = params.get("LockTimeAvg") self.RowsExaminedAvg = params.get("RowsExaminedAvg") self.Md5 = params.get("Md5") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TableSpaceData(AbstractModel): """库表空间统计数据。 """ def __init__(self): r""" :param TableName: 表名。 :type TableName: str :param TableSchema: 库名。 :type TableSchema: str :param Engine: 库表的存储引擎。 :type Engine: str :param DataLength: 数据空间(MB)。 :type DataLength: float :param IndexLength: 索引空间(MB)。 :type IndexLength: float :param DataFree: 碎片空间(MB)。 :type DataFree: float :param TotalLength: 总使用空间(MB)。 :type TotalLength: float :param FragRatio: 碎片率(%)。 :type FragRatio: float :param TableRows: 行数。 :type TableRows: int :param PhysicalFileSize: 表对应的独立物理文件大小(MB)。 :type PhysicalFileSize: float """ self.TableName = None self.TableSchema = None self.Engine = None self.DataLength = None self.IndexLength = None self.DataFree = None self.TotalLength = None self.FragRatio = None self.TableRows = None self.PhysicalFileSize = None def _deserialize(self, params): self.TableName = params.get("TableName") self.TableSchema = params.get("TableSchema") self.Engine = params.get("Engine") self.DataLength = params.get("DataLength") self.IndexLength = params.get("IndexLength") self.DataFree = params.get("DataFree") self.TotalLength = params.get("TotalLength") self.FragRatio = params.get("FragRatio") self.TableRows = params.get("TableRows") self.PhysicalFileSize = params.get("PhysicalFileSize") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TableSpaceTimeSeries(AbstractModel): """库表空间时序数据 """ def __init__(self): r""" :param TableName: 表名。 :type TableName: str :param TableSchema: 库名。 :type TableSchema: str :param Engine: 库表的存储引擎。 :type Engine: str :param SeriesData: 单位时间间隔内的空间指标数据。 :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorFloatMetricSeriesData` """ self.TableName = None self.TableSchema = None self.Engine = None self.SeriesData = None def _deserialize(self, params): self.TableName = params.get("TableName") self.TableSchema = params.get("TableSchema") self.Engine = params.get("Engine") if params.get("SeriesData") is not None: self.SeriesData = MonitorFloatMetricSeriesData() self.SeriesData._deserialize(params.get("SeriesData")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TimeSlice(AbstractModel): """单位时间间隔内的慢日志统计 """ def __init__(self): r""" :param Count: 总数 :type Count: int :param Timestamp: 统计开始时间 :type Timestamp: int """ self.Count = None self.Timestamp = None def _deserialize(self, params): self.Count = params.get("Count") self.Timestamp = params.get("Timestamp") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class UserProfile(AbstractModel): """用户配置的相关信息,包括邮件配置。 """ def __init__(self): r""" :param ProfileId: 配置的id。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileId: str :param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileType: str :param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileLevel: str :param ProfileName: 配置名称。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileName: str :param ProfileInfo: 配置详情。 :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo` """ self.ProfileId = None self.ProfileType = None self.ProfileLevel = None self.ProfileName = None self.ProfileInfo = None def _deserialize(self, params): self.ProfileId = params.get("ProfileId") self.ProfileType = params.get("ProfileType") self.ProfileLevel = params.get("ProfileLevel") self.ProfileName = params.get("ProfileName") if params.get("ProfileInfo") is not None: self.ProfileInfo = ProfileInfo() self.ProfileInfo._deserialize(params.get("ProfileInfo")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set))
oauth/provider.py
giuseppe/quay
2,027
795
<filename>oauth/provider.py<gh_stars>1000+ # Ported to Python 3 # Originally from https://github.com/DeprecatedCode/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py import json import logging from requests import Response from io import StringIO try: from werkzeug.exceptions import Unauthorized except ImportError: Unauthorized = Exception from oauth import utils class Provider(object): """Base provider class for different types of OAuth 2.0 providers.""" def _handle_exception(self, exc): """Handle an internal exception that was caught and suppressed. :param exc: Exception to process. :type exc: Exception """ logger = logging.getLogger(__name__) logger.exception(exc) def _make_response(self, body="", headers=None, status_code=200): """Return a response object from the given parameters. :param body: Buffer/string containing the response body. :type body: str :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """ res = Response() res.status_code = status_code if headers is not None: res.headers.update(headers) res.raw = StringIO(body) return res def _make_redirect_error_response(self, redirect_uri, err): """Return a HTTP 302 redirect response object containing the error. :param redirect_uri: Client redirect URI. :type redirect_uri: str :param err: OAuth error message. :type err: str :rtype: requests.Response """ params = {"error": err, "response_type": None, "client_id": None, "redirect_uri": None} redirect = utils.build_url(redirect_uri, params) return self._make_response(headers={"Location": redirect}, status_code=302) def _make_json_response(self, data, headers=None, status_code=200): """Return a response object from the given JSON data. :param data: Data to JSON-encode. :type data: mixed :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """ response_headers = {} if headers is not None: response_headers.update(headers) response_headers["Content-Type"] = "application/json;charset=UTF-8" response_headers["Cache-Control"] = "no-store" response_headers["Pragma"] = "no-cache" return self._make_response(json.dumps(data), response_headers, status_code) def _make_json_error_response(self, err): """Return a JSON-encoded response object representing the error. :param err: OAuth error message. :type err: str :rtype: requests.Response """ return self._make_json_response({"error": err}, status_code=400) def _invalid_redirect_uri_response(self): """What to return when the redirect_uri parameter is missing. :rtype: requests.Response """ return self._make_json_error_response("invalid_request") class AuthorizationProvider(Provider): """OAuth 2.0 authorization provider. This class manages authorization codes and access tokens. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a provider. 
These are the methods that must be implemented in a subclass: validate_client_id(self, client_id) # Return True or False validate_client_secret(self, client_id, client_secret) # Return True or False validate_scope(self, client_id, scope) # Return True or False validate_redirect_uri(self, client_id, redirect_uri) # Return True or False validate_access(self) # Use this to validate your app session user # Return True or False from_authorization_code(self, client_id, code, scope) # Return mixed data or None on invalid from_refresh_token(self, client_id, refresh_token, scope) # Return mixed data or None on invalid persist_authorization_code(self, client_id, code, scope) # Return value ignored persist_token_information(self, client_id, scope, access_token, token_type, expires_in, refresh_token, data) # Return value ignored discard_authorization_code(self, client_id, code) # Return value ignored discard_refresh_token(self, client_id, refresh_token) # Return value ignored Optionally, the following may be overridden to acheive desired behavior: @property token_length(self) @property token_type(self) @property token_expires_in(self) generate_authorization_code(self) generate_access_token(self) generate_refresh_token(self) """ @property def token_length(self): """Property method to get the length used to generate tokens. :rtype: int """ return 40 @property def token_type(self): """Property method to get the access token type. :rtype: str """ return "Bearer" @property def token_expires_in(self): """Property method to get the token expiration time in seconds. :rtype: int """ return 3600 def generate_authorization_code(self): """Generate a random authorization code. :rtype: str """ return utils.random_ascii_string(self.token_length) def generate_access_token(self): """Generate a random access token. :rtype: str """ return utils.random_ascii_string(self.token_length) def generate_refresh_token(self): """Generate a random refresh token. :rtype: str """ return utils.random_ascii_string(self.token_length) def get_authorization_code(self, response_type, client_id, redirect_uri, **params): """Generate authorization code HTTP response. :param response_type: Desired response type. Must be exactly "code". :type response_type: str :param client_id: Client ID. :type client_id: str :param redirect_uri: Client redirect URI. 
:type redirect_uri: str :rtype: requests.Response """ # Ensure proper response_type if response_type != "code": err = "unsupported_response_type" return self._make_redirect_error_response(redirect_uri, err) # Check redirect URI is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri) if not is_valid_redirect_uri: return self._invalid_redirect_uri_response() # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_access = self.validate_access() scope = params.get("scope", "") is_valid_scope = self.validate_scope(client_id, scope) # Return proper error responses on invalid conditions if not is_valid_client_id: err = "unauthorized_client" return self._make_redirect_error_response(redirect_uri, err) if not is_valid_access: err = "access_denied" return self._make_redirect_error_response(redirect_uri, err) if not is_valid_scope: err = "invalid_scope" return self._make_redirect_error_response(redirect_uri, err) # Generate authorization code code = self.generate_authorization_code() # Save information to be used to validate later requests self.persist_authorization_code(client_id=client_id, code=code, scope=scope) # Return redirection response params.update( {"code": code, "response_type": None, "client_id": None, "redirect_uri": None} ) redirect = utils.build_url(redirect_uri, params) return self._make_response(headers={"Location": redirect}, status_code=302) def refresh_token(self, grant_type, client_id, client_secret, refresh_token, **params): """Generate access token HTTP response from a refresh token. :param grant_type: Desired grant type. Must be "refresh_token". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param refresh_token: Refresh token. :type refresh_token: str :rtype: requests.Response """ # Ensure proper grant_type if grant_type != "refresh_token": return self._make_json_error_response("unsupported_grant_type") # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_client_secret = self.validate_client_secret(client_id, client_secret) scope = params.get("scope", "") is_valid_scope = self.validate_scope(client_id, scope) data = self.from_refresh_token(client_id, refresh_token, scope) is_valid_refresh_token = data is not None # Return proper error responses on invalid conditions if not (is_valid_client_id and is_valid_client_secret): return self._make_json_error_response("invalid_client") if not is_valid_scope: return self._make_json_error_response("invalid_scope") if not is_valid_refresh_token: return self._make_json_error_response("invalid_grant") # Discard original refresh token self.discard_refresh_token(client_id, refresh_token) # Generate access tokens once all conditions have been met access_token = self.generate_access_token() token_type = self.token_type expires_in = self.token_expires_in refresh_token = self.generate_refresh_token() # Save information to be used to validate later requests self.persist_token_information( client_id=client_id, scope=scope, access_token=access_token, token_type=token_type, expires_in=expires_in, refresh_token=refresh_token, data=data, ) # Return json response return self._make_json_response( { "access_token": access_token, "token_type": token_type, "expires_in": expires_in, "refresh_token": refresh_token, } ) def get_token(self, grant_type, client_id, client_secret, redirect_uri, code, **params): """Generate access token HTTP response. :param grant_type: Desired grant type. 
Must be "authorization_code". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param redirect_uri: Client redirect URI. :type redirect_uri: str :param code: Authorization code. :type code: str :rtype: requests.Response """ # Ensure proper grant_type if grant_type != "authorization_code": return self._make_json_error_response("unsupported_grant_type") # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_client_secret = self.validate_client_secret(client_id, client_secret) is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri) scope = params.get("scope", "") is_valid_scope = self.validate_scope(client_id, scope) data = self.from_authorization_code(client_id, code, scope) is_valid_grant = data is not None # Return proper error responses on invalid conditions if not (is_valid_client_id and is_valid_client_secret): return self._make_json_error_response("invalid_client") if not is_valid_grant or not is_valid_redirect_uri: return self._make_json_error_response("invalid_grant") if not is_valid_scope: return self._make_json_error_response("invalid_scope") # Discard original authorization code self.discard_authorization_code(client_id, code) # Generate access tokens once all conditions have been met access_token = self.generate_access_token() token_type = self.token_type expires_in = self.token_expires_in refresh_token = self.generate_refresh_token() # Save information to be used to validate later requests self.persist_token_information( client_id=client_id, scope=scope, access_token=access_token, token_type=token_type, expires_in=expires_in, refresh_token=refresh_token, data=data, ) # Return json response return self._make_json_response( { "access_token": access_token, "token_type": token_type, "expires_in": expires_in, "refresh_token": refresh_token, } ) def get_authorization_code_from_uri(self, uri): """Get authorization code response from a URI. This method will ignore the domain and path of the request, instead automatically parsing the query string parameters. :param uri: URI to parse for authorization information. :type uri: str :rtype: requests.Response """ params = utils.url_query_params(uri) try: if "response_type" not in params: raise TypeError("Missing parameter response_type in URL query") if "client_id" not in params: raise TypeError("Missing parameter client_id in URL query") if "redirect_uri" not in params: raise TypeError("Missing parameter redirect_uri in URL query") return self.get_authorization_code(**params) except TypeError as exc: self._handle_exception(exc) # Catch missing parameters in request err = "invalid_request" if "redirect_uri" in params: u = params["redirect_uri"] return self._make_redirect_error_response(u, err) else: return self._invalid_redirect_uri_response() except StandardError as exc: self._handle_exception(exc) # Catch all other server errors err = "server_error" u = params["redirect_uri"] return self._make_redirect_error_response(u, err) def get_token_from_post_data(self, data): """Get a token response from POST data. :param data: POST data containing authorization information. 
:type data: dict :rtype: requests.Response """ try: # Verify OAuth 2.0 Parameters for x in ["grant_type", "client_id", "client_secret"]: if not data.get(x): raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x)) # Handle get token from refresh_token if "refresh_token" in data: return self.refresh_token(**data) # Handle get token from authorization code for x in ["redirect_uri", "code"]: if not data.get(x): raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x)) return self.get_token(**data) except TypeError as exc: self._handle_exception(exc) # Catch missing parameters in request return self._make_json_error_response("invalid_request") except StandardError as exc: self._handle_exception(exc) # Catch all other server errors return self._make_json_error_response("server_error") def validate_client_id(self, client_id): raise NotImplementedError("Subclasses must implement " "validate_client_id.") def validate_client_secret(self, client_id, client_secret): raise NotImplementedError("Subclasses must implement " "validate_client_secret.") def validate_redirect_uri(self, client_id, redirect_uri): raise NotImplementedError("Subclasses must implement " "validate_redirect_uri.") def validate_scope(self, client_id, scope): raise NotImplementedError("Subclasses must implement " "validate_scope.") def validate_access(self): raise NotImplementedError("Subclasses must implement " "validate_access.") def from_authorization_code(self, client_id, code, scope): raise NotImplementedError("Subclasses must implement " "from_authorization_code.") def from_refresh_token(self, client_id, refresh_token, scope): raise NotImplementedError("Subclasses must implement " "from_refresh_token.") def persist_authorization_code(self, client_id, code, scope): raise NotImplementedError("Subclasses must implement " "persist_authorization_code.") def persist_token_information( self, client_id, scope, access_token, token_type, expires_in, refresh_token, data ): raise NotImplementedError("Subclasses must implement " "persist_token_information.") def discard_authorization_code(self, client_id, code): raise NotImplementedError("Subclasses must implement " "discard_authorization_code.") def discard_refresh_token(self, client_id, refresh_token): raise NotImplementedError("Subclasses must implement " "discard_refresh_token.") class OAuthError(Unauthorized): """OAuth error, including the OAuth error reason.""" def __init__(self, reason, *args, **kwargs): self.reason = reason super(OAuthError, self).__init__(*args, **kwargs) class ResourceAuthorization(object): """A class containing an OAuth 2.0 authorization.""" is_oauth = False is_valid = None token = None client_id = None expires_in = None error = None def raise_error_if_invalid(self): if not self.is_valid: raise OAuthError(self.error, "OAuth authorization error") class ResourceProvider(Provider): """OAuth 2.0 resource provider. This class provides an interface to validate an incoming request and authenticate resource access. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a resource provider. These are the methods that must be implemented in a subclass: get_authorization_header(self) # Return header string for key "Authorization" or None validate_access_token(self, access_token, authorization) # Set is_valid=True, client_id, and expires_in attributes # on authorization if authorization was successful. 
# Return value is ignored """ @property def authorization_class(self): return ResourceAuthorization def get_authorization(self): """Get authorization object representing status of authentication.""" auth = self.authorization_class() header = self.get_authorization_header() if not header or not header.split: return auth header = header.split() if len(header) > 1 and header[0] == "Bearer": auth.is_oauth = True access_token = header[1] self.validate_access_token(access_token, auth) if not auth.is_valid: auth.error = "access_denied" return auth def get_authorization_header(self): raise NotImplementedError("Subclasses must implement " "get_authorization_header.") def validate_access_token(self, access_token, authorization): raise NotImplementedError("Subclasses must implement " "validate_token.")
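The AuthorizationProvider docstring above lists the methods a subclass must supply. Below is a minimal in-memory subclass sketch, purely illustrative: the client id, secret, redirect URI, and storage dicts are invented for the example and are not how the surrounding project implements its provider.

class InMemoryProvider(AuthorizationProvider):
    """Illustrative in-memory provider; not suitable for production use."""

    CLIENTS = {"demo-client": "demo-secret"}                      # hypothetical credentials
    REDIRECT_URIS = {"demo-client": "https://example.com/callback"}

    def __init__(self):
        self.codes = {}    # (client_id, code) -> data
        self.tokens = {}   # (client_id, refresh_token) -> data

    def validate_client_id(self, client_id):
        return client_id in self.CLIENTS

    def validate_client_secret(self, client_id, client_secret):
        return self.CLIENTS.get(client_id) == client_secret

    def validate_redirect_uri(self, client_id, redirect_uri):
        return self.REDIRECT_URIS.get(client_id) == redirect_uri.split("?", 1)[0]

    def validate_scope(self, client_id, scope):
        return True   # accept any scope in this sketch

    def validate_access(self):
        return True   # assume the app session user is authenticated

    def from_authorization_code(self, client_id, code, scope):
        return self.codes.get((client_id, code))

    def from_refresh_token(self, client_id, refresh_token, scope):
        return self.tokens.get((client_id, refresh_token))

    def persist_authorization_code(self, client_id, code, scope):
        self.codes[(client_id, code)] = {"scope": scope}

    def persist_token_information(self, client_id, scope, access_token, token_type,
                                  expires_in, refresh_token, data):
        self.tokens[(client_id, refresh_token)] = data

    def discard_authorization_code(self, client_id, code):
        self.codes.pop((client_id, code), None)

    def discard_refresh_token(self, client_id, refresh_token):
        self.tokens.pop((client_id, refresh_token), None)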
tests/dummies.py
arvindmuralie77/gradsflow
253
797
# Copyright (c) 2021 GradsFlow. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from gradsflow.models import Model class DummyModel(Model): def __init__(self): learner = torch.nn.Linear(1, 4) super().__init__(learner) def backward(self, loss: torch.Tensor): return None def train_step(self, batch): return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}} def val_step(self, batch): return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
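DummyModel stubs out backward, train_step, and val_step so training machinery can be exercised without real gradients. A quick sketch of what the stubbed steps return, assuming gradsflow is installed so the Model base class can be constructed:

model = DummyModel()
out = model.train_step(batch=None)   # the batch is ignored by the stub
print(out["loss"].item())            # 1
print(out["metrics"]["accuracy"])    # 1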
python/flexflow/keras/datasets/cifar.py
zmxdream/FlexFlow
455
829
# -*- coding: utf-8 -*- """Utilities common to CIFAR10 and CIFAR100 datasets. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from six.moves import cPickle def load_batch(fpath, label_key='labels'): """Internal utility for parsing CIFAR data. # Arguments fpath: path the file to parse. label_key: key for label data in the retrieve dictionary. # Returns A tuple `(data, labels)`. """ with open(fpath, 'rb') as f: if sys.version_info < (3,): d = cPickle.load(f) else: d = cPickle.load(f, encoding='bytes') # decode utf8 d_decoded = {} for k, v in d.items(): d_decoded[k.decode('utf8')] = v d = d_decoded data = d['data'] labels = d[label_key] data = data.reshape(data.shape[0], 3, 32, 32) return data, labels
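load_batch only parses a single pickled batch file; downloading and unpacking the archive happens elsewhere. A usage sketch, assuming a CIFAR-10 archive has already been extracted next to the script (the path is illustrative); for CIFAR-100 pass label_key='fine_labels' or 'coarse_labels':

# Illustrative: requires an extracted CIFAR-10 archive at this relative path.
data, labels = load_batch('cifar-10-batches-py/data_batch_1')
print(data.shape)   # (10000, 3, 32, 32) for a standard CIFAR-10 training batch
print(len(labels))  # 10000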
tests/api/serializer/test_user.py
armandomeeuwenoord/freight
562
845
from freight.api.serializer import serialize from freight.testutils import TestCase class UserSerializerTest(TestCase): def test_simple(self): user = self.create_user() result = serialize(user) assert result["id"] == str(user.id) assert result["name"] == user.name
tests/test_custom_experts.py
protagohhz/hivemind
1,026
855
import os import pytest import torch from hivemind import RemoteExpert from hivemind.moe.server import background_server CUSTOM_EXPERTS_PATH = os.path.join(os.path.dirname(__file__), "test_utils", "custom_networks.py") @pytest.mark.forked def test_custom_expert(hid_dim=16): with background_server( expert_cls="perceptron", num_experts=2, device="cpu", hidden_dim=hid_dim, num_handlers=2, no_dht=True, custom_module_path=CUSTOM_EXPERTS_PATH, ) as (server_endpoint, _): expert0 = RemoteExpert("expert.0", server_endpoint) expert1 = RemoteExpert("expert.1", server_endpoint) for batch_size in (1, 4): batch = torch.randn(batch_size, hid_dim) output0 = expert0(batch) output1 = expert1(batch) loss = output0.sum() loss.backward() loss = output1.sum() loss.backward() @pytest.mark.forked def test_multihead_expert(hid_dim=16): with background_server( expert_cls="multihead", num_experts=2, device="cpu", hidden_dim=hid_dim, num_handlers=2, no_dht=True, custom_module_path=CUSTOM_EXPERTS_PATH, ) as (server_endpoint, _): expert0 = RemoteExpert("expert.0", server_endpoint) expert1 = RemoteExpert("expert.1", server_endpoint) for batch_size in (1, 4): batch = ( torch.randn(batch_size, hid_dim), torch.randn(batch_size, 2 * hid_dim), torch.randn(batch_size, 3 * hid_dim), ) output0 = expert0(*batch) output1 = expert1(*batch) loss = output0.sum() loss.backward() loss = output1.sum() loss.backward()
data_structures/stack/largest_rectangle_area_in_histogram.py
ruler30cm/python-ds
1,723
865
<gh_stars>1000+ ''' Largest rectangle area in a histogram:: Find the largest rectangular area possible in a given histogram where the largest rectangle can be made of a number of contiguous bars. For simplicity, assume that all bars have same width and the width is 1 unit. ''' def max_area_histogram(histogram): stack = list() max_area = 0 # Initialize max area index = 0 while index < len(histogram): if (not stack) or (histogram[stack[-1]] <= histogram[index]): stack.append(index) index += 1 else: top_of_stack = stack.pop() area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index)) max_area = max(max_area, area) while stack: top_of_stack = stack.pop() area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index)) max_area = max(max_area, area) return max_area hist = [4, 7, 1, 8, 4, 9, 5] print("Maximum area is", max_area_histogram(hist))
src/tools/types/obj.py
loongson-zn/build
215
868
<filename>src/tools/types/obj.py<gh_stars>100-1000 # Copyright <NAME> 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE.txt or copy at https://www.bfgroup.xyz/b2/LICENSE.txt) from b2.build import type def register (): type.register_type ('OBJ', ['obj'], None, ['NT', 'CYGWIN']) type.register_type ('OBJ', ['o']) register ()
timm/utils/checkpoint_saver.py
Robert-JunWang/pytorch-image-models
17,769
879
<filename>timm/utils/checkpoint_saver.py """ Checkpoint Saver Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. Hacked together by / Copyright 2020 <NAME> """ import glob import operator import os import logging import torch from .model import unwrap_model, get_state_dict _logger = logging.getLogger(__name__) class CheckpointSaver: def __init__( self, model, optimizer, args=None, model_ema=None, amp_scaler=None, checkpoint_prefix='checkpoint', recovery_prefix='recovery', checkpoint_dir='', recovery_dir='', decreasing=False, max_history=10, unwrap_fn=unwrap_model): # objects to save state_dicts of self.model = model self.optimizer = optimizer self.args = args self.model_ema = model_ema self.amp_scaler = amp_scaler # state self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness self.best_epoch = None self.best_metric = None self.curr_recovery_file = '' self.last_recovery_file = '' # config self.checkpoint_dir = checkpoint_dir self.recovery_dir = recovery_dir self.save_prefix = checkpoint_prefix self.recovery_prefix = recovery_prefix self.extension = '.pth.tar' self.decreasing = decreasing # a lower metric is better if True self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs self.max_history = max_history self.unwrap_fn = unwrap_fn assert self.max_history >= 1 def save_checkpoint(self, epoch, metric=None): assert epoch >= 0 tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) self._save(tmp_save_path, epoch, metric) if os.path.exists(last_save_path): os.unlink(last_save_path) # required for Windows support. os.rename(tmp_save_path, last_save_path) worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None if (len(self.checkpoint_files) < self.max_history or metric is None or self.cmp(metric, worst_file[1])): if len(self.checkpoint_files) >= self.max_history: self._cleanup_checkpoints(1) filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension save_path = os.path.join(self.checkpoint_dir, filename) os.link(last_save_path, save_path) self.checkpoint_files.append((save_path, metric)) self.checkpoint_files = sorted( self.checkpoint_files, key=lambda x: x[1], reverse=not self.decreasing) # sort in descending order if a lower metric is not better checkpoints_str = "Current checkpoints:\n" for c in self.checkpoint_files: checkpoints_str += ' {}\n'.format(c) _logger.info(checkpoints_str) if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): self.best_epoch = epoch self.best_metric = metric best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) if os.path.exists(best_save_path): os.unlink(best_save_path) os.link(last_save_path, best_save_path) return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) def _save(self, save_path, epoch, metric=None): save_state = { 'epoch': epoch, 'arch': type(self.model).__name__.lower(), 'state_dict': get_state_dict(self.model, self.unwrap_fn), 'optimizer': self.optimizer.state_dict(), 'version': 2, # version < 2 increments epoch before save } if self.args is not None: save_state['arch'] = self.args.model save_state['args'] = self.args if self.amp_scaler is not None: save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() if self.model_ema is not None: save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) if 
metric is not None: save_state['metric'] = metric torch.save(save_state, save_path) def _cleanup_checkpoints(self, trim=0): trim = min(len(self.checkpoint_files), trim) delete_index = self.max_history - trim if delete_index < 0 or len(self.checkpoint_files) <= delete_index: return to_delete = self.checkpoint_files[delete_index:] for d in to_delete: try: _logger.debug("Cleaning checkpoint: {}".format(d)) os.remove(d[0]) except Exception as e: _logger.error("Exception '{}' while deleting checkpoint".format(e)) self.checkpoint_files = self.checkpoint_files[:delete_index] def save_recovery(self, epoch, batch_idx=0): assert epoch >= 0 filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension save_path = os.path.join(self.recovery_dir, filename) self._save(save_path, epoch) if os.path.exists(self.last_recovery_file): try: _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) os.remove(self.last_recovery_file) except Exception as e: _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) self.last_recovery_file = self.curr_recovery_file self.curr_recovery_file = save_path def find_recovery(self): recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) files = glob.glob(recovery_path + '*' + self.extension) files = sorted(files) return files[0] if len(files) else ''
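A sketch of driving CheckpointSaver from a training loop; the model, optimizer, directory, and metric below are placeholders, and decreasing=True is set because the tracked metric is a loss:

import os
import torch

model = torch.nn.Linear(10, 2)                             # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

os.makedirs('./checkpoints', exist_ok=True)
saver = CheckpointSaver(model, optimizer, checkpoint_dir='./checkpoints',
                        decreasing=True, max_history=3)

for epoch in range(5):
    val_loss = 1.0 / (epoch + 1)                           # placeholder metric
    best_metric, best_epoch = saver.save_checkpoint(epoch, metric=val_loss)
    print('best so far:', best_metric, 'at epoch', best_epoch)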
src/python/pants/core/goals/check_test.py
yoav-orca/pants
1,806
884
<reponame>yoav-orca/pants # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from abc import ABCMeta, abstractmethod from pathlib import Path from textwrap import dedent from typing import ClassVar, Iterable, List, Optional, Tuple, Type from pants.core.goals.check import Check, CheckRequest, CheckResult, CheckResults, check from pants.core.util_rules.distdir import DistDir from pants.engine.addresses import Address from pants.engine.fs import Workspace from pants.engine.target import FieldSet, MultipleSourcesField, Target, Targets from pants.engine.unions import UnionMembership from pants.testutil.option_util import create_options_bootstrapper from pants.testutil.rule_runner import MockGet, RuleRunner, mock_console, run_rule_with_mocks from pants.util.logging import LogLevel class MockTarget(Target): alias = "mock_target" core_fields = (MultipleSourcesField,) class MockCheckFieldSet(FieldSet): required_fields = (MultipleSourcesField,) class MockCheckRequest(CheckRequest, metaclass=ABCMeta): field_set_type = MockCheckFieldSet checker_name: ClassVar[str] @staticmethod @abstractmethod def exit_code(_: Iterable[Address]) -> int: pass @property def check_results(self) -> CheckResults: addresses = [config.address for config in self.field_sets] return CheckResults( [ CheckResult( self.exit_code(addresses), "", "", ) ], checker_name=self.checker_name, ) class SuccessfulRequest(MockCheckRequest): checker_name = "SuccessfulChecker" @staticmethod def exit_code(_: Iterable[Address]) -> int: return 0 class FailingRequest(MockCheckRequest): checker_name = "FailingChecker" @staticmethod def exit_code(_: Iterable[Address]) -> int: return 1 class ConditionallySucceedsRequest(MockCheckRequest): checker_name = "ConditionallySucceedsChecker" @staticmethod def exit_code(addresses: Iterable[Address]) -> int: if any(address.target_name == "bad" for address in addresses): return 127 return 0 class SkippedRequest(MockCheckRequest): @staticmethod def exit_code(_) -> int: return 0 @property def check_results(self) -> CheckResults: return CheckResults([], checker_name="SkippedChecker") class InvalidField(MultipleSourcesField): pass class InvalidFieldSet(MockCheckFieldSet): required_fields = (InvalidField,) class InvalidRequest(MockCheckRequest): field_set_type = InvalidFieldSet checker_name = "InvalidChecker" @staticmethod def exit_code(_: Iterable[Address]) -> int: return -1 def make_target(address: Optional[Address] = None) -> Target: if address is None: address = Address("", target_name="tests") return MockTarget({}, address) def run_typecheck_rule( *, request_types: List[Type[CheckRequest]], targets: List[Target] ) -> Tuple[int, str]: union_membership = UnionMembership({CheckRequest: request_types}) with mock_console(create_options_bootstrapper()) as (console, stdio_reader): rule_runner = RuleRunner() result: Check = run_rule_with_mocks( check, rule_args=[ console, Workspace(rule_runner.scheduler, _enforce_effects=False), Targets(targets), DistDir(relpath=Path("dist")), union_membership, ], mock_gets=[ MockGet( output_type=CheckResults, input_type=CheckRequest, mock=lambda field_set_collection: field_set_collection.check_results, ), ], union_membership=union_membership, ) assert not stdio_reader.get_stdout() return result.exit_code, stdio_reader.get_stderr() def test_invalid_target_noops() -> None: exit_code, stderr = run_typecheck_rule(request_types=[InvalidRequest], targets=[make_target()]) assert exit_code == 0 assert stderr == "" 
def test_summary() -> None: good_address = Address("", target_name="good") bad_address = Address("", target_name="bad") exit_code, stderr = run_typecheck_rule( request_types=[ ConditionallySucceedsRequest, FailingRequest, SkippedRequest, SuccessfulRequest, ], targets=[make_target(good_address), make_target(bad_address)], ) assert exit_code == FailingRequest.exit_code([bad_address]) assert stderr == dedent( """\ 𐄂 ConditionallySucceedsChecker failed. 𐄂 FailingChecker failed. - SkippedChecker skipped. ✓ SuccessfulChecker succeeded. """ ) def test_streaming_output_skip() -> None: results = CheckResults([], checker_name="typechecker") assert results.level() == LogLevel.DEBUG assert results.message() == "typechecker skipped." def test_streaming_output_success() -> None: results = CheckResults([CheckResult(0, "stdout", "stderr")], checker_name="typechecker") assert results.level() == LogLevel.INFO assert results.message() == dedent( """\ typechecker succeeded. stdout stderr """ ) def test_streaming_output_failure() -> None: results = CheckResults([CheckResult(18, "stdout", "stderr")], checker_name="typechecker") assert results.level() == LogLevel.ERROR assert results.message() == dedent( """\ typechecker failed (exit code 18). stdout stderr """ ) def test_streaming_output_partitions() -> None: results = CheckResults( [ CheckResult(21, "", "", partition_description="ghc8.1"), CheckResult(0, "stdout", "stderr", partition_description="ghc9.2"), ], checker_name="typechecker", ) assert results.level() == LogLevel.ERROR assert results.message() == dedent( """\ typechecker failed (exit code 21). Partition #1 - ghc8.1: Partition #2 - ghc9.2: stdout stderr """ )
fairseq/models/bart/model.py
samsontmr/fairseq
172
888
<reponame>samsontmr/fairseq<gh_stars>100-1000 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """ import torch.nn as nn from fairseq import utils from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from .hub_interface import BARTHubInterface @register_model('bart') class BARTModel(TransformerModel): @classmethod def hub_models(cls): return { 'bart.large': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz', 'bart.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz', } def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): super(BARTModel, BARTModel).add_args(parser) parser.add_argument( '--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence' ) parser.add_argument( '--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence' ) parser.add_argument( '--pooler-dropout', type=float, metavar='D', help='dropout probability in the masked_lm pooler layers' ) parser.add_argument( '--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use for pooler layer' ) @property def supported_targets(self): return {'self'} def forward( self, src_tokens, src_lengths, prev_output_tokens, features_only=False, classification_head_name=None, **kwargs ): if classification_head_name is not None: features_only = True encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, **kwargs, ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, **kwargs, ) if classification_head_name is not None: sentence_representation = x[ src_tokens.eq(self.encoder.dictionary.eos()), : ].view(x.size(0), -1, x.size(-1))[:, -1, :] x = self.classification_heads[classification_head_name]( sentence_representation ) return x, extra @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='gpt2', **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return BARTHubInterface(x['args'], x['task'], x['models'][0]) def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs): """Register a classification head.""" print("Registering classification head: {0}".format(name)) if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: print( 'WARNING: re-registering head "{}" with num_classes {} (prev: {}) ' 'and inner_dim {} (prev: {})'.format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = BARTClassificationHead( self.args.encoder_embed_dim, inner_dim or 
self.args.encoder_embed_dim, num_classes, self.args.pooler_activation_fn, self.args.pooler_dropout, ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + '.' if name != '' else '' current_head_names = [] if not hasattr(self, 'classification_heads') else \ self.classification_heads.keys() # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + 'classification_heads.'): continue head_name = k[len(prefix + 'classification_heads.'):].split('.')[0] num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0) inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0) if getattr(self.args, 'load_checkpoint_heads', False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: print( 'WARNING: deleting classification head ({}) from checkpoint ' 'not present in current model: {}'.format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): print( 'WARNING: deleting classification head ({}) from checkpoint ' 'with different dimensions than current model: {}'.format(head_name, k) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, 'classification_heads'): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + 'classification_heads.' + k not in state_dict: print('Overwriting', prefix + 'classification_heads.' + k) state_dict[prefix + 'classification_heads.' 
+ k] = v class BARTClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x @register_model_architecture('bart', 'bart_large') def bart_large_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*1024) args.encoder_layers = getattr(args, 'encoder_layers', 12) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 12) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.relu_dropout = getattr(args, 'relu_dropout', 0.) args.dropout = getattr(args, 'dropout', 0.1) args.max_target_positions = getattr(args, 'max_target_positions', 1024) args.max_source_positions = getattr(args, 'max_source_positions', 1024) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True) args.share_all_embeddings = getattr(args, 'share_all_embeddings', True) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.no_scale_embedding = getattr(args, 'no_scale_embedding', True) args.layernorm_embedding = getattr(args, 'layernorm_embedding', True) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
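A short usage sketch of the hub entry point defined above: it resolves 'bart.large' through hub_models() and runs a round trip through the returned hub interface. Network access for the pretrained archive and fairseq's GPT-2 BPE dependencies are assumed.

# Illustrative: downloads the pretrained weights on first use.
bart = BARTModel.from_pretrained('bart.large', checkpoint_file='model.pt')
bart.eval()                         # disable dropout
tokens = bart.encode('Hello world!')
print(bart.decode(tokens))          # 'Hello world!'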
scibert/models/text_classifier.py
tomhoper/scibert
1,143
905
from typing import Dict, Optional, List, Any import torch import torch.nn.functional as F from allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder from allennlp.nn import InitializerApplicator, RegularizerApplicator from allennlp.nn import util from allennlp.training.metrics import CategoricalAccuracy, F1Measure from overrides import overrides @Model.register("text_classifier") class TextClassifier(Model): """ Implements a basic text classifier: 1) Embed tokens using `text_field_embedder` 2) Seq2SeqEncoder, e.g. BiLSTM 3) Append the first and last encoder states 4) Final feedforward layer Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1. """ def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, text_encoder: Seq2SeqEncoder, classifier_feedforward: FeedForward, verbose_metrics: False, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None, ) -> None: super(TextClassifier, self).__init__(vocab, regularizer) self.text_field_embedder = text_field_embedder self.num_classes = self.vocab.get_vocab_size("labels") self.text_encoder = text_encoder self.classifier_feedforward = classifier_feedforward self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim() , self.num_classes) self.label_accuracy = CategoricalAccuracy() self.label_f1_metrics = {} self.verbose_metrics = verbose_metrics for i in range(self.num_classes): self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i) self.loss = torch.nn.CrossEntropyLoss() self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True) initializer(self) @overrides def forward(self, text: Dict[str, torch.LongTensor], label: torch.IntTensor = None, metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]: """ Parameters ---------- text : Dict[str, torch.LongTensor] From a ``TextField`` label : torch.IntTensor, optional (default = None) From a ``LabelField`` metadata : ``List[Dict[str, Any]]``, optional, (default = None) Metadata containing the original tokenization of the premise and hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively. Returns ------- An output dictionary consisting of: label_logits : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label. label_probs : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label. loss : torch.FloatTensor, optional A scalar loss to be optimised. 
""" embedded_text = self.text_field_embedder(text) mask = util.get_text_field_mask(text) encoded_text = self.text_encoder(embedded_text, mask) pooled = self.pool(encoded_text, mask) ff_hidden = self.classifier_feedforward(pooled) logits = self.prediction_layer(ff_hidden) class_probs = F.softmax(logits, dim=1) output_dict = {"logits": logits} if label is not None: loss = self.loss(logits, label) output_dict["loss"] = loss # compute F1 per label for i in range(self.num_classes): metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace="labels")] metric(class_probs, label) self.label_accuracy(logits, label) return output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: class_probabilities = F.softmax(output_dict['logits'], dim=-1) output_dict['class_probs'] = class_probabilities return output_dict def get_metrics(self, reset: bool = False) -> Dict[str, float]: metric_dict = {} sum_f1 = 0.0 for name, metric in self.label_f1_metrics.items(): metric_val = metric.get_metric(reset) if self.verbose_metrics: metric_dict[name + '_P'] = metric_val[0] metric_dict[name + '_R'] = metric_val[1] metric_dict[name + '_F1'] = metric_val[2] sum_f1 += metric_val[2] names = list(self.label_f1_metrics.keys()) total_len = len(names) average_f1 = sum_f1 / total_len metric_dict['average_F1'] = average_f1 metric_dict['accuracy'] = self.label_accuracy.get_metric(reset) return metric_dict
plugins/template/tasks.py
crotwell/cmd2
469
906
# # -*- coding: utf-8 -*- """Development related tasks to be run with 'invoke'""" import os import pathlib import shutil import invoke TASK_ROOT = pathlib.Path(__file__).resolve().parent TASK_ROOT_STR = str(TASK_ROOT) # shared function def rmrf(items, verbose=True): """Silently remove a list of directories or files""" if isinstance(items, str): items = [items] for item in items: if verbose: print("Removing {}".format(item)) shutil.rmtree(item, ignore_errors=True) # rmtree doesn't remove bare files try: os.remove(item) except FileNotFoundError: pass # create namespaces namespace = invoke.Collection() namespace_clean = invoke.Collection('clean') namespace.add_collection(namespace_clean, 'clean') ##### # # pytest, pylint, and codecov # ##### @invoke.task def pytest(context, junit=False, pty=True, append_cov=False): """Run tests and code coverage using pytest""" ROOT_PATH = TASK_ROOT.parent.parent with context.cd(str(ROOT_PATH)): command_str = 'pytest --cov=cmd2_myplugin --cov-report=term --cov-report=html' if append_cov: command_str += ' --cov-append' if junit: command_str += ' --junitxml=junit/test-results.xml' command_str += ' ' + str((TASK_ROOT / 'tests').relative_to(ROOT_PATH)) context.run(command_str, pty=pty) namespace.add_task(pytest) @invoke.task def pytest_clean(context): """Remove pytest cache and code coverage files and directories""" # pylint: disable=unused-argument with context.cd(TASK_ROOT_STR): dirs = ['.pytest_cache', '.cache', '.coverage'] rmrf(dirs) namespace_clean.add_task(pytest_clean, 'pytest') @invoke.task def pylint(context): """Check code quality using pylint""" context.run('pylint --rcfile=cmd2_myplugin/pylintrc cmd2_myplugin') namespace.add_task(pylint) @invoke.task def pylint_tests(context): """Check code quality of test suite using pylint""" context.run('pylint --rcfile=tests/pylintrc tests') namespace.add_task(pylint_tests) ##### # # build and distribute # ##### BUILDDIR = 'build' DISTDIR = 'dist' @invoke.task def build_clean(context): """Remove the build directory""" # pylint: disable=unused-argument rmrf(BUILDDIR) namespace_clean.add_task(build_clean, 'build') @invoke.task def dist_clean(context): """Remove the dist directory""" # pylint: disable=unused-argument rmrf(DISTDIR) namespace_clean.add_task(dist_clean, 'dist') @invoke.task def eggs_clean(context): """Remove egg directories""" # pylint: disable=unused-argument dirs = set() dirs.add('.eggs') for name in os.listdir(os.curdir): if name.endswith('.egg-info'): dirs.add(name) if name.endswith('.egg'): dirs.add(name) rmrf(dirs) namespace_clean.add_task(eggs_clean, 'eggs') @invoke.task def bytecode_clean(context): """Remove __pycache__ directories and *.pyc files""" # pylint: disable=unused-argument dirs = set() for root, dirnames, files in os.walk(os.curdir): if '__pycache__' in dirnames: dirs.add(os.path.join(root, '__pycache__')) for file in files: if file.endswith(".pyc"): dirs.add(os.path.join(root, file)) print("Removing __pycache__ directories and .pyc files") rmrf(dirs, verbose=False) namespace_clean.add_task(bytecode_clean, 'bytecode') # # make a dummy clean task which runs all the tasks in the clean namespace clean_tasks = list(namespace_clean.tasks.values()) @invoke.task(pre=list(namespace_clean.tasks.values()), default=True) def clean_all(context): """Run all clean tasks""" # pylint: disable=unused-argument pass namespace_clean.add_task(clean_all, 'all') @invoke.task(pre=[clean_all]) def sdist(context): """Create a source distribution""" context.run('python setup.py sdist') 
namespace.add_task(sdist) @invoke.task(pre=[clean_all]) def wheel(context): """Build a wheel distribution""" context.run('python setup.py bdist_wheel') namespace.add_task(wheel) # # these two tasks are commented out so you don't # accidentally run them and upload this template to pypi # # @invoke.task(pre=[sdist, wheel]) # def pypi(context): # """Build and upload a distribution to pypi""" # context.run('twine upload dist/*') # namespace.add_task(pypi) # @invoke.task(pre=[sdist, wheel]) # def pypi_test(context): # """Build and upload a distribution to https://test.pypi.org""" # context.run('twine upload --repository-url https://test.pypi.org/legacy/ dist/*') # namespace.add_task(pypi_test)
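For reference, the rmrf() helper above accepts either a single path or a list, removes directories and bare files alike, and quietly skips anything that does not exist, which is what lets the clean tasks compose without existence checks. A tiny usage sketch (the paths are examples only):

rmrf('build')                                    # prints "Removing build" and deletes it if present
rmrf(['dist', '.pytest_cache', 'stray.pyc'],     # several targets in one call
     verbose=False)                              # suppress the per-item message

With invoke, the collections built above then surface as subcommands such as 'invoke pytest', 'invoke clean.bytecode', or plain 'invoke clean', which should fall through to the default clean_all task.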
scripts/automation/trex_control_plane/interactive/trex/examples/stl/ndr_plugin.py
timgates42/trex-core
956
907
import stl_path class MyNDRPlugin(): def __init__(self): pass def pre_iteration(self, finding_max_rate, run_results=None, **kwargs): """ Function ran before each iteration. :parameters: finding_max_rate: boolean Indicates whether we are running for the first time, trying to find the max rate. In this is the case, the run_results will be None. run_results: dict A dictionary that contains the following keys: queue_full_percentage: Percentage of packets that are queued. drop_rate_percentage: Percentage of packets that were dropped. rate_tx_bps: TX rate in bps. rate_rx_bps: RX rate in bps. tx_util: TX utilization percentage. latency: Latency groups. cpu_util: CPU utilization percentage. tx_pps: TX in pps. rx_pps: RX in pps. tx_bps: TX in bps. rx_bps: RX in bps. bw_per_core: Bandwidth per core. rate_p: Running rate in percentage out of max. total_tx_L1: Total TX L1. total_rx_L1: Total RX L1. iteration: Description of iteration (not necessarily a number) Pay attention: The rate is of the upcoming iteration. All the rest are of the previous iteration. kwargs: dict List of tunables passed as parameters. """ # Pre iteration function. This function will run before TRex transmits to the DUT. # Could use this to better prepare the DUT, for example define shapers, policers, increase buffers and queues. # You can receive tunables in the command line, through the kwargs argument. pass def post_iteration(self, finding_max_rate, run_results, **kwargs): """ Function ran after each iteration. :parameters: finding_max_rate: boolean Indicates whether we are running for the first time, trying to find the max rate. If this is the case, some values of run_results (like iteration for example) are not relevant. run_results: dict A dictionary that contains the following keys: queue_full_percentage: Percentage of packets that are queued. drop_rate_percentage: Percentage of packets that were dropped. rate_tx_bps: TX rate in bps. rate_rx_bps: RX rate in bps. tx_util: TX utilization percentage. latency: Latency groups. cpu_util: CPU utilization percentage. tx_pps: TX in pps. rx_pps: RX in pps. tx_bps: TX in bps. rx_bps: RX in bps. bw_per_core: Bandwidth per core. rate_p: Running rate in percentage out of max. total_tx_L1: Total TX L1. total_rx_L1: Total RX L1. iteration: Description of iteration (not necessarily a number) kwargs: dict List of tunables passed as parameters. :returns: bool: should stop the benchmarking or not. """ # Post iteration function. This function will run after TRex transmits to the DUT. # Could use this to decide if to continue the benchmark after querying the DUT post run. The DUT might be overheated or any other thing that might make you want to stop the run. # You can receive tunables in the command line, through the kwargs argument. should_stop = False return should_stop # dynamic load of python module def register(): return MyNDRPlugin()
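The docstrings above describe the whole plugin protocol: pre_iteration() runs before each TRex transmission, post_iteration() runs after it and returns whether to stop, and a module-level register() factory hands an instance back to the loader. A minimal sketch of a plugin that aborts once drops cross a threshold; the max_drop tunable name is invented here and would arrive through **kwargs from the command line:

import stl_path

class StopOnDropsPlugin():
    def pre_iteration(self, finding_max_rate, run_results=None, **kwargs):
        # nothing to prepare on the DUT in this sketch
        pass

    def post_iteration(self, finding_max_rate, run_results, **kwargs):
        max_drop = float(kwargs.get('max_drop', 0.1))              # hypothetical tunable, in percent
        should_stop = run_results['drop_rate_percentage'] > max_drop
        return should_stop

def register():
    return StopOnDropsPlugin()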
GPT-distributed.py
wenhuchen/LogicNLG
141
961
<filename>GPT-distributed.py<gh_stars>100-1000 import argparse import logging import torch import torch.nn.functional as F import numpy as np from torch import nn from torch.autograd import Variable from transformers import GPT2Config from transformers import GPT2LMHeadModel, GPT2Tokenizer, BertTokenizer from DataLoader import * from Model import BERTGen from utils import sample_sequence import torch.optim as optim import math import sys import pandas import os import numpy import nltk from torch.utils.tensorboard import SummaryWriter import warnings from tqdm import tqdm, trange from torch.utils.data import RandomSampler, SequentialSampler from torch.utils.data import DataLoader as DL import torch from torch.utils.data.distributed import DistributedSampler warnings.filterwarnings("ignore", category=UserWarning) device = torch.device('cuda') def set_seed(args): np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--model", default='gpt2', type=str) parser.add_argument("--top_k", type=int, default=0) parser.add_argument("--top_p", type=float, default=0.9) parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--do_train', default=False, action="store_true", help="whether to train or test the model") parser.add_argument('--do_rl', default=False, action="store_true", help="whether to train or test the model") parser.add_argument('--do_val', default=False, action="store_true", help="whether to train or test the model") parser.add_argument('--do_test', default=False, action="store_true", help="whether to compute the BLEU scores on test split") parser.add_argument('--do_test_challenge', default=False, action="store_true", help="whether to compute the BLEU scores on challenge split") parser.add_argument('--do_ppl', default=False, action="store_true", help="whether to compute perplexity of the model") parser.add_argument('--do_verify', default=False, action="store_true", help="whether compute the adv-acc score on test split") parser.add_argument('--do_verify_challenge', default=False, action="store_true", help="whether compute the adv-acc score on challenge split") parser.add_argument('--epoch', default=10, type=int, help="whether to train or test the model") parser.add_argument('--batch_size', default=6, type=int, help="whether to train or test the model") parser.add_argument('--local_rank', default=-1, type=int, help="whether to train or test the model") parser.add_argument('--learning_rate', default=2e-6, type=float, help="whether to train or test the model") parser.add_argument('--dataset', default='table', type=str, help="whether to train or test the model") parser.add_argument('--every', default=50, type=int, help="whether to train or test the model") parser.add_argument('--load_from', default='', type=str, help="whether to train or test the model") parser.add_argument('--id', default='models', type=str, help="specify the id of the experiment") parser.add_argument('--max_len', default=800, type=int, help="whether to train or test the model") parser.add_argument('--dim', default=768, type=int, help="whether to train or test the model") parser.add_argument('--layers', default=3, type=int, help="whether to train or test the model") parser.add_argument('--head', default=4, type=int, help="whether to train or test the model") parser.add_argument("--modelpath", type=str, default="bert-base-uncased", 
help="For distributed training: local_rank") parser.add_argument('--gradient_accumulation_steps', type=int, default=5, help="accumulation steps for gradient") parser.add_argument('--decode_first_K', type=int, default=10000, help="For debugging purpose") args = parser.parse_args() if args.local_rank == -1: device = torch.device("cuda") args.n_gpu = 1 else: torch.cuda.set_device(args.local_rank) device = torch.device('cuda', args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device if args.local_rank not in [-1, 0]: torch.distributed.barrier() tokenizer = GPT2Tokenizer.from_pretrained(args.model) model = GPT2LMHeadModel.from_pretrained(args.model) #model = nn.DataParallel(model) model.to(args.device) if args.local_rank == 0: torch.distributed.barrier() criterion = nn.CrossEntropyLoss(reduction='none', ignore_index=-1) if args.do_train: if args.local_rank in [-1, 0]: if not os.path.exists(args.id): os.mkdir(args.id) tb_writer = SummaryWriter(log_dir='tensorboard/GPT2-{}'.format(args.model)) dataset = GPTTableDataset2('data/train_lm_preprocessed.json', tokenizer, args.max_len) if args.local_rank == -1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) train_dataloader = DL(dataset, sampler=sampler, batch_size=args.batch_size, num_workers=0) model.train() optimizer = optim.Adam(model.parameters(), args.learning_rate) avg_loss = 0 global_step = 0 if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) else: model = torch.nn.DataParallel(model) for epoch_idx in trange(0, args.epoch, desc='Epoch', disable=args.local_rank not in [-1, 0]): #for idx in range(0, dataset.train_len()): for idx, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): batch = tuple(Variable(t).to(device) for t in batch) trg_inp, trg_out, mask, caption = batch inputs = torch.cat([caption, trg_inp], 1) model.zero_grad() optimizer.zero_grad() logits = model(inputs)[0] logits = logits[:, -trg_out.shape[1]:, :].contiguous() loss = criterion(logits.view(-1, logits.shape[-1]), trg_out.view(-1)) loss = loss * mask.view(-1) loss = loss.sum() / mask.sum() avg_loss += loss.item() loss.backward() optimizer.step() global_step += 1 if args.local_rank in [-1, 0] and idx % args.every == 0 and idx > 0: tb_writer.add_scalar("perplexity", math.exp(avg_loss / args.every), global_step) fake_inputs = caption gt_inputs = trg_out.cpu().data.numpy() #samples = model.sample(fake_inputs, tabfeat, caption, highlight_idx, bert) samples = sample_sequence(model, 30, fake_inputs, []) samples = samples[:, caption.shape[1]:] samples = samples.cpu().data.numpy() for s, gt in zip(samples, gt_inputs): text = tokenizer.decode(s, clean_up_tokenization_spaces=True) text = text[: text.find(tokenizer.eos_token)] print("PREDICTION |||||| ", text) text = tokenizer.decode(gt, clean_up_tokenization_spaces=True) text = text[: text.find(tokenizer.eos_token)] print("GROUNDTRUH |||||| ",text) break avg_loss = 0 if args.local_rank in [-1, 0]: if args.model == 'gpt2': torch.save(model.state_dict(), '{}/GPT_ep{}.pt'.format(args.id, epoch_idx)) else: torch.save(model.state_dict(), '{}/GPT_medium_ep{}.pt'.format(args.id, epoch_idx)) if args.local_rank in [-1, 0]: tb_writer.close()
bentoml/saved_bundle/loader.py
niits/BentoML
3,451
962
<gh_stars>1000+ # Copyright 2019 Atalaya Tech, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import os import sys import tarfile import logging import tempfile import shutil from functools import wraps from contextlib import contextmanager from urllib.parse import urlparse from typing import TYPE_CHECKING from pathlib import PureWindowsPath, PurePosixPath from bentoml.utils.s3 import is_s3_url from bentoml.utils.gcs import is_gcs_url from bentoml.exceptions import BentoMLException from bentoml.saved_bundle.config import SavedBundleConfig from bentoml.saved_bundle.pip_pkg import ZIPIMPORT_DIR if TYPE_CHECKING: from bentoml.yatai.proto.repository_pb2 import BentoServiceMetadata logger = logging.getLogger(__name__) def _is_http_url(bundle_path) -> bool: try: return urlparse(bundle_path).scheme in ["http", "https"] except ValueError: return False def _is_remote_path(bundle_path) -> bool: return isinstance(bundle_path, str) and ( is_s3_url(bundle_path) or is_gcs_url(bundle_path) or _is_http_url(bundle_path) ) @contextmanager def _resolve_remote_bundle_path(bundle_path): if is_s3_url(bundle_path): import boto3 parsed_url = urlparse(bundle_path) bucket_name = parsed_url.netloc object_name = parsed_url.path.lstrip('/') s3 = boto3.client('s3') fileobj = io.BytesIO() s3.download_fileobj(bucket_name, object_name, fileobj) fileobj.seek(0, 0) elif is_gcs_url(bundle_path): try: from google.cloud import storage except ImportError: raise BentoMLException( '"google-cloud-storage" package is required. You can install it with ' 'pip: "pip install google-cloud-storage"' ) gcs = storage.Client() fileobj = io.BytesIO() gcs.download_blob_to_file(bundle_path, fileobj) fileobj.seek(0, 0) elif _is_http_url(bundle_path): import requests response = requests.get(bundle_path) if response.status_code != 200: raise BentoMLException( f"Error retrieving BentoService bundle. 
" f"{response.status_code}: {response.text}" ) fileobj = io.BytesIO() fileobj.write(response.content) fileobj.seek(0, 0) else: raise BentoMLException(f"Saved bundle path: '{bundle_path}' is not supported") with tarfile.open(mode="r:gz", fileobj=fileobj) as tar: with tempfile.TemporaryDirectory() as tmpdir: filename = tar.getmembers()[0].name tar.extractall(path=tmpdir) yield os.path.join(tmpdir, filename) def resolve_remote_bundle(func): """Decorate a function to handle remote bundles.""" @wraps(func) def wrapper(bundle_path, *args): if _is_remote_path(bundle_path): with _resolve_remote_bundle_path(bundle_path) as local_bundle_path: return func(local_bundle_path, *args) return func(bundle_path, *args) return wrapper @resolve_remote_bundle def load_saved_bundle_config(bundle_path) -> "SavedBundleConfig": try: return SavedBundleConfig.load(os.path.join(bundle_path, "bentoml.yml")) except FileNotFoundError: raise BentoMLException( "BentoML can't locate config file 'bentoml.yml'" " in saved bundle in path: {}".format(bundle_path) ) def load_bento_service_metadata(bundle_path: str) -> "BentoServiceMetadata": return load_saved_bundle_config(bundle_path).get_bento_service_metadata_pb() def _find_module_file(bundle_path, service_name, module_file): # Simply join full path when module_file is just a file name, # e.g. module_file=="iris_classifier.py" module_file_path = os.path.join(bundle_path, service_name, module_file) if not os.path.isfile(module_file_path): # Try loading without service_name prefix, for loading from a installed PyPi module_file_path = os.path.join(bundle_path, module_file) # When module_file is located in sub directory # e.g. module_file=="foo/bar/iris_classifier.py" # This needs to handle the path differences between posix and windows platform: if not os.path.isfile(module_file_path): if sys.platform == "win32": # Try load a saved bundle created from posix platform on windows module_file_path = os.path.join( bundle_path, service_name, str(PurePosixPath(module_file)) ) if not os.path.isfile(module_file_path): module_file_path = os.path.join( bundle_path, str(PurePosixPath(module_file)) ) else: # Try load a saved bundle created from windows platform on posix module_file_path = os.path.join( bundle_path, service_name, PureWindowsPath(module_file).as_posix() ) if not os.path.isfile(module_file_path): module_file_path = os.path.join( bundle_path, PureWindowsPath(module_file).as_posix() ) if not os.path.isfile(module_file_path): raise BentoMLException( "Can not locate module_file {} in saved bundle {}".format( module_file, bundle_path ) ) return module_file_path @resolve_remote_bundle def load_bento_service_class(bundle_path): """ Load a BentoService class from saved bundle in given path :param bundle_path: A path to Bento files generated from BentoService#save, #save_to_dir, or the path to pip installed BentoService directory :return: BentoService class """ config = load_saved_bundle_config(bundle_path) metadata = config["metadata"] # Find and load target module containing BentoService class from given path module_file_path = _find_module_file( bundle_path, metadata["service_name"], metadata["module_file"] ) # Prepend bundle_path to sys.path for loading extra python dependencies sys.path.insert(0, bundle_path) sys.path.insert(0, os.path.join(bundle_path, metadata["service_name"])) # Include zipimport modules zipimport_dir = os.path.join(bundle_path, metadata["service_name"], ZIPIMPORT_DIR) if os.path.exists(zipimport_dir): for p in os.listdir(zipimport_dir): logger.debug('adding %s 
to sys.path', p) sys.path.insert(0, os.path.join(zipimport_dir, p)) module_name = metadata["module_name"] if module_name in sys.modules: logger.warning( "Module `%s` already loaded, using existing imported module.", module_name ) module = sys.modules[module_name] elif sys.version_info >= (3, 5): import importlib.util spec = importlib.util.spec_from_file_location(module_name, module_file_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) elif sys.version_info >= (3, 3): from importlib.machinery import SourceFileLoader # pylint:disable=deprecated-method module = SourceFileLoader(module_name, module_file_path).load_module( module_name ) # pylint:enable=deprecated-method else: raise BentoMLException("BentoML requires Python 3.4 and above") # Remove bundle_path from sys.path to avoid import naming conflicts sys.path.remove(bundle_path) model_service_class = module.__getattribute__(metadata["service_name"]) # Set _bento_service_bundle_path, where BentoService will load its artifacts model_service_class._bento_service_bundle_path = bundle_path # Set cls._version, service instance can access it via svc.version model_service_class._bento_service_bundle_version = metadata["service_version"] if ( model_service_class._env and model_service_class._env._requirements_txt_file is not None ): # Load `requirement.txt` from bundle directory instead of the user-provided # file path, which may only available during the bundle save process model_service_class._env._requirements_txt_file = os.path.join( bundle_path, "requirements.txt" ) return model_service_class @resolve_remote_bundle def safe_retrieve(bundle_path: str, target_dir: str): """Safely retrieve bento service to local path Args: bundle_path (:obj:`str`): The path that contains saved BentoService bundle, supporting both local file path and s3 path target_dir (:obj:`str`): Where the service contents should end up. Returns: :obj:`str`: location of safe local path """ shutil.copytree(bundle_path, target_dir) @resolve_remote_bundle def load_from_dir(bundle_path): """Load bento service from local file path or s3 path Args: bundle_path (str): The path that contains saved BentoService bundle, supporting both local file path and s3 path Returns: bentoml.service.BentoService: a loaded BentoService instance """ svc_cls = load_bento_service_class(bundle_path) return svc_cls() @resolve_remote_bundle def load_bento_service_api(bundle_path, api_name=None): bento_service = load_from_dir(bundle_path) return bento_service.get_inference_api(api_name)
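Loaders wrapped with @resolve_remote_bundle above accept either a local bundle directory or a remote s3://, gs:// or http(s) archive; remote bundles are downloaded and extracted into a temporary directory before the wrapped function runs. A usage sketch with made-up bundle locations:

# Local saved bundle directory (path is an example):
svc = load_from_dir('./saved_bundles/IrisClassifier/20210101120000_0A1B2C')

# Remote tar.gz bundle on S3 (bucket and key are examples); it is fetched to a temp dir first:
predict = load_bento_service_api('s3://my-bucket/IrisClassifier.tar.gz', 'predict')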
ice/consoles.py
reavessm/Ice
578
969
<reponame>reavessm/Ice<gh_stars>100-1000 # encoding: utf-8 import os import roms def console_roms_directory(configuration, console): """ If the user has specified a custom ROMs directory in consoles.txt then return that. Otherwise, append the shortname of the console to the default ROMs directory given by config.txt. """ if console.custom_roms_directory: return console.custom_roms_directory return os.path.join(roms.roms_directory(configuration), console.shortname) def path_is_rom(console, path): """ This function determines if a given path is actually a valid ROM file. If a list of extensions is supplied for this console, we check if the path has a valid extension If no extensions are defined for this console, we just accept any file """ if console.extensions == "": return True # Normalize the extension based on the things we validly ignore. # Aka capitalization, whitespace, and leading dots normalize = lambda ext: ext.lower().strip().lstrip('.') (name, ext) = os.path.splitext(path) valid_extensions = console.extensions.split(',') return normalize(ext) in map(normalize, valid_extensions)
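A small illustration of the extension handling in path_is_rom() above, using a stand-in console object; the attribute values and paths below are made up, and a console with an empty extensions string would accept any file:

class FakeConsole(object):
    custom_roms_directory = None
    shortname = "NES"
    extensions = ".nes, .NES , zip"      # mixed case, stray whitespace, missing dot

print(path_is_rom(FakeConsole(), "/roms/NES/Mario Bros.nes"))   # True
print(path_is_rom(FakeConsole(), "/roms/NES/Mario Bros.iso"))   # False
print(path_is_rom(FakeConsole(), "/roms/NES/archive.zip"))      # True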
tools/perf/contrib/oop_raster/oop_raster.py
zipated/src
2,151
971
<filename>tools/perf/contrib/oop_raster/oop_raster.py # Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from benchmarks import smoothness,thread_times import page_sets from telemetry import benchmark # pylint: disable=protected-access def CustomizeBrowserOptionsForOopRasterization(options): """Enables flags needed for out of process rasterization.""" options.AppendExtraBrowserArgs('--force-gpu-rasterization') options.AppendExtraBrowserArgs('--enable-oop-rasterization') @benchmark.Owner(emails=['<EMAIL>']) class SmoothnessOopRasterizationTop25(smoothness._Smoothness): """Measures rendering statistics for the top 25 with oop rasterization. """ tag = 'oop_rasterization' page_set = page_sets.Top25SmoothPageSet def SetExtraBrowserOptions(self, options): CustomizeBrowserOptionsForOopRasterization(options) @classmethod def Name(cls): return 'smoothness.oop_rasterization.top_25_smooth' @benchmark.Owner(emails=['<EMAIL>']) class ThreadTimesOopRasterKeyMobile(thread_times._ThreadTimes): """Measure timeline metrics for key mobile pages while using out of process raster.""" tag = 'oop_rasterization' page_set = page_sets.KeyMobileSitesSmoothPageSet options = {'story_tag_filter': 'fastpath'} def SetExtraBrowserOptions(self, options): super(ThreadTimesOopRasterKeyMobile, self).SetExtraBrowserOptions(options) CustomizeBrowserOptionsForOopRasterization(options) @classmethod def Name(cls): return 'thread_times.oop_rasterization.key_mobile'
tests/test_std.py
ashwini-balnaves/python-consul
469
988
<filename>tests/test_std.py import base64 import operator import struct import time import pytest import six import consul import consul.std Check = consul.Check class TestHTTPClient(object): def test_uri(self): http = consul.std.HTTPClient() assert http.uri('/v1/kv') == 'http://127.0.0.1:8500/v1/kv' assert http.uri('/v1/kv', params={'index': 1}) == \ 'http://127.0.0.1:8500/v1/kv?index=1' class TestConsul(object): def test_kv(self, consul_port): c = consul.Consul(port=consul_port) index, data = c.kv.get('foo') assert data is None assert c.kv.put('foo', 'bar') is True index, data = c.kv.get('foo') assert data['Value'] == six.b('bar') def test_kv_wait(self, consul_port): c = consul.Consul(port=consul_port) assert c.kv.put('foo', 'bar') is True index, data = c.kv.get('foo') check, data = c.kv.get('foo', index=index, wait='20ms') assert index == check def test_kv_encoding(self, consul_port): c = consul.Consul(port=consul_port) # test binary c.kv.put('foo', struct.pack('i', 1000)) index, data = c.kv.get('foo') assert struct.unpack('i', data['Value']) == (1000,) # test unicode c.kv.put('foo', u'bar') index, data = c.kv.get('foo') assert data['Value'] == six.b('bar') # test empty-string comes back as `None` c.kv.put('foo', '') index, data = c.kv.get('foo') assert data['Value'] is None # test None c.kv.put('foo', None) index, data = c.kv.get('foo') assert data['Value'] is None # check unencoded values raises assert pytest.raises(AssertionError, c.kv.put, 'foo', {1: 2}) def test_kv_put_cas(self, consul_port): c = consul.Consul(port=consul_port) assert c.kv.put('foo', 'bar', cas=50) is False assert c.kv.put('foo', 'bar', cas=0) is True index, data = c.kv.get('foo') assert c.kv.put('foo', 'bar2', cas=data['ModifyIndex']-1) is False assert c.kv.put('foo', 'bar2', cas=data['ModifyIndex']) is True index, data = c.kv.get('foo') assert data['Value'] == six.b('bar2') def test_kv_put_flags(self, consul_port): c = consul.Consul(port=consul_port) c.kv.put('foo', 'bar') index, data = c.kv.get('foo') assert data['Flags'] == 0 assert c.kv.put('foo', 'bar', flags=50) is True index, data = c.kv.get('foo') assert data['Flags'] == 50 def test_kv_recurse(self, consul_port): c = consul.Consul(port=consul_port) index, data = c.kv.get('foo/', recurse=True) assert data is None c.kv.put('foo/', None) index, data = c.kv.get('foo/', recurse=True) assert len(data) == 1 c.kv.put('foo/bar1', '1') c.kv.put('foo/bar2', '2') c.kv.put('foo/bar3', '3') index, data = c.kv.get('foo/', recurse=True) assert [x['Key'] for x in data] == [ 'foo/', 'foo/bar1', 'foo/bar2', 'foo/bar3'] assert [x['Value'] for x in data] == [ None, six.b('1'), six.b('2'), six.b('3')] def test_kv_delete(self, consul_port): c = consul.Consul(port=consul_port) c.kv.put('foo1', '1') c.kv.put('foo2', '2') c.kv.put('foo3', '3') index, data = c.kv.get('foo', recurse=True) assert [x['Key'] for x in data] == ['foo1', 'foo2', 'foo3'] assert c.kv.delete('foo2') is True index, data = c.kv.get('foo', recurse=True) assert [x['Key'] for x in data] == ['foo1', 'foo3'] assert c.kv.delete('foo', recurse=True) is True index, data = c.kv.get('foo', recurse=True) assert data is None def test_kv_delete_cas(self, consul_port): c = consul.Consul(port=consul_port) c.kv.put('foo', 'bar') index, data = c.kv.get('foo') assert c.kv.delete('foo', cas=data['ModifyIndex']-1) is False assert c.kv.get('foo') == (index, data) assert c.kv.delete('foo', cas=data['ModifyIndex']) is True index, data = c.kv.get('foo') assert data is None def test_kv_acquire_release(self, consul_port): c = 
consul.Consul(port=consul_port) pytest.raises( consul.ConsulException, c.kv.put, 'foo', 'bar', acquire='foo') s1 = c.session.create() s2 = c.session.create() assert c.kv.put('foo', '1', acquire=s1) is True assert c.kv.put('foo', '2', acquire=s2) is False assert c.kv.put('foo', '1', acquire=s1) is True assert c.kv.put('foo', '1', release='foo') is False assert c.kv.put('foo', '2', release=s2) is False assert c.kv.put('foo', '2', release=s1) is True c.session.destroy(s1) c.session.destroy(s2) def test_kv_keys_only(self, consul_port): c = consul.Consul(port=consul_port) assert c.kv.put('bar', '4') is True assert c.kv.put('base/foo', '1') is True assert c.kv.put('base/base/foo', '5') is True index, data = c.kv.get('base/', keys=True, separator='/') assert data == ['base/base/', 'base/foo'] def test_transaction(self, consul_port): c = consul.Consul(port=consul_port) value = base64.b64encode(b"1").decode("utf8") d = {"KV": {"Verb": "set", "Key": "asdf", "Value": value}} r = c.txn.put([d]) assert r["Errors"] is None d = {"KV": {"Verb": "get", "Key": "asdf"}} r = c.txn.put([d]) assert r["Results"][0]["KV"]["Value"] == value def test_event(self, consul_port): c = consul.Consul(port=consul_port) assert c.event.fire("fooname", "foobody") index, events = c.event.list() assert [x['Name'] == 'fooname' for x in events] assert [x['Payload'] == 'foobody' for x in events] def test_event_targeted(self, consul_port): c = consul.Consul(port=consul_port) assert c.event.fire("fooname", "foobody") index, events = c.event.list(name="othername") assert events == [] index, events = c.event.list(name="fooname") assert [x['Name'] == 'fooname' for x in events] assert [x['Payload'] == 'foobody' for x in events] def test_agent_checks(self, consul_port): c = consul.Consul(port=consul_port) def verify_and_dereg_check(check_id): assert set(c.agent.checks().keys()) == set([check_id]) assert c.agent.check.deregister(check_id) is True assert set(c.agent.checks().keys()) == set([]) def verify_check_status(check_id, status, notes=None): checks = c.agent.checks() assert checks[check_id]['Status'] == status if notes: assert checks[check_id]['Output'] == notes # test setting notes on a check c.agent.check.register('check', Check.ttl('1s'), notes='foo') assert c.agent.checks()['check']['Notes'] == 'foo' c.agent.check.deregister('check') assert set(c.agent.checks().keys()) == set([]) assert c.agent.check.register( 'script_check', Check.script('/bin/true', 10)) is True verify_and_dereg_check('script_check') assert c.agent.check.register( 'check name', Check.script('/bin/true', 10), check_id='check_id') is True verify_and_dereg_check('check_id') http_addr = "http://127.0.0.1:{0}".format(consul_port) assert c.agent.check.register( 'http_check', Check.http(http_addr, '10ms')) is True time.sleep(1) verify_check_status('http_check', 'passing') verify_and_dereg_check('http_check') assert c.agent.check.register( 'http_timeout_check', Check.http(http_addr, '100ms', timeout='2s')) is True verify_and_dereg_check('http_timeout_check') assert c.agent.check.register('ttl_check', Check.ttl('100ms')) is True assert c.agent.check.ttl_warn('ttl_check') is True verify_check_status('ttl_check', 'warning') assert c.agent.check.ttl_warn( 'ttl_check', notes='its not quite right') is True verify_check_status('ttl_check', 'warning', 'its not quite right') assert c.agent.check.ttl_fail('ttl_check') is True verify_check_status('ttl_check', 'critical') assert c.agent.check.ttl_fail( 'ttl_check', notes='something went boink!') is True verify_check_status( 
'ttl_check', 'critical', notes='something went boink!') assert c.agent.check.ttl_pass('ttl_check') is True verify_check_status('ttl_check', 'passing') assert c.agent.check.ttl_pass( 'ttl_check', notes='all hunky dory!') is True verify_check_status('ttl_check', 'passing', notes='all hunky dory!') # wait for ttl to expire time.sleep(120/1000.0) verify_check_status('ttl_check', 'critical') verify_and_dereg_check('ttl_check') def test_service_dereg_issue_156(self, consul_port): # https://github.com/cablehead/python-consul/issues/156 service_name = 'app#127.0.0.1#3000' c = consul.Consul(port=consul_port) c.agent.service.register(service_name) time.sleep(80/1000.0) index, nodes = c.health.service(service_name) assert [node['Service']['ID'] for node in nodes] == [service_name] # Clean up tasks assert c.agent.service.deregister(service_name) is True time.sleep(40/1000.0) index, nodes = c.health.service(service_name) assert [node['Service']['ID'] for node in nodes] == [] def test_agent_checks_service_id(self, consul_port): c = consul.Consul(port=consul_port) c.agent.service.register('foo1') time.sleep(40/1000.0) index, nodes = c.health.service('foo1') assert [node['Service']['ID'] for node in nodes] == ['foo1'] c.agent.check.register('foo', Check.ttl('100ms'), service_id='foo1') time.sleep(40/1000.0) index, nodes = c.health.service('foo1') assert set([ check['ServiceID'] for node in nodes for check in node['Checks']]) == set(['foo1', '']) assert set([ check['CheckID'] for node in nodes for check in node['Checks']]) == set(['foo', 'serfHealth']) # Clean up tasks assert c.agent.check.deregister('foo') is True time.sleep(40/1000.0) assert c.agent.service.deregister('foo1') is True time.sleep(40/1000.0) def test_agent_register_check_no_service_id(self, consul_port): c = consul.Consul(port=consul_port) index, nodes = c.health.service("foo1") assert nodes == [] pytest.raises(consul.std.base.ConsulException, c.agent.check.register, 'foo', Check.ttl('100ms'), service_id='foo1') time.sleep(40/1000.0) assert c.agent.checks() == {} # Cleanup tasks c.agent.check.deregister('foo') time.sleep(40/1000.0) def test_agent_register_enable_tag_override(self, consul_port): c = consul.Consul(port=consul_port) index, nodes = c.health.service("foo1") assert nodes == [] c.agent.service.register('foo', enable_tag_override=True) assert c.agent.services()['foo']['EnableTagOverride'] # Cleanup tasks c.agent.check.deregister('foo') def test_agent_service_maintenance(self, consul_port): c = consul.Consul(port=consul_port) c.agent.service.register('foo', check=Check.ttl('100ms')) time.sleep(40/1000.0) c.agent.service.maintenance('foo', 'true', "test") time.sleep(40/1000.0) checks_pre = c.agent.checks() assert '_service_maintenance:foo' in checks_pre.keys() assert 'test' == checks_pre['_service_maintenance:foo']['Notes'] c.agent.service.maintenance('foo', 'false') time.sleep(40/1000.0) checks_post = c.agent.checks() assert '_service_maintenance:foo' not in checks_post.keys() # Cleanup c.agent.service.deregister('foo') time.sleep(40/1000.0) def test_agent_node_maintenance(self, consul_port): c = consul.Consul(port=consul_port) c.agent.maintenance('true', "test") time.sleep(40/1000.0) checks_pre = c.agent.checks() assert '_node_maintenance' in checks_pre.keys() assert 'test' == checks_pre['_node_maintenance']['Notes'] c.agent.maintenance('false') time.sleep(40/1000.0) checks_post = c.agent.checks() assert '_node_maintenance' not in checks_post.keys() def test_agent_members(self, consul_port): c = consul.Consul(port=consul_port) 
members = c.agent.members() for x in members: assert x['Status'] == 1 assert not x['Name'] is None assert not x['Tags'] is None assert c.agent.self()['Member'] in members wan_members = c.agent.members(wan=True) for x in wan_members: assert 'dc1' in x['Name'] def test_agent_self(self, consul_port): c = consul.Consul(port=consul_port) assert set(c.agent.self().keys()) == set(['Member', 'Stats', 'Config', 'Coord', 'DebugConfig', 'Meta']) def test_agent_services(self, consul_port): c = consul.Consul(port=consul_port) assert c.agent.service.register('foo') is True assert set(c.agent.services().keys()) == set(['foo']) assert c.agent.service.deregister('foo') is True assert set(c.agent.services().keys()) == set() # test address param assert c.agent.service.register('foo', address='10.10.10.1') is True assert [ v['Address'] for k, v in c.agent.services().items() if k == 'foo'][0] == '10.10.10.1' assert c.agent.service.deregister('foo') is True def test_catalog(self, consul_port): c = consul.Consul(port=consul_port) # grab the node our server created, so we can ignore it _, nodes = c.catalog.nodes() assert len(nodes) == 1 current = nodes[0] # test catalog.datacenters assert c.catalog.datacenters() == ['dc1'] # test catalog.register pytest.raises( consul.ConsulException, c.catalog.register, 'foo', '10.1.10.11', dc='dc2') assert c.catalog.register( 'n1', '10.1.10.11', service={'service': 's1'}, check={'name': 'c1'}) is True assert c.catalog.register( 'n1', '10.1.10.11', service={'service': 's2'}) is True assert c.catalog.register( 'n2', '10.1.10.12', service={'service': 's1', 'tags': ['master']}) is True # test catalog.nodes pytest.raises(consul.ConsulException, c.catalog.nodes, dc='dc2') _, nodes = c.catalog.nodes() nodes.remove(current) assert [x['Node'] for x in nodes] == ['n1', 'n2'] # test catalog.services pytest.raises(consul.ConsulException, c.catalog.services, dc='dc2') _, services = c.catalog.services() assert services == {'s1': [u'master'], 's2': [], 'consul': []} # test catalog.node pytest.raises(consul.ConsulException, c.catalog.node, 'n1', dc='dc2') _, node = c.catalog.node('n1') assert set(node['Services'].keys()) == set(['s1', 's2']) _, node = c.catalog.node('n3') assert node is None # test catalog.service pytest.raises( consul.ConsulException, c.catalog.service, 's1', dc='dc2') _, nodes = c.catalog.service('s1') assert set([x['Node'] for x in nodes]) == set(['n1', 'n2']) _, nodes = c.catalog.service('s1', tag='master') assert set([x['Node'] for x in nodes]) == set(['n2']) # test catalog.deregister pytest.raises( consul.ConsulException, c.catalog.deregister, 'n2', dc='dc2') assert c.catalog.deregister('n1', check_id='c1') is True assert c.catalog.deregister('n2', service_id='s1') is True # check the nodes weren't removed _, nodes = c.catalog.nodes() nodes.remove(current) assert [x['Node'] for x in nodes] == ['n1', 'n2'] # check n2's s1 service was removed though _, nodes = c.catalog.service('s1') assert set([x['Node'] for x in nodes]) == set(['n1']) # cleanup assert c.catalog.deregister('n1') is True assert c.catalog.deregister('n2') is True _, nodes = c.catalog.nodes() nodes.remove(current) assert [x['Node'] for x in nodes] == [] def test_health_service(self, consul_port): c = consul.Consul(port=consul_port) # check there are no nodes for the service 'foo' index, nodes = c.health.service('foo') assert nodes == [] # register two nodes, one with a long ttl, the other shorter c.agent.service.register( 'foo', service_id='foo:1', check=Check.ttl('10s'), tags=['tag:foo:1']) 
c.agent.service.register( 'foo', service_id='foo:2', check=Check.ttl('100ms')) time.sleep(40/1000.0) # check the nodes show for the /health/service endpoint index, nodes = c.health.service('foo') assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # but that they aren't passing their health check index, nodes = c.health.service('foo', passing=True) assert nodes == [] # ping the two node's health check c.agent.check.ttl_pass('service:foo:1') c.agent.check.ttl_pass('service:foo:2') time.sleep(40/1000.0) # both nodes are now available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # wait until the short ttl node fails time.sleep(120/1000.0) # only one node available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1'] # ping the failed node's health check c.agent.check.ttl_pass('service:foo:2') time.sleep(40/1000.0) # check both nodes are available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # check that tag works index, nodes = c.health.service('foo', tag='tag:foo:1') assert [node['Service']['ID'] for node in nodes] == ['foo:1'] # deregister the nodes c.agent.service.deregister('foo:1') c.agent.service.deregister('foo:2') time.sleep(40/1000.0) index, nodes = c.health.service('foo') assert nodes == [] def test_health_state(self, consul_port): c = consul.Consul(port=consul_port) # The empty string is for the Serf Health Status check, which has an # empty ServiceID index, nodes = c.health.state('any') assert [node['ServiceID'] for node in nodes] == [''] # register two nodes, one with a long ttl, the other shorter c.agent.service.register( 'foo', service_id='foo:1', check=Check.ttl('10s')) c.agent.service.register( 'foo', service_id='foo:2', check=Check.ttl('100ms')) time.sleep(40/1000.0) # check the nodes show for the /health/state/any endpoint index, nodes = c.health.state('any') assert set([node['ServiceID'] for node in nodes]) == set( ['', 'foo:1', 'foo:2']) # but that they aren't passing their health check index, nodes = c.health.state('passing') assert [node['ServiceID'] for node in nodes] != 'foo' # ping the two node's health check c.agent.check.ttl_pass('service:foo:1') c.agent.check.ttl_pass('service:foo:2') time.sleep(40/1000.0) # both nodes are now available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == set( ['', 'foo:1', 'foo:2']) # wait until the short ttl node fails time.sleep(2200/1000.0) # only one node available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == set( ['', 'foo:1']) # ping the failed node's health check c.agent.check.ttl_pass('service:foo:2') time.sleep(40/1000.0) # check both nodes are available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == set( ['', 'foo:1', 'foo:2']) # deregister the nodes c.agent.service.deregister('foo:1') c.agent.service.deregister('foo:2') time.sleep(40/1000.0) index, nodes = c.health.state('any') assert [node['ServiceID'] for node in nodes] == [''] def test_health_node(self, consul_port): c = consul.Consul(port=consul_port) # grab local node name node = c.agent.self()['Config']['NodeName'] index, checks = c.health.node(node) assert node in [check["Node"] for check in checks] def test_health_checks(self, consul_port): c = consul.Consul(port=consul_port) 
c.agent.service.register( 'foobar', service_id='foobar', check=Check.ttl('10s')) time.sleep(40/1000.00) index, checks = c.health.checks('foobar') assert [check['ServiceID'] for check in checks] == ['foobar'] assert [check['CheckID'] for check in checks] == ['service:foobar'] c.agent.service.deregister('foobar') time.sleep(40/1000.0) index, checks = c.health.checks('foobar') assert len(checks) == 0 def test_session(self, consul_port): c = consul.Consul(port=consul_port) # session.create pytest.raises(consul.ConsulException, c.session.create, node='n2') pytest.raises(consul.ConsulException, c.session.create, dc='dc2') session_id = c.session.create('my-session') # session.list pytest.raises(consul.ConsulException, c.session.list, dc='dc2') _, sessions = c.session.list() assert [x['Name'] for x in sessions] == ['my-session'] # session.info pytest.raises( consul.ConsulException, c.session.info, session_id, dc='dc2') index, session = c.session.info('1'*36) assert session is None index, session = c.session.info(session_id) assert session['Name'] == 'my-session' # session.node node = session['Node'] pytest.raises( consul.ConsulException, c.session.node, node, dc='dc2') _, sessions = c.session.node(node) assert [x['Name'] for x in sessions] == ['my-session'] # session.destroy pytest.raises( consul.ConsulException, c.session.destroy, session_id, dc='dc2') assert c.session.destroy(session_id) is True _, sessions = c.session.list() assert sessions == [] def test_session_delete_ttl_renew(self, consul_port): c = consul.Consul(port=consul_port) s = c.session.create(behavior='delete', ttl=20) # attempt to renew an unknown session pytest.raises(consul.NotFound, c.session.renew, '1'*36) session = c.session.renew(s) assert session['Behavior'] == 'delete' assert session['TTL'] == '20s' # trying out the behavior assert c.kv.put('foo', '1', acquire=s) is True index, data = c.kv.get('foo') assert data['Value'] == six.b('1') c.session.destroy(s) index, data = c.kv.get('foo') assert data is None def test_acl_disabled(self, consul_port): c = consul.Consul(port=consul_port) pytest.raises(consul.ACLDisabled, c.acl.list) pytest.raises(consul.ACLDisabled, c.acl.info, '1'*36) pytest.raises(consul.ACLDisabled, c.acl.create) pytest.raises(consul.ACLDisabled, c.acl.update, 'foo') pytest.raises(consul.ACLDisabled, c.acl.clone, 'foo') pytest.raises(consul.ACLDisabled, c.acl.destroy, 'foo') def test_acl_permission_denied(self, acl_consul): c = consul.Consul(port=acl_consul.port) pytest.raises(consul.ACLPermissionDenied, c.acl.list) pytest.raises(consul.ACLPermissionDenied, c.acl.create) pytest.raises(consul.ACLPermissionDenied, c.acl.update, 'anonymous') pytest.raises(consul.ACLPermissionDenied, c.acl.clone, 'anonymous') pytest.raises(consul.ACLPermissionDenied, c.acl.destroy, 'anonymous') def test_acl_explict_token_use(self, acl_consul): c = consul.Consul(port=acl_consul.port) master_token = acl_consul.token acls = c.acl.list(token=master_token) assert set([x['ID'] for x in acls]) == \ set(['anonymous', master_token]) assert c.acl.info('1'*36) is None compare = [c.acl.info(master_token), c.acl.info('anonymous')] compare.sort(key=operator.itemgetter('ID')) assert acls == compare rules = """ key "" { policy = "read" } key "private/" { policy = "deny" } service "foo-" { policy = "write" } service "bar-" { policy = "read" } """ token = c.acl.create(rules=rules, token=master_token) assert c.acl.info(token)['Rules'] == rules token2 = c.acl.clone(token, token=master_token) assert c.acl.info(token2)['Rules'] == rules assert 
c.acl.update(token2, name='Foo', token=master_token) == token2 assert c.acl.info(token2)['Name'] == 'Foo' assert c.acl.destroy(token2, token=master_token) is True assert c.acl.info(token2) is None c.kv.put('foo', 'bar') c.kv.put('private/foo', 'bar') assert c.kv.get('foo', token=token)[1]['Value'] == six.b('bar') pytest.raises( consul.ACLPermissionDenied, c.kv.put, 'foo', 'bar2', token=token) pytest.raises( consul.ACLPermissionDenied, c.kv.delete, 'foo', token=token) assert c.kv.get('private/foo')[1]['Value'] == six.b('bar') pytest.raises( consul.ACLPermissionDenied, c.kv.get, 'private/foo', token=token) pytest.raises( consul.ACLPermissionDenied, c.kv.put, 'private/foo', 'bar2', token=token) pytest.raises( consul.ACLPermissionDenied, c.kv.delete, 'private/foo', token=token) # test token pass through for service registration pytest.raises( consul.ACLPermissionDenied, c.agent.service.register, "bar-1", token=token) c.agent.service.register("foo-1", token=token) index, data = c.health.service('foo-1', token=token) assert data[0]['Service']['ID'] == "foo-1" index, data = c.health.checks('foo-1', token=token) assert data == [] index, data = c.health.service('bar-1', token=token) assert not data # clean up assert c.agent.service.deregister('foo-1') is True c.acl.destroy(token, token=master_token) acls = c.acl.list(token=master_token) assert set([x['ID'] for x in acls]) == \ set(['anonymous', master_token]) def test_acl_implicit_token_use(self, acl_consul): # configure client to use the master token by default c = consul.Consul(port=acl_consul.port, token=acl_consul.token) master_token = acl_consul.token acls = c.acl.list() assert set([x['ID'] for x in acls]) == \ set(['anonymous', master_token]) assert c.acl.info('foo') is None compare = [c.acl.info(master_token), c.acl.info('anonymous')] compare.sort(key=operator.itemgetter('ID')) assert acls == compare rules = """ key "" { policy = "read" } key "private/" { policy = "deny" } """ token = c.acl.create(rules=rules) assert c.acl.info(token)['Rules'] == rules token2 = c.acl.clone(token) assert c.acl.info(token2)['Rules'] == rules assert c.acl.update(token2, name='Foo') == token2 assert c.acl.info(token2)['Name'] == 'Foo' assert c.acl.destroy(token2) is True assert c.acl.info(token2) is None c.kv.put('foo', 'bar') c.kv.put('private/foo', 'bar') c_limited = consul.Consul(port=acl_consul.port, token=token) assert c_limited.kv.get('foo')[1]['Value'] == six.b('bar') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.put, 'foo', 'bar2') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.delete, 'foo') assert c.kv.get('private/foo')[1]['Value'] == six.b('bar') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.get, 'private/foo') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.put, 'private/foo', 'bar2') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.delete, 'private/foo') # check we can override the client's default token pytest.raises( consul.ACLPermissionDenied, c.kv.get, 'private/foo', token=token ) pytest.raises( consul.ACLPermissionDenied, c.kv.put, 'private/foo', 'bar2', token=token) pytest.raises( consul.ACLPermissionDenied, c.kv.delete, 'private/foo', token=token) # clean up c.acl.destroy(token) acls = c.acl.list() assert set([x['ID'] for x in acls]) == \ set(['anonymous', master_token]) def test_status_leader(self, consul_port): c = consul.Consul(port=consul_port) agent_self = c.agent.self() leader = c.status.leader() addr_port = agent_self['Stats']['consul']['leader_addr'] assert leader == addr_port, \ "Leader 
value was {0}, expected value " \ "was {1}".format(leader, addr_port) def test_status_peers(self, consul_port): c = consul.Consul(port=consul_port) agent_self = c.agent.self() addr_port = agent_self['Stats']['consul']['leader_addr'] peers = c.status.peers() assert addr_port in peers, \ "Expected value '{0}' " \ "in peer list but it was not present".format(addr_port) def test_query(self, consul_port): c = consul.Consul(port=consul_port) # check that query list is empty queries = c.query.list() assert queries == [] # create a new named query query_service = 'foo' query_name = 'fooquery' query = c.query.create(query_service, query_name) # assert response contains query ID assert 'ID' in query \ and query['ID'] is not None \ and str(query['ID']) != '' # retrieve query using id and name queries = c.query.get(query['ID']) assert queries != [] \ and len(queries) == 1 assert queries[0]['Name'] == query_name \ and queries[0]['ID'] == query['ID'] # explain query assert c.query.explain(query_name)['Query'] # delete query assert c.query.delete(query['ID']) def test_coordinate(self, consul_port): c = consul.Consul(port=consul_port) c.coordinate.nodes() c.coordinate.datacenters() assert set(c.coordinate.datacenters()[0].keys()) == \ set(['Datacenter', 'Coordinates', 'AreaID']) def test_operator(self, consul_port): c = consul.Consul(port=consul_port) config = c.operator.raft_config() assert config["Index"] == 1 leader = False voter = False for server in config["Servers"]: if server["Leader"]: leader = True if server["Voter"]: voter = True assert leader assert voter
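Among other things, the kv tests above (test_kv_wait in particular) exercise consul's blocking queries: passing the previously returned index back into kv.get() makes the call block, up to the wait interval, until the key changes. A minimal watch-loop sketch; the port, key and wait value are example values:

import consul

c = consul.Consul(port=8500)
index = None
while True:
    index, data = c.kv.get('config/feature-flag', index=index, wait='30s')
    # a wait timeout returns the same index and data, so real watchers usually
    # compare values before reacting
    if data is not None:
        print('current value:', data['Value'])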
sdk/python/pulumi_kubernetes/coordination/v1/_inputs.py
polivbr/pulumi-kubernetes
277
1001
<reponame>polivbr/pulumi-kubernetes # coding=utf-8 # *** WARNING: this file was generated by pulumigen. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ... import meta as _meta __all__ = [ 'LeaseSpecArgs', 'LeaseArgs', ] @pulumi.input_type class LeaseSpecArgs: def __init__(__self__, *, acquire_time: Optional[pulumi.Input[str]] = None, holder_identity: Optional[pulumi.Input[str]] = None, lease_duration_seconds: Optional[pulumi.Input[int]] = None, lease_transitions: Optional[pulumi.Input[int]] = None, renew_time: Optional[pulumi.Input[str]] = None): """ LeaseSpec is a specification of a Lease. :param pulumi.Input[str] acquire_time: acquireTime is a time when the current lease was acquired. :param pulumi.Input[str] holder_identity: holderIdentity contains the identity of the holder of a current lease. :param pulumi.Input[int] lease_duration_seconds: leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime. :param pulumi.Input[int] lease_transitions: leaseTransitions is the number of transitions of a lease between holders. :param pulumi.Input[str] renew_time: renewTime is a time when the current holder of a lease has last updated the lease. """ if acquire_time is not None: pulumi.set(__self__, "acquire_time", acquire_time) if holder_identity is not None: pulumi.set(__self__, "holder_identity", holder_identity) if lease_duration_seconds is not None: pulumi.set(__self__, "lease_duration_seconds", lease_duration_seconds) if lease_transitions is not None: pulumi.set(__self__, "lease_transitions", lease_transitions) if renew_time is not None: pulumi.set(__self__, "renew_time", renew_time) @property @pulumi.getter(name="acquireTime") def acquire_time(self) -> Optional[pulumi.Input[str]]: """ acquireTime is a time when the current lease was acquired. """ return pulumi.get(self, "acquire_time") @acquire_time.setter def acquire_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "acquire_time", value) @property @pulumi.getter(name="holderIdentity") def holder_identity(self) -> Optional[pulumi.Input[str]]: """ holderIdentity contains the identity of the holder of a current lease. """ return pulumi.get(self, "holder_identity") @holder_identity.setter def holder_identity(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "holder_identity", value) @property @pulumi.getter(name="leaseDurationSeconds") def lease_duration_seconds(self) -> Optional[pulumi.Input[int]]: """ leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime. """ return pulumi.get(self, "lease_duration_seconds") @lease_duration_seconds.setter def lease_duration_seconds(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "lease_duration_seconds", value) @property @pulumi.getter(name="leaseTransitions") def lease_transitions(self) -> Optional[pulumi.Input[int]]: """ leaseTransitions is the number of transitions of a lease between holders. 
""" return pulumi.get(self, "lease_transitions") @lease_transitions.setter def lease_transitions(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "lease_transitions", value) @property @pulumi.getter(name="renewTime") def renew_time(self) -> Optional[pulumi.Input[str]]: """ renewTime is a time when the current holder of a lease has last updated the lease. """ return pulumi.get(self, "renew_time") @renew_time.setter def renew_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "renew_time", value) @pulumi.input_type class LeaseArgs: def __init__(__self__, *, api_version: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None, spec: Optional[pulumi.Input['LeaseSpecArgs']] = None): """ Lease defines a lease concept. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input['LeaseSpecArgs'] spec: Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status """ if api_version is not None: pulumi.set(__self__, "api_version", 'coordination.k8s.io/v1') if kind is not None: pulumi.set(__self__, "kind", 'Lease') if metadata is not None: pulumi.set(__self__, "metadata", metadata) if spec is not None: pulumi.set(__self__, "spec", spec) @property @pulumi.getter(name="apiVersion") def api_version(self) -> Optional[pulumi.Input[str]]: """ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources """ return pulumi.get(self, "api_version") @api_version.setter def api_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "api_version", value) @property @pulumi.getter def kind(self) -> Optional[pulumi.Input[str]]: """ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds """ return pulumi.get(self, "kind") @kind.setter def kind(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kind", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]: """ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]): pulumi.set(self, "metadata", value) @property @pulumi.getter def spec(self) -> Optional[pulumi.Input['LeaseSpecArgs']]: """ Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status """ return pulumi.get(self, "spec") @spec.setter def spec(self, value: Optional[pulumi.Input['LeaseSpecArgs']]): pulumi.set(self, "spec", value)
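A minimal usage sketch for the generated args classes above, assuming the accompanying Lease resource class from pulumi_kubernetes.coordination.v1 and an active Pulumi program; the resource name and field values are illustrative only.

import pulumi_kubernetes as k8s

# Declare a Lease using the LeaseSpecArgs input type defined above.
lease = k8s.coordination.v1.Lease(
    "leader-election",
    metadata=k8s.meta.v1.ObjectMetaArgs(name="controller-leader", namespace="kube-system"),
    spec=k8s.coordination.v1.LeaseSpecArgs(
        holder_identity="controller-0",   # identity of the current lease holder
        lease_duration_seconds=15,        # wait required before a force acquire
        lease_transitions=0,
    ),
)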
log_system_information.py
ibaiGorordo/depthai
476
1009
#!/usr/bin/env python3
import json
import platform

def make_sys_report(anonymous=False, skipUsb=False, skipPackages=False):
    def get_usb():
        try:
            import usb.core
        except ImportError:
            yield "NoLib"
            return
        speeds = ["Unknown", "Low", "Full", "High", "Super", "SuperPlus"]
        format_hex = lambda val: f"{val:#0{6}x}"
        try:
            for dev in usb.core.find(find_all=True):
                yield {
                    "port": dev.port_number,
                    "vendor_id": format_hex(dev.idVendor),
                    "product_id": format_hex(dev.idProduct),
                    "speed": speeds[dev.speed] if dev.speed < len(speeds) else dev.speed
                }
        except usb.core.NoBackendError:
            yield "No USB backend found"

    result = {
        "architecture": ' '.join(platform.architecture()).strip(),
        "machine": platform.machine(),
        "platform": platform.platform(),
        "processor": platform.processor(),
        "python_build": ' '.join(platform.python_build()).strip(),
        "python_compiler": platform.python_compiler(),
        "python_implementation": platform.python_implementation(),
        "python_version": platform.python_version(),
        "release": platform.release(),
        "system": platform.system(),
        "version": platform.version(),
        "win32_ver": ' '.join(platform.win32_ver()).strip(),
    }

    if not skipPackages:
        from pip._internal.operations.freeze import freeze
        result["packages"] = list(freeze(local_only=True))

    if not skipUsb:
        result["usb"] = list(get_usb())

    if not anonymous:
        result["uname"] = ' '.join(platform.uname()).strip()

    return result

if __name__ == "__main__":
    data = make_sys_report()

    with open("log_system_information.json", "w") as f:
        json.dump(data, f, indent=4)

    print(json.dumps(data, indent=4))
    print("System info gathered successfully - saved as \"log_system_information.json\"")
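A short usage sketch of the helper above when imported as a module; the module name follows this file's path and is an assumption about how it is deployed.

# Gather a trimmed report without USB probing or the installed-package list.
from log_system_information import make_sys_report

report = make_sys_report(anonymous=True, skipUsb=True, skipPackages=True)
print(report["platform"], report["python_version"])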
lemur/deployment/service.py
rajatsharma94/lemur
1656
1014
from lemur import database


def rotate_certificate(endpoint, new_cert):
    """
    Rotates a certificate on a given endpoint.

    :param endpoint:
    :param new_cert:
    :return:
    """
    # ensure that certificate is available for rotation
    endpoint.source.plugin.update_endpoint(endpoint, new_cert)
    endpoint.certificate = new_cert
    database.update(endpoint)
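A hedged sketch of how the helper above might be called from other Lemur code; the endpoint and certificate objects stand in for Lemur's database models and are not defined here.

from lemur.deployment.service import rotate_certificate

def rotate_endpoints(endpoints, new_cert):
    # Push the replacement certificate to every endpoint in the iterable.
    for endpoint in endpoints:
        rotate_certificate(endpoint, new_cert)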
cinder/tests/unit/fake_group_snapshot.py
lightsey/cinder
571
1028
<filename>cinder/tests/unit/fake_group_snapshot.py # Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import objects from cinder.tests.unit import fake_constants as fake def fake_db_group_snapshot(**updates): db_group_snapshot = { 'id': fake.GROUP_SNAPSHOT_ID, 'name': 'group-1', 'status': 'available', 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'group_type_id': fake.GROUP_TYPE_ID, 'group_id': fake.GROUP_ID, } for name, field in objects.GroupSnapshot.fields.items(): if name in db_group_snapshot: continue if field.nullable: db_group_snapshot[name] = None elif field.default != fields.UnspecifiedDefault: db_group_snapshot[name] = field.default else: raise Exception('fake_db_group_snapshot needs help with %s.' % name) if updates: db_group_snapshot.update(updates) return db_group_snapshot def fake_group_snapshot_obj(context, **updates): return objects.GroupSnapshot._from_db_object( context, objects.GroupSnapshot(), fake_db_group_snapshot(**updates))
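A small sketch of how these fakes are typically used in a unit test; the admin-context call assumes a configured Cinder test environment.

from cinder import context
from cinder.tests.unit import fake_group_snapshot

ctxt = context.get_admin_context()
# Any keyword arguments override the corresponding fake DB fields.
snap = fake_group_snapshot.fake_group_snapshot_obj(ctxt, status='creating')
assert snap.status == 'creating'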
MicroPython_BUILD/components/micropython/esp32/modules_examples/mqtt_example.py
FlorianPoot/MicroPython_ESP32_psRAM_LoBo
838
1057
import network
import utime

def conncb(task):
    print("[{}] Connected".format(task))

def disconncb(task):
    print("[{}] Disconnected".format(task))

def subscb(task):
    print("[{}] Subscribed".format(task))

def pubcb(pub):
    print("[{}] Published: {}".format(pub[0], pub[1]))

def datacb(msg):
    print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])

mqtt = network.mqtt("loboris", "mqtt://loboris.eu", user="wifimcu", password="<PASSWORD>", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)

# secure connection requires more memory and may not work
# mqtts = network.mqtt("eclipse", "mqtts://iot.eclipse.org", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# wsmqtt = network.mqtt("eclipse", "ws://iot.eclipse.org:80/ws", cleansession=True, data_cb=datacb)

mqtt.start()

#mqtt.config(lwt_topic='status', lwt_msg='Disconnected')

'''
# Wait until status is: (1, 'Connected')

mqtt.subscribe('test')
mqtt.publish('test', 'Hi from Micropython')

mqtt.stop()
'''

# ==================
# ThingSpeak example
# ==================

import network

def datacb(msg):
    print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])

thing = network.mqtt("thingspeak", "mqtt://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb)
# or secure connection
#thing = network.mqtt("thingspeak", "mqtts://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb)

thingspeakChannelId = "123456"                           # enter Thingspeak Channel ID
thingspeakChannelWriteApiKey = "ThingspeakWriteAPIKey"   # EDIT - enter Thingspeak Write API Key
thingspeakFieldNo = 1
thingSpeakChanelFormat = "json"

pubchan = "channels/{:s}/publish/{:s}".format(thingspeakChannelId, thingspeakChannelWriteApiKey)
pubfield = "channels/{:s}/publish/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
subchan = "channels/{:s}/subscribe/{:s}/{:s}".format(thingspeakChannelId, thingSpeakChanelFormat, thingspeakChannelWriteApiKey)
subfield = "channels/{:s}/subscribe/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)

thing.start()

tmo = 0
while thing.status()[0] != 2:
    utime.sleep_ms(100)
    tmo += 1
    if tmo > 80:
        print("Not connected")
        break

# subscribe to channel
thing.subscribe(subchan)

# subscribe to field
thing.subscribe(subfield)

# publish to channel
# Payload can include any of those fields separated by ';':
# "field1=value;field2=value;...;field8=value;latitude=value;longitude=value;elevation=value;status=value"
thing.publish(pubchan, "field1=25.2;status=On line")

# Publish to field
thing.publish(pubfield, "24.5")
render/PC_Normalisation.py
sun-pyo/OcCo
158
1070
<reponame>sun-pyo/OcCo<gh_stars>100-1000
#  Copyright (c) 2020. <NAME>, <EMAIL>

import os, open3d, numpy as np

File_ = open('ModelNet_flist_short.txt', 'w')

if __name__ == "__main__":
    root_dir = "../data/ModelNet_subset/"

    for root, dirs, files in os.walk(root_dir, topdown=False):
        for file in files:
            if '.ply' in file:
                amesh = open3d.io.read_triangle_mesh(os.path.join(root, file))
                out_file_name = os.path.join(root, file).replace('.ply', '_normalised.obj')
                center = amesh.get_center()
                amesh.translate(-center)
                maxR = (np.asarray(amesh.vertices)**2).sum(axis=1).max()**(1/2)
                # we found divided by (2*maxR) has best rendered visualisation results
                amesh.scale(1/(2*maxR))
                open3d.io.write_triangle_mesh(out_file_name, amesh)
                File_.writelines(out_file_name.replace('.obj', '').replace(root_dir, '') + '\n')
                print(out_file_name)
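The same centre-and-scale normalisation, written as a self-contained numpy sketch for plain point arrays (no Open3D mesh required); the function name is illustrative.

import numpy as np

def normalise_points(points):
    # Centre the cloud at the origin, then scale by twice the maximum radius,
    # mirroring the (2 * maxR) convention used above.
    centred = points - points.mean(axis=0)
    max_r = np.sqrt((centred ** 2).sum(axis=1).max())
    return centred / (2 * max_r)

print(normalise_points(np.random.rand(100, 3)).max())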
python/GafferUI/ColorSwatchPlugValueWidget.py
ddesmond/gaffer
561
1072
########################################################################## # # Copyright (c) 2013, <NAME>. All rights reserved. # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of <NAME> nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import weakref import imath import Gaffer import GafferUI class ColorSwatchPlugValueWidget( GafferUI.PlugValueWidget ) : def __init__( self, plugs, **kw ) : self.__swatch = GafferUI.ColorSwatch() GafferUI.PlugValueWidget.__init__( self, self.__swatch, plugs, **kw ) ## \todo How do set maximum height with a public API? 
self.__swatch._qtWidget().setMaximumHeight( 20 ) self._addPopupMenu( self.__swatch ) self.__swatch.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ), scoped = False ) self.__swatch.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ), scoped = False ) self.__swatch.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ), scoped = False ) self.__swatch.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False ) self._updateFromPlugs() def setHighlighted( self, highlighted ) : GafferUI.PlugValueWidget.setHighlighted( self, highlighted ) self.__swatch.setHighlighted( highlighted ) def _updateFromPlugs( self ) : with self.getContext() : value = _colorFromPlugs( self.getPlugs() ) self.__swatch.setColor( value ) def __buttonPress( self, widget, event ) : if event.buttons == event.Buttons.Left : return True return False def __dragBegin( self, widget, event ) : GafferUI.Pointer.setCurrent( "rgba" ) return self.__swatch.getColor() def __dragEnd( self, widget, event ) : GafferUI.Pointer.setCurrent( None ) def __buttonRelease( self, widget, event ) : if event.button != event.Buttons.Left : return False if not self._editable() : return False _ColorPlugValueDialogue.acquire( self.getPlugs() ) return True def _colorFromPlugs( plugs ) : if not len( plugs ) : return imath.Color4f( 0 ) # ColorSwatch only supports one colour, and doesn't have # an "indeterminate" state, so when we have multiple plugs # the best we can do is take an average. return sum( p.getValue() for p in plugs ) / len( plugs ) ## \todo Perhaps we could make this a part of the public API? Perhaps we could also make a # PlugValueDialogue base class to share some of the work with the dialogue made by the # SplinePlugValueWidget. Or perhaps the `acquire()` here and `NodeSetEditor.acquire()` should # actually be functionality of CompoundEditor? 
class _ColorPlugValueDialogue( GafferUI.ColorChooserDialogue ) : def __init__( self, plugs, parentWindow ) : GafferUI.ColorChooserDialogue.__init__( self, color = _colorFromPlugs( plugs ) ) # we use these to decide which actions to merge into a single undo self.__lastChangedReason = None self.__mergeGroupId = 0 self.__colorChangedConnection = self.colorChooser().colorChangedSignal().connect( Gaffer.WeakMethod( self.__colorChanged ), scoped = False ) self.confirmButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False ) self.cancelButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False ) self.__plugs = plugs self.__initialValues = { p : p.getValue() for p in self.__plugs } nodes = { p.node() for p in self.__plugs } self.__plugSetConnections = [ n.plugSetSignal().connect( Gaffer.WeakMethod( self.__plugSet ), scoped = False ) for n in nodes ] for node in nodes : node.parentChangedSignal().connect( Gaffer.WeakMethod( self.__destroy ), scoped = False ) plug = next( iter( self.__plugs ) ) if len( self.__plugs ) == 1 : self.setTitle( plug.relativeName( plug.ancestor( Gaffer.ScriptNode ) ) ) else : self.setTitle( "{} plugs".format( len( self.__plugs ) ) ) self.__plugSet( plug ) parentWindow.addChildWindow( self, removeOnClose = True ) @classmethod def acquire( cls, plugs ) : plug = next( iter( plugs ) ) script = plug.node().scriptNode() scriptWindow = GafferUI.ScriptWindow.acquire( script ) for window in scriptWindow.childWindows() : if isinstance( window, cls ) and window.__plugs == plugs : window.setVisible( True ) return window window = _ColorPlugValueDialogue( plugs, scriptWindow ) window.setVisible( True ) return False def __plugSet( self, plug ) : if plug in self.__plugs : with Gaffer.BlockedConnection( self.__colorChangedConnection ) : self.colorChooser().setColor( _colorFromPlugs( self.__plugs ) ) def __colorChanged( self, colorChooser, reason ) : if not GafferUI.ColorChooser.changesShouldBeMerged( self.__lastChangedReason, reason ) : self.__mergeGroupId += 1 self.__lastChangedReason = reason with Gaffer.UndoScope( next( iter( self.__plugs ) ).ancestor( Gaffer.ScriptNode ), mergeGroup = "ColorPlugValueDialogue%d%d" % ( id( self, ), self.__mergeGroupId ) ) : with Gaffer.BlockedConnection( self.__plugSetConnections ) : for plug in self.__plugs : plug.setValue( self.colorChooser().getColor() ) def __buttonClicked( self, button ) : if button is self.cancelButton : with Gaffer.UndoScope( next( iter( self.__plugs ) ).ancestor( Gaffer.ScriptNode ) ) : for p, v in self.__initialValues.items() : p.setValue( v ) self.parent().removeChild( self ) # Workaround for https://bugreports.qt-project.org/browse/QTBUG-26761. assert( not self.visible() ) GafferUI.WidgetAlgo.keepUntilIdle( self ) def __destroy( self, *unused ) : self.parent().removeChild( self )
test/integration/component/test_browse_templates2.py
ycyun/ablestack-cloud
1131
1083
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Import Local Modules import marvin from nose.plugins.attrib import attr from marvin.cloudstackTestCase import cloudstackTestCase import unittest from marvin.cloudstackAPI import * from marvin.lib.utils import * from marvin.lib.base import * from marvin.lib.common import * from marvin.codes import PASS, FAILED, SUCCESS, XEN_SERVER from marvin.sshClient import SshClient import requests requests.packages.urllib3.disable_warnings() import random import string import telnetlib import os import urllib.request, urllib.parse, urllib.error import time import tempfile _multiprocess_shared_ = True class TestBrowseUploadTemplate(cloudstackTestCase): """ Tests for browser based upload template feature. Once all issues in test_browse_templates.py are fixed, this should be merged back """ @classmethod def setUpClass(cls): cls.testClient = super(TestBrowseUploadTemplate, cls).getClsTestClient() cls.testdata = cls.testClient.getParsedTestDataConfig() cls.apiclient = cls.testClient.getApiClient() cls.hypervisor = cls.testClient.getHypervisorInfo() cls._cleanup = [] cls.cleanup = [] hosts = list_hosts( cls.apiclient, type="Routing" ) if hosts is None: cls.SkipTest( "There are no hypervisor's available. 
Check list hosts response") cls.uploadtemplateformat = "VHD" cls.templatename = "test" cls.templatehypervisor = "XenServer" cls.templateostypeid = 142 cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) cls.domain = get_domain(cls.apiclient) cls.pod = get_pod(cls.apiclient, cls.zone.id) cls.account = Account.create( cls.apiclient, cls.testdata["account"], domainid=cls.domain.id ) cls._cleanup = [ cls.account ] def waitForSystemVMAgent(self, vmname): timeout = self.testdata["timeout"] while True: list_host_response = list_hosts( self.apiclient, name=vmname ) if list_host_response and list_host_response[0].state == 'Up': break if timeout == 0: raise Exception("Timed out waiting for SSVM agent to be Up") time.sleep(self.testdata["sleep"]) timeout = timeout - 1 def destroy_ssvm(self): list_ssvm_response = list_ssvms( self.apiclient, systemvmtype='secondarystoragevm', state='Running', zoneid=self.zone.id ) self.assertEqual( isinstance(list_ssvm_response, list), True, "Check list response returns a valid list" ) ssvm_response = list_ssvm_response[0] old_name = ssvm_response.name self.debug("Destroying SSVM: %s" % ssvm_response.id) cmd = destroySystemVm.destroySystemVmCmd() cmd.id = ssvm_response.id self.apiclient.destroySystemVm(cmd) timeout = self.testdata["timeout"] while True: list_ssvm_response = list_ssvms( self.apiclient, zoneid=self.zone.id, systemvmtype='secondarystoragevm' ) if isinstance(list_ssvm_response, list): if list_ssvm_response[0].state == 'Running': break if timeout == 0: raise Exception("List SSVM call failed!") time.sleep(self.testdata["sleep"]) timeout = timeout - 1 ssvm_response = list_ssvm_response[0] # Verify Name, Public IP, Private IP and Link local IP # for newly created SSVM self.assertNotEqual( ssvm_response.name, old_name, "Check SSVM new name with name of destroyed SSVM" ) self.assertEqual( hasattr(ssvm_response, 'privateip'), True, "Check whether SSVM has private IP field" ) self.assertEqual( hasattr(ssvm_response, 'linklocalip'), True, "Check whether SSVM has link local IP field" ) self.assertEqual( hasattr(ssvm_response, 'publicip'), True, "Check whether SSVM has public IP field" ) # Wait for the agent to be up self.waitForSystemVMAgent(ssvm_response.name) return @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false") def test_browser_upload_template_incomplete(self): """ Test browser based incomplete template upload, followed by SSVM destroy. Template should go to UploadAbandoned state and get cleaned up. 
""" try: self.debug("========================= Test browser based incomplete template upload ========================") #Only register template, without uploading cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd() cmd.zoneid = self.zone.id cmd.format = self.uploadtemplateformat cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase)) cmd.account=self.account.name cmd.domainid=self.domain.id cmd.displaytext=cmd.name cmd.hypervisor=self.templatehypervisor cmd.ostypeid=self.templateostypeid template_response=self.apiclient.getUploadParamsForTemplate(cmd) #Destroy SSVM, and wait for new one to start self.destroy_ssvm() wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"]) #Verify that the template is cleaned up as part of sync-up during new SSVM start list_template_response=Template.list( self.apiclient, id=template_response.id, templatefilter="all", zoneid=self.zone.id) self.assertEqual(list_template_response, None, "Template is not cleaned up, some issue with template sync-up") except Exception as e: self.fail("Exception occurred : %s" % e) return @classmethod def tearDownClass(self): try: self.apiclient = super(TestBrowseUploadTemplate, self).getClsTestClient().getApiClient() cleanup_resources(self.apiclient, self._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return
Adafruit_BluefruitLE/interfaces/__init__.py
acoomans/Adafruit_Python_BluefruitLE
415
1087
<reponame>acoomans/Adafruit_Python_BluefruitLE
from .provider import Provider
from .adapter import Adapter
from .device import Device
from .gatt import GattService, GattCharacteristic, GattDescriptor
parallelformers/policies/base/auto.py
Oaklight/parallelformers
454
1098
<filename>parallelformers/policies/base/auto.py # Copyright 2021 TUNiB inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import suppress from typing import List, Union from torch import nn from parallelformers.policies.base import Policy class AutoPolicy: """Class for finds automatically appropriate policies for the current model""" def __init__(self): self.builtin_policies = {} with suppress(Exception): from transformers.models.gpt_neo.modeling_gpt_neo import ( GPTNeoPreTrainedModel, ) from parallelformers.policies.gpt_neo import GPTNeoPolicy self.builtin_policies[GPTNeoPreTrainedModel] = [ GPTNeoPolicy, ] with suppress(Exception): from transformers.models.bert.modeling_bert import ( BertPreTrainedModel, ) from parallelformers.policies.bert import BertPolicy self.builtin_policies[BertPreTrainedModel] = [ BertPolicy, ] with suppress(Exception): from transformers.models.bart.modeling_bart import ( BartPretrainedModel, ) from parallelformers.policies.bart import ( BartDecoderPolicy, BartEncoderPolicy, ) self.builtin_policies[BartPretrainedModel] = [ BartEncoderPolicy, BartDecoderPolicy, ] with suppress(Exception): from transformers.models.blenderbot.modeling_blenderbot import ( BlenderbotPreTrainedModel, ) from parallelformers.policies.blenderbot import ( BlenderbotDecoderPolicy, BlenderbotEncoderPolicy, ) self.builtin_policies[BlenderbotPreTrainedModel] = [ BlenderbotEncoderPolicy, BlenderbotDecoderPolicy, ] with suppress(Exception): from transformers.models.deberta.modeling_deberta import ( DebertaPreTrainedModel, ) from parallelformers.policies.deberta import DebertaPolicy self.builtin_policies[DebertaPreTrainedModel] = [ DebertaPolicy, ] with suppress(Exception): from transformers.models.transfo_xl.modeling_transfo_xl import ( TransfoXLPreTrainedModel, ) from parallelformers.policies.transfo_xl import TransfoXLPolicy self.builtin_policies[TransfoXLPreTrainedModel] = [ TransfoXLPolicy, ] with suppress(Exception): from transformers.models.roberta.modeling_roberta import ( RobertaPreTrainedModel, ) from parallelformers.policies.roberta import RobertaPolicy self.builtin_policies[RobertaPreTrainedModel] = [ RobertaPolicy, ] with suppress(Exception): from transformers.models.albert.modeling_albert import ( AlbertPreTrainedModel, ) from parallelformers.policies.albert import AlbertPolicy self.builtin_policies[AlbertPreTrainedModel] = [ AlbertPolicy, ] with suppress(Exception): from transformers.models.gpt2.modeling_gpt2 import ( GPT2PreTrainedModel, ) from parallelformers.policies.gpt2 import GPT2Policy self.builtin_policies[GPT2PreTrainedModel] = [ GPT2Policy, ] with suppress(Exception): from transformers.models.ctrl.modeling_ctrl import ( CTRLPreTrainedModel, ) from parallelformers.policies.ctrl import CTRLPolicy self.builtin_policies[CTRLPreTrainedModel] = [ CTRLPolicy, ] with suppress(Exception): from transformers.models.deberta_v2.modeling_deberta_v2 import ( DebertaV2PreTrainedModel, ) from parallelformers.policies.deberta_v2 import DebertaV2Policy 
self.builtin_policies[DebertaV2PreTrainedModel] = [ DebertaV2Policy, ] with suppress(Exception): from transformers.models.openai.modeling_openai import ( OpenAIGPTPreTrainedModel, ) from parallelformers.policies.openai import OpenAIGPTPolicy self.builtin_policies[OpenAIGPTPreTrainedModel] = [ OpenAIGPTPolicy, ] with suppress(Exception): from transformers.models.electra.modeling_electra import ( ElectraPreTrainedModel, ) from parallelformers.policies.electra import ElectraPolicy self.builtin_policies[ElectraPreTrainedModel] = [ ElectraPolicy, ] with suppress(Exception): from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallPreTrainedModel, ) from parallelformers.policies.blenderbot_small import ( BlenderbotSmallDecoderPolicy, BlenderbotSmallEncoderPolicy, ) self.builtin_policies[BlenderbotSmallPreTrainedModel] = [ BlenderbotSmallEncoderPolicy, BlenderbotSmallDecoderPolicy, ] with suppress(Exception): from transformers.models.distilbert.modeling_distilbert import ( DistilBertPreTrainedModel, ) from parallelformers.policies.distil_bert import DistilBertPolicy self.builtin_policies[DistilBertPreTrainedModel] = [ DistilBertPolicy, ] with suppress(Exception): from transformers.models.convbert.modeling_convbert import ( ConvBertPreTrainedModel, ) from parallelformers.policies.convbert import ConvBertPolicy self.builtin_policies[ConvBertPreTrainedModel] = [ ConvBertPolicy, ] with suppress(Exception): from transformers.models.bert_generation.modeling_bert_generation import ( BertGenerationPreTrainedModel, ) from parallelformers.policies.bert import BertPolicy self.builtin_policies[BertGenerationPreTrainedModel] = [ BertPolicy, ] with suppress(Exception): from transformers.models.big_bird.modeling_big_bird import ( BigBirdPreTrainedModel, ) from parallelformers.policies.bigbird import BigBirdPolicy self.builtin_policies[BigBirdPreTrainedModel] = [ BigBirdPolicy, ] with suppress(Exception): from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import ( BigBirdPegasusPreTrainedModel, ) from parallelformers.policies.bigbird_pegasus import ( BigBirdPegasusDecoderPolicy, BigBirdPegasusEncoderPolicy, ) self.builtin_policies[BigBirdPegasusPreTrainedModel] = [ BigBirdPegasusEncoderPolicy, BigBirdPegasusDecoderPolicy, ] with suppress(Exception): from transformers.models.vit.modeling_vit import ViTPreTrainedModel from parallelformers.policies.vit import ViTPolicy self.builtin_policies[ViTPreTrainedModel] = [ ViTPolicy, ] with suppress(Exception): from transformers.models.deit.modeling_deit import ( DeiTPreTrainedModel, ) from parallelformers.policies.deit import DeiTPolicy self.builtin_policies[DeiTPreTrainedModel] = [DeiTPolicy] with suppress(Exception): from transformers.models.mbart.modeling_mbart import ( MBartPreTrainedModel, ) from parallelformers.policies.mbart import ( MBartDecoderPolicy, MBartEncoderPolicy, ) self.builtin_policies[MBartPreTrainedModel] = [ MBartEncoderPolicy, MBartDecoderPolicy, ] with suppress(Exception): from transformers.models.t5.modeling_t5 import T5PreTrainedModel from parallelformers.policies.t5 import T5Policy self.builtin_policies[T5PreTrainedModel] = [ T5Policy, ] with suppress(Exception): from transformers.models.pegasus.modeling_pegasus import ( PegasusPreTrainedModel, ) from parallelformers.policies.pegasus import ( PegasusDecoderPolicy, PegasusEncoderPolicy, ) self.builtin_policies[PegasusPreTrainedModel] = [ PegasusEncoderPolicy, PegasusDecoderPolicy, ] with suppress(Exception): from 
transformers.models.fsmt.modeling_fsmt import ( PretrainedFSMTModel, ) from parallelformers.policies.fsmt import ( FSMTDecoderPolicy, FSMTEncoderPolicy, ) self.builtin_policies[PretrainedFSMTModel] = [ FSMTEncoderPolicy, FSMTDecoderPolicy, ] with suppress(Exception): from transformers.models.xlm.modeling_xlm import XLMPreTrainedModel from parallelformers.policies.xlm import ( XLMAttentionPolicy, XLMMLPPolicy, ) self.builtin_policies[XLMPreTrainedModel] = [ XLMAttentionPolicy, XLMMLPPolicy, ] with suppress(Exception): from transformers.models.m2m_100.modeling_m2m_100 import ( M2M100PreTrainedModel, ) from parallelformers.policies.m2m_100 import ( M2M100DecoderPolicy, M2M100EncoderPolicy, ) self.builtin_policies[M2M100PreTrainedModel] = [ M2M100EncoderPolicy, M2M100DecoderPolicy, ] with suppress(Exception): from transformers.models.marian.modeling_marian import ( MarianPreTrainedModel, ) from parallelformers.policies.marian import ( MarianDecoderPolicy, MarianEncoderPolicy, ) self.builtin_policies[MarianPreTrainedModel] = [ MarianEncoderPolicy, MarianDecoderPolicy, ] with suppress(Exception): from transformers.models.mobilebert.modeling_mobilebert import ( MobileBertPreTrainedModel, ) from parallelformers.policies.mobilebert import MobileBertPolicy self.builtin_policies[MobileBertPreTrainedModel] = [ MobileBertPolicy, ] with suppress(Exception): from transformers.models.mpnet.modeling_mpnet import ( MPNetPreTrainedModel, ) from parallelformers.policies.mpnet import ( MPNetEncoderPolicy, MPNetLayerPolicy, ) self.builtin_policies[MPNetPreTrainedModel] = [ MPNetEncoderPolicy, MPNetLayerPolicy, ] with suppress(Exception): from transformers.models.luke.modeling_luke import ( LukePreTrainedModel, ) from parallelformers.policies.luke import LukePolicy self.builtin_policies[LukePreTrainedModel] = [ LukePolicy, ] with suppress(Exception): from transformers.models.dpr.modeling_dpr import ( DPRPretrainedContextEncoder, DPRPretrainedQuestionEncoder, DPRPretrainedReader, ) self.builtin_policies[DPRPretrainedReader] = [ BertPolicy, ] self.builtin_policies[DPRPretrainedQuestionEncoder] = [ BertPolicy, ] self.builtin_policies[DPRPretrainedContextEncoder] = [ BertPolicy, ] with suppress(Exception): from transformers.models.lxmert.modeling_lxmert import ( LxmertPreTrainedModel, ) from parallelformers.policies.lxmert import LxmertPolicy self.builtin_policies[LxmertPreTrainedModel] = [ LxmertPolicy, ] with suppress(Exception): from transformers.models.hubert.modeling_hubert import ( HubertPreTrainedModel, ) from parallelformers.policies.hubert import HubertPolicy self.builtin_policies[HubertPreTrainedModel] = [ HubertPolicy, ] with suppress(Exception): from transformers.models.wav2vec2.modeling_wav2vec2 import ( Wav2Vec2PreTrainedModel, ) from parallelformers.policies.wav2vec import Wav2VecPolicy self.builtin_policies[Wav2Vec2PreTrainedModel] = [ Wav2VecPolicy, ] with suppress(Exception): from transformers.models.xlnet.modeling_xlnet import ( XLNetPreTrainedModel, ) from parallelformers.policies.xlnet import XLNetPolicy self.builtin_policies[XLNetPreTrainedModel] = [ XLNetPolicy, ] with suppress(Exception): from transformers.models.retribert.modeling_retribert import ( RetriBertPreTrainedModel, ) self.builtin_policies[RetriBertPreTrainedModel] = [ BertPolicy, ] with suppress(Exception): from transformers.models.clip.modeling_clip import ( CLIPPreTrainedModel, ) from parallelformers.policies.clip import ( CLIPLayerPolicy, CLIPTextPolicy, CLIPVisionPolicy, ) self.builtin_policies[CLIPPreTrainedModel] = [ 
CLIPLayerPolicy, CLIPTextPolicy, CLIPVisionPolicy, ] with suppress(Exception): from transformers.models.detr.modeling_detr import ( DetrPreTrainedModel, ) from parallelformers.policies.detr import ( DetrDecoderPolicy, DetrEncoderPolicy, ) self.builtin_policies[DetrPreTrainedModel] = [ DetrEncoderPolicy, DetrDecoderPolicy, ] with suppress(Exception): from transformers.models.reformer.modeling_reformer import ( ReformerPreTrainedModel, ) from parallelformers.policies.reformer import ReformerPolicy self.builtin_policies[ReformerPreTrainedModel] = [ ReformerPolicy, ] with suppress(Exception): from transformers.models.longformer.modeling_longformer import ( LongformerPreTrainedModel, ) from parallelformers.policies.longformer import LongformerPolicy self.builtin_policies[LongformerPreTrainedModel] = [ LongformerPolicy, ] with suppress(Exception): from transformers.models.roformer.modeling_roformer import ( RoFormerPreTrainedModel, ) from parallelformers.policies.roformer import RoformerPolicy self.builtin_policies[RoFormerPreTrainedModel] = [ RoformerPolicy, ] with suppress(Exception): from transformers.models.ibert.modeling_ibert import ( IBertPreTrainedModel, ) from parallelformers.policies.ibert import IBertPolicy self.builtin_policies[IBertPreTrainedModel] = [ IBertPolicy, ] with suppress(Exception): from transformers.models.tapas.modeling_tapas import ( TapasPreTrainedModel, ) from parallelformers.policies.tapas import TapasPolicy self.builtin_policies[TapasPreTrainedModel] = [ TapasPolicy, ] with suppress(Exception): from transformers.models.funnel.modeling_funnel import ( FunnelPreTrainedModel, ) from parallelformers.policies.funnel import FunnelPolicy self.builtin_policies[FunnelPreTrainedModel] = [ FunnelPolicy, ] with suppress(Exception): from transformers.models.layoutlm.modeling_layoutlm import ( LayoutLMPreTrainedModel, ) from parallelformers.policies.layoutlm import LayoutLMPolicy self.builtin_policies[LayoutLMPreTrainedModel] = [ LayoutLMPolicy, ] with suppress(Exception): from transformers.models.led.modeling_led import LEDPreTrainedModel from parallelformers.policies.led import ( LEDDecoderPolicy, LEDEncoderPolicy, ) self.builtin_policies[LEDPreTrainedModel] = [ LEDEncoderPolicy, LEDDecoderPolicy, ] with suppress(Exception): from transformers.models.prophetnet.modeling_prophetnet import ( ProphetNetPreTrainedModel, ) from parallelformers.policies.prophetnet import ( ProphetNetDecoderPolicy, ProphetNetEncoderPolicy, ) self.builtin_policies[ProphetNetPreTrainedModel] = [ ProphetNetEncoderPolicy, ProphetNetDecoderPolicy, ] with suppress(Exception): from transformers.models.visual_bert.modeling_visual_bert import ( VisualBertPreTrainedModel, ) from parallelformers.policies.visual_bert import VisualBertPolicy self.builtin_policies[VisualBertPreTrainedModel] = [ VisualBertPolicy, ] with suppress(Exception): from transformers.models.speech_to_text.modeling_speech_to_text import ( Speech2TextPreTrainedModel, ) from parallelformers.policies.speech_to_text import ( Speech2TextDecoderPolicy, Speech2TextEncoderPolicy, ) self.builtin_policies[Speech2TextPreTrainedModel] = [ Speech2TextEncoderPolicy, Speech2TextDecoderPolicy, ] with suppress(Exception): from transformers.models.gptj.modeling_gptj import ( GPTJPreTrainedModel, ) from parallelformers.policies.gptj import GPTJPolicy self.builtin_policies[GPTJPreTrainedModel] = [ GPTJPolicy, ] with suppress(Exception): from transformers.models.megatron_bert import ( MegatronBertPreTrainedModel, ) from parallelformers.policies.megtron_bert import 
( MegatronBertPolicy, ) self.builtin_policies[MegatronBertPreTrainedModel] = [ MegatronBertPolicy, ] def get_policy(self, model: nn.Module) -> Union[List[Policy], None]: """ Find appropriate policies for the current model Args: model (nn.Module): model to parallelize Returns: Union[List[Policy], None]: appropriate policies or none """ for k, v in self.available().items(): if isinstance(model, k): return v return None def available(self): """Dictionary of available models and policies""" return self.builtin_policies
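A brief usage sketch for AutoPolicy.get_policy; constructing the model from a bare config avoids a weight download and is enough for the isinstance-based lookup.

from transformers import BertConfig, BertModel
from parallelformers.policies.base.auto import AutoPolicy

model = BertModel(BertConfig())
policies = AutoPolicy().get_policy(model)
print(policies)  # e.g. [BertPolicy] when the BERT policy import succeeded, otherwise None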
venv/Lib/site-packages/toolz/sandbox/__init__.py
ajayiagbebaku/NFL-Model
3749
1118
from .core import EqualityHashKey, unzip
from .parallel import fold
lm/validate.py
ericlin8545/grover
864
1124
<reponame>ericlin8545/grover # Original work Copyright 2018 The Google AI Language Team Authors. # Modified work Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from lm.modeling import model_fn_builder, GroverConfig import tensorflow as tf from lm.dataloader import input_fn_builder import numpy as np import tempfile import h5py from google.cloud import storage flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "config_file", 'configs/base.json', "The config json file corresponding to the pre-trained news model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "Input TF example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") flags.DEFINE_string( "validation_name", 'preds.h5', "Name to use") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained model).") flags.DEFINE_integer( "max_seq_length", 1024, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("batch_size", 32, "Batch size used for eval") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") # This is a handy little utility so that we can save the perplexities to TPU class gcloudwriter(): def __init__(self, gcloud_name): assert gcloud_name.startswith('gs://') self.gcloud_name = gcloud_name bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/', 1) bucket = storage.Client().get_bucket(bucket_name) self.blob = bucket.blob(blob_name) def __enter__(self): self.tempfile = tempfile.NamedTemporaryFile() return self.tempfile def __exit__(self, *args): self.tempfile.flush() print("UPLOADING TO {}".format(self.gcloud_name), flush=True) self.blob.upload_from_filename(self.tempfile.name) self.tempfile.close() def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1): """ :param array: Single dimension array :param target: target to search for :param return_first_match: If true, return the first index that matches, otherwise, return the last one :param default_value: Index to return if there was no match :return: index of the first match, or -1 if nothing """ assert array.ndim == 1 matching_inds = np.where(array == target)[0] if len(matching_inds) > 0: if return_first_match: return int(matching_inds[0]) else: return int(matching_inds[-1]) return default_value def main(_): tf.logging.set_verbosity(tf.logging.INFO) news_config = GroverConfig.from_json_file(FLAGS.config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Input Files ***") for input_file in input_files: tf.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.iterations_per_loop, keep_checkpoint_max=None, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder(news_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=1e-4, num_train_steps=0, num_warmup_steps=0, use_tpu=FLAGS.use_tpu, ) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.batch_size, eval_batch_size=FLAGS.batch_size, predict_batch_size=FLAGS.batch_size, params={'model_dir': FLAGS.output_dir} ) eval_input_fn = input_fn_builder( input_files=input_files, seq_length=FLAGS.max_seq_length, evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1, is_training=False) result = [x for x in estimator.predict(input_fn=eval_input_fn, yield_single_examples=True)] cats = sorted(result[0].keys()) result_stack = {cat: np.stack([x[cat] for x in result]) for cat in cats} with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name: with h5py.File(tempfile_name, 'w') as h5: for cat, data in result_stack.items(): dtype2use = np.float16 if cat.endswith(('logprobs', 'top_p_required')) else np.uint16 h5.create_dataset(cat, data=data.astype(dtype2use)) h5.create_dataset('model', data=FLAGS.config_file) h5.create_dataset('ckpt', data=FLAGS.init_checkpoint) h5.create_dataset('input_file', data=FLAGS.input_file) # This gives the perplexity of the entire article. if you want to replicate the results of the paper you # might need to do something different to extract the ppl of just the body in particular. ppl_ex = [] for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']): # Omit the first token. Keep in mind input_ids is shifted by 1 start_ind = ind_where(ids_i, target=50265, default_value=0) end_ind = ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1) ppl_ex.append(logprobs_i[start_ind:end_ind]) ppl_ex = np.concatenate(ppl_ex, 0) print("Article perplexity is {:.3f}".format(np.exp(-np.mean(ppl_ex))), flush=True) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("output_dir") tf.app.run()
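A tiny illustration of the ind_where helper defined in the script above, imported under the repository's lm package layout (importing the module also pulls in its TensorFlow dependencies).

import numpy as np
from lm.validate import ind_where

arr = np.array([5, 7, 9, 7])
assert ind_where(arr, target=7) == 1                             # first match
assert ind_where(arr, target=7, return_first_match=False) == 3   # last match
assert ind_where(arr, target=42, default_value=-1) == -1         # no match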
Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
1290
1153
# -*- coding: utf-8 -*- # Thanks to @skelsec for his awesome tool Pypykatz # Checks his project here: https://github.com/skelsec/pypykatz import codecs import traceback from lazagne.config.module_info import ModuleInfo from lazagne.config.constant import constant from pypykatz.pypykatz import pypykatz class Pypykatz(ModuleInfo): """ Pypykatz dumps all secrets from the lsass.exe memory It does not work if: - LSASS is running as a protected process - A security product blocks this access """ def __init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True) def run(self): mimi = None try: mimi = pypykatz.go_live() except Exception: self.debug(traceback.format_exc()) if mimi: results = {} logon_sessions = mimi.to_dict().get('logon_sessions', []) for logon_session in logon_sessions: # Right now kerberos_creds, dpapi_creds results are not used user = logon_sessions[logon_session] # Get cleartext password for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for data in user.get(i, []): if all((data['username'], data['password'])): login = data['username'] if login not in results: results[login] = {} results[login]['Type'] = i results[login]['Domain'] = data.get('domainname', 'N/A') results[login]['Password'] = data['password'] # msv_creds to get sha1 user hash for data in user.get('msv_creds', []): if data['username']: login = data['username'] else: login = user['username'] if login not in results: results[login] = {} if data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex') if data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex') if data['NThash']: results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex') constant.pypykatz_result = results pwd_found = [] for user in results: results[user]['Login'] = user pwd_found.append(results[user]) return pwd_found
qf_lib/containers/futures/future_contract.py
webclinic017/qf-lib
198
1164
# Copyright 2016-present CERN – European Organization for Nuclear Research # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime from qf_lib.common.tickers.tickers import Ticker from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame class FutureContract(object): """ Class representing a single future contract. The FutureContract is a simple class representing one futures contract. The FutureContract objects are used by the FuturesChain, in order to provide the contracts chaining possibilities. It requires 3 parameters: ticker, which is the symbol of the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the contract and a PricesDataFrame, containing dates with price field values. Parameters ---------- ticker: Ticker symbol of the future contract exp_date: datetime expiration date data: PricesDataFrame data frame containing dates with price fields values """ def __init__(self, ticker: Ticker, exp_date: datetime, data: PricesDataFrame): self.ticker = ticker self.exp_date = exp_date self.data = data def __str__(self): return 'Contract: ticker: {}, expiration date: {}'.format( self.ticker, self.exp_date) def __eq__(self, other): if self is other: return True if not isinstance(other, FutureContract): return False return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data) def __hash__(self): return hash((self.ticker, self.exp_date, self.data))
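A hedged construction sketch; the ticker follows the constructor example quoted in the docstring above (the real BloombergFutureTicker may require additional arguments), and the empty PricesDataFrame is a stand-in for actual price data.

from datetime import datetime

from qf_lib.common.tickers.tickers import BloombergFutureTicker
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.futures.future_contract import FutureContract

contract = FutureContract(
    ticker=BloombergFutureTicker("CTZ9 Comdty"),  # per the docstring example
    exp_date=datetime(2019, 12, 6),
    data=PricesDataFrame(),
)
print(contract)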
pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py
AndreasKaratzas/stonne
206
1175
<reponame>AndreasKaratzas/stonne<filename>pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py import operator_benchmark as op_bench import torch import numpy from . import configs """EmbeddingBag Operator Benchmark""" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device): self.embedding = torch.nn.EmbeddingBag( num_embeddings=embeddingbags, embedding_dim=dim, mode=mode, include_last_offset=include_last_offset, sparse=sparse).to(device=device) numpy.random.seed((1 << 32) - 1) self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long() offsets = torch.LongTensor([offset], device=device) self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0) self.set_module_name('embeddingbag') def forward(self): return self.embedding(self.input, self.offset) op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark) if __name__ == "__main__": op_bench.benchmark_runner.main()
python/dgl/geometry/capi.py
lfchener/dgl
9516
1176
"""Python interfaces to DGL farthest point sampler.""" from dgl._ffi.base import DGLError import numpy as np from .._ffi.function import _init_api from .. import backend as F from .. import ndarray as nd def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result): r"""Farthest Point Sampler Parameters ---------- data : tensor A tensor of shape (N, d) where N is the number of points and d is the dimension. batch_size : int The number of batches in the ``data``. N should be divisible by batch_size. sample_points : int The number of points to sample in each batch. dist : tensor Pre-allocated tensor of shape (N, ) for to-sample distance. start_idx : tensor of int Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch. result : tensor of int Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index. Returns ------- No return value. The input variable ``result`` will be overwriten with sampled indices. """ assert F.shape(data)[0] >= sample_points * batch_size assert F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): """ Description ----------- The neighbor matching procedure of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure keeps picking an unmarked vertex and matching it with one its unmarked neighbors (that maximizes its edge weight) until no match can be done. If no edge weight is given, this procedure will randomly pick neighbor for each vertex. The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you are not sure your graph is bi-directed. Parameters ---------- graph : HeteroGraphIndex The input homogeneous graph. num_nodes : int The number of nodes in this homogeneous graph. edge_weight : tensor, optional The edge weight tensor holding non-negative scalar weight for each edge. default: :obj:`None` relabel_idx : bool, optional If true, relabel resulting node labels to have consecutive node ids. default: :obj:`True` Returns ------- a 1-D tensor A vector with each element that indicates the cluster ID of a vertex. """ edge_weight_capi = nd.NULL["int64"] if edge_weights is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item() != 0: raise DGLError("Find unmatched node") # reorder node id # TODO: actually we can add `return_inverse` option for `unique` # function in backend for efficiency. if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np) else: return node_label _init_api('dgl.geometry', __name__)
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py
TolyaTalamanov/open_model_zoo
2201
1204
<gh_stars>1000+ """ Copyright (c) 2018-2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np from .postprocessor import PostprocessorWithSpecificTargets from ..representation import BrainTumorSegmentationAnnotation, BrainTumorSegmentationPrediction from ..config import NumberField, ConfigError class ClipSegmentationMask(PostprocessorWithSpecificTargets): __provider__ = 'clip_segmentation_mask' annotation_types = (BrainTumorSegmentationAnnotation, ) prediction_types = (BrainTumorSegmentationPrediction, ) @classmethod def parameters(cls): parameters = super().parameters() parameters.update({ 'min_value': NumberField(value_type=int, min_value=0, optional=True, default=0, description="Min value"), 'max_value': NumberField(value_type=int, description="Max value") }) return parameters def configure(self): self.min_value = self.get_value_from_config('min_value') self.max_value = self.get_value_from_config('max_value') if self.max_value < self.min_value: raise ConfigError('max_value should be greater than min_value') def process_image(self, annotation, prediction): for target in annotation: target.mask = np.clip(target.mask, a_min=self.min_value, a_max=self.max_value) for target in prediction: target.mask = np.clip(target.mask, a_min=self.min_value, a_max=self.max_value) return annotation, prediction
tests/test_utils.py
isabella232/pynacl
756
1205
<filename>tests/test_utils.py # Copyright 2013 <NAME> and individual contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import nacl.secret import nacl.utils def test_random_bytes_produces(): assert len(nacl.utils.random(16)) == 16 def test_random_bytes_produces_different_bytes(): assert nacl.utils.random(16) != nacl.utils.random(16) def test_string_fixer(): assert str(nacl.secret.SecretBox(b"\x00" * 32)) == str(b"\x00" * 32) def test_deterministic_random_bytes(): expected = ( b"0d8e6cc68715648926732e7ea73250cfaf2d58422083904c841a8ba" b"33b986111f346ba50723a68ae283524a6bded09f83be6b80595856f" b"72e25b86918e8b114bafb94bc8abedd73daab454576b7c5833eb0bf" b"982a1bb4587a5c970ff0810ca3b791d7e12" ) seed = ( b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d" b"\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b" b"\x1c\x1d\x1e\x1f" ) assert ( nacl.utils.randombytes_deterministic( 100, seed, encoder=nacl.utils.encoding.HexEncoder ) == expected ) def test_deterministic_random_bytes_invalid_seed_length(): expected = "Deterministic random bytes must be generated from 32 bytes" seed = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a" with pytest.raises(TypeError) as e: nacl.utils.randombytes_deterministic(100, seed) assert expected in str(e.value)
drybell/drybell_lfs_spark.py
jsnlp/snorkel-tutorials
315
1224
<filename>drybell/drybell_lfs_spark.py
from pyspark.sql import Row
from snorkel.labeling.lf import labeling_function
from snorkel.labeling.lf.nlp_spark import spark_nlp_labeling_function
from snorkel.preprocess import preprocessor

from drybell_lfs import load_celebrity_knowledge_base

ABSTAIN = -1
NEGATIVE = 0
POSITIVE = 1


@preprocessor()
def combine_text(x):
    return Row(title=x.title, body=x.body, article=f"{x.title} {x.body}")


@spark_nlp_labeling_function(text_field="article", pre=[combine_text])
def article_mentions_person(x):
    for ent in x.doc.ents:
        if ent.label_ == "PERSON":
            return ABSTAIN
    return NEGATIVE


@spark_nlp_labeling_function(
    text_field="article",
    pre=[combine_text],
    resources=dict(celebrity_knowledge_base=load_celebrity_knowledge_base()),
)
def person_in_db(x, celebrity_knowledge_base):
    for ent in x.doc.ents:
        if ent.label_ == "PERSON" and ent.text.lower() in celebrity_knowledge_base:
            return POSITIVE
    return ABSTAIN


@labeling_function()
def body_contains_fortune(x):
    return POSITIVE if "fortune" in x.body else ABSTAIN
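A hedged sketch of applying the labeling functions above with Snorkel's Spark applier; the local SparkSession and the single example article are illustrative, and the spaCy-backed functions additionally need a language model installed.

from pyspark.sql import Row, SparkSession
from snorkel.labeling.apply.spark import SparkLFApplier

from drybell_lfs_spark import article_mentions_person, person_in_db, body_contains_fortune

spark = SparkSession.builder.master("local[1]").getOrCreate()
articles = spark.sparkContext.parallelize([
    Row(title="Celebrity wins award", body="Her fortune grew last year."),
])

lfs = [article_mentions_person, person_in_db, body_contains_fortune]
label_matrix = SparkLFApplier(lfs).apply(articles)
print(label_matrix)  # one row per article, one column per labeling function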
terrascript/resource/sematext.py
mjuenema/python-terrascript
507
1230
<filename>terrascript/resource/sematext.py
# terrascript/resource/sematext.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:26:36 UTC)
#
# For imports without namespace, e.g.
#
#   >>> import terrascript.resource.sematext
#
# instead of
#
#   >>> import terrascript.resource.sematext.sematext
#
# This is only available for 'official' and 'partner' providers.

from terrascript.resource.sematext.sematext import *
tests/space_test.py
hadrianmontes/jax-md
713
1267
<reponame>hadrianmontes/jax-md<gh_stars>100-1000 # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for jax_md.space.""" from absl.testing import absltest from absl.testing import parameterized from jax.config import config as jax_config from jax import random import jax.numpy as jnp from jax import grad, jit, jacfwd from jax import test_util as jtu from jax_md import space, test_util, quantity, energy from jax_md.util import * from functools import partial from unittest import SkipTest test_util.update_test_tolerance(5e-5, 5e-13) jax_config.parse_flags_with_absl() jax_config.enable_omnistaging() FLAGS = jax_config.FLAGS PARTICLE_COUNT = 10 STOCHASTIC_SAMPLES = 10 SHIFT_STEPS = 10 SPATIAL_DIMENSION = [2, 3] BOX_FORMATS = ['scalar', 'vector', 'matrix'] if FLAGS.jax_enable_x64: POSITION_DTYPE = [f32, f64] else: POSITION_DTYPE = [f32] def make_periodic_general_test_system(N, dim, dtype, box_format): assert box_format in BOX_FORMATS box_size = quantity.box_size_at_number_density(N, 1.0, dim) box = dtype(box_size) if box_format == 'vector': box = jnp.array(jnp.ones(dim) * box_size, dtype) elif box_format == 'matrix': box = jnp.array(jnp.eye(dim) * box_size, dtype) d, s = space.periodic(jnp.diag(box) if box_format == 'matrix' else box) d_gf, s_gf = space.periodic_general(box) d_g, s_g = space.periodic_general(box, fractional_coordinates=False) key = random.PRNGKey(0) R_f = random.uniform(key, (N, dim), dtype=dtype) R = space.transform(box, R_f) E = jit(energy.soft_sphere_pair(d)) E_gf = jit(energy.soft_sphere_pair(d_gf)) E_g = jit(energy.soft_sphere_pair(d_g)) return R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) # pylint: disable=invalid-name class SpaceTest(jtu.JaxTestCase): # pylint: disable=g-complex-comprehension @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_transform(self, spatial_dimension, dtype): key = random.PRNGKey(0) for _ in range(STOCHASTIC_SAMPLES): key, split1, split2 = random.split(key, 3) R = random.normal( split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) T = random.normal( split2, (spatial_dimension, spatial_dimension), dtype=dtype) R_prime_exact = jnp.array(jnp.einsum('ij,kj->ki', T, R), dtype=dtype) R_prime = space.transform(T, R) self.assertAllClose(R_prime_exact, R_prime) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}'.format(dim), 'spatial_dimension': dim } for dim in SPATIAL_DIMENSION)) def test_transform_grad(self, spatial_dimension): key = random.PRNGKey(0) for _ in range(STOCHASTIC_SAMPLES): key, split1, split2 = random.split(key, 3) R = random.normal(split1, (PARTICLE_COUNT, spatial_dimension)) T = random.normal(split2, (spatial_dimension, spatial_dimension)) R_prime = space.transform(T, R) energy_direct = lambda R: jnp.sum(R ** 2) energy_indirect = lambda T, R: jnp.sum(space.transform(T, R) ** 2) grad_direct = 
grad(energy_direct)(R_prime) grad_indirect = grad(energy_indirect, 1)(T, R) self.assertAllClose(grad_direct, grad_indirect) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_transform_inverse(self, spatial_dimension, dtype): key = random.PRNGKey(0) tol = 1e-13 if dtype is f32: tol = 1e-5 for _ in range(STOCHASTIC_SAMPLES): key, split1, split2 = random.split(key, 3) R = random.normal( split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) T = random.normal( split2, (spatial_dimension, spatial_dimension), dtype=dtype) T_inv = space.inverse(T) R_test = space.transform(T_inv, space.transform(T, R)) self.assertAllClose(R, R_test) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_canonicalize_displacement_or_metric(self, spatial_dimension, dtype): key = random.PRNGKey(0) displacement, _ = space.periodic_general(jnp.eye(spatial_dimension)) metric = space.metric(displacement) test_metric = space.canonicalize_displacement_or_metric(displacement) metric = space.map_product(metric) test_metric = space.map_product(test_metric) for _ in range(STOCHASTIC_SAMPLES): key, split1, split2 = random.split(key, 3) R = random.normal( split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) self.assertAllClose(metric(R, R), test_metric(R, R)) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_periodic_displacement(self, spatial_dimension, dtype): key = random.PRNGKey(0) for _ in range(STOCHASTIC_SAMPLES): key, split = random.split(key) R = random.uniform( split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) dR = space.map_product(space.pairwise_displacement)(R, R) dR_wrapped = space.periodic_displacement(f32(1.0), dR) dR_direct = dR dr_direct = space.distance(dR) dr_direct = jnp.reshape(dr_direct, dr_direct.shape + (1,)) if spatial_dimension == 2: for i in range(-1, 2): for j in range(-1, 2): dR_shifted = dR + jnp.array([i, j], dtype=R.dtype) dr_shifted = space.distance(dR_shifted) dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,)) dR_direct = jnp.where(dr_shifted < dr_direct, dR_shifted, dR_direct) dr_direct = jnp.where(dr_shifted < dr_direct, dr_shifted, dr_direct) elif spatial_dimension == 3: for i in range(-1, 2): for j in range(-1, 2): for k in range(-1, 2): dR_shifted = dR + jnp.array([i, j, k], dtype=R.dtype) dr_shifted = space.distance(dR_shifted) dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,)) dR_direct = jnp.where( dr_shifted < dr_direct, dR_shifted, dR_direct) dr_direct = jnp.where( dr_shifted < dr_direct, dr_shifted, dr_direct) dR_direct = jnp.array(dR_direct, dtype=dR.dtype) assert dR_wrapped.dtype == dtype self.assertAllClose(dR_wrapped, dR_direct) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_periodic_shift(self, spatial_dimension, dtype): key = random.PRNGKey(0) for _ in range(STOCHASTIC_SAMPLES): key, split1, split2 = random.split(key, 3) R = 
random.uniform( split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) dR = jnp.sqrt(f32(0.1)) * random.normal( split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) dR = jnp.where(dR > 0.49, f32(0.49), dR) dR = jnp.where(dR < -0.49, f32(-0.49), dR) R_shift = space.periodic_shift(f32(1.0), R, dR) assert R_shift.dtype == R.dtype assert jnp.all(R_shift < 1.0) assert jnp.all(R_shift > 0.0) dR_after = space.periodic_displacement(f32(1.0), R_shift - R) assert dR_after.dtype == R.dtype self.assertAllClose(dR_after, dR) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_periodic_against_periodic_general(self, spatial_dimension, dtype): key = random.PRNGKey(0) tol = 1e-13 if dtype is f32: tol = 1e-5 for _ in range(STOCHASTIC_SAMPLES): key, split1, split2, split3 = random.split(key, 4) max_box_size = f32(10.0) box_size = max_box_size * random.uniform( split1, (spatial_dimension,), dtype=dtype) transform = jnp.diag(box_size) R = random.uniform( split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) R_scaled = R * box_size dR = random.normal( split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) disp_fn, shift_fn = space.periodic(box_size) general_disp_fn, general_shift_fn = space.periodic_general(transform) disp_fn = space.map_product(disp_fn) general_disp_fn = space.map_product(general_disp_fn) self.assertAllClose(disp_fn(R_scaled, R_scaled), general_disp_fn(R, R)) assert disp_fn(R_scaled, R_scaled).dtype == dtype self.assertAllClose( shift_fn(R_scaled, dR), general_shift_fn(R, dR) * box_size) assert shift_fn(R_scaled, dR).dtype == dtype @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_periodic_against_periodic_general_grad(self, spatial_dimension, dtype): key = random.PRNGKey(0) tol = 1e-13 if dtype is f32: tol = 1e-5 for _ in range(STOCHASTIC_SAMPLES): key, split1, split2, split3 = random.split(key, 4) max_box_size = f32(10.0) box_size = max_box_size * random.uniform( split1, (spatial_dimension,), dtype=dtype) transform = jnp.diag(box_size) R = random.uniform( split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) R_scaled = R * box_size dR = random.normal( split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) disp_fn, shift_fn = space.periodic(box_size) general_disp_fn, general_shift_fn = space.periodic_general(transform) disp_fn = space.map_product(disp_fn) general_disp_fn = space.map_product(general_disp_fn) grad_fn = grad(lambda R: jnp.sum(disp_fn(R, R) ** 2)) general_grad_fn = grad(lambda R: jnp.sum(general_disp_fn(R, R) ** 2)) self.assertAllClose(grad_fn(R_scaled), general_grad_fn(R)) assert general_grad_fn(R).dtype == dtype @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype, } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_periodic_general_dynamic(self, spatial_dimension, dtype): key = random.PRNGKey(0) eye = jnp.eye(spatial_dimension) for _ in range(STOCHASTIC_SAMPLES): key, split_T0_scale, split_T0_dT = random.split(key, 3) key, split_T1_scale, split_T1_dT = random.split(key, 3) key, split_t, split_R, split_dR = random.split(key, 4) size_0 = 10.0 * random.uniform(split_T0_scale, ()) 
dtransform_0 = 0.5 * random.normal( split_T0_dT, (spatial_dimension, spatial_dimension)) T_0 = jnp.array(size_0 * (eye + dtransform_0), dtype=dtype) size_1 = 10.0 * random.uniform(split_T1_scale, (), dtype=dtype) dtransform_1 = 0.5 * random.normal( split_T1_dT, (spatial_dimension, spatial_dimension), dtype=dtype) T_1 = jnp.array(size_1 * (eye + dtransform_1), dtype=dtype) disp_fn, shift_fn = space.periodic_general(T_0) true_disp_fn, true_shift_fn = space.periodic_general(T_1) disp_fn = partial(disp_fn, box=T_1) disp_fn = space.map_product(disp_fn) true_disp_fn = space.map_product(true_disp_fn) R = random.uniform( split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) dR = random.normal( split_dR, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) self.assertAllClose( disp_fn(R, R), jnp.array(true_disp_fn(R, R), dtype=dtype)) self.assertAllClose( shift_fn(R, dR, box=T_1), jnp.array(true_shift_fn(R, dR), dtype=dtype)) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__), 'spatial_dimension': dim, 'dtype': dtype, } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE)) def test_periodic_general_wrapped_vs_unwrapped( self, spatial_dimension, dtype): key = random.PRNGKey(0) eye = jnp.eye(spatial_dimension, dtype=dtype) tol = 1e-13 if dtype is f32: tol = 2e-5 for _ in range(STOCHASTIC_SAMPLES): key, split_R, split_T = random.split(key, 3) dT = random.normal( split_T, (spatial_dimension, spatial_dimension), dtype=dtype) T = eye + dT + jnp.transpose(dT) R = random.uniform( split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) R0 = R unwrapped_R = R displacement, shift = space.periodic_general(T) _, unwrapped_shift = space.periodic_general(T, wrapped=False) displacement = space.map_product(displacement) for _ in range(SHIFT_STEPS): key, split = random.split(key) dR = random.normal( split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) R = shift(R, dR) unwrapped_R = unwrapped_shift(unwrapped_R, dR) self.assertAllClose( displacement(R, R0), displacement(unwrapped_R, R0)) assert not (jnp.all(unwrapped_R > 0) and jnp.all(unwrapped_R < 1)) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}', 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in BOX_FORMATS)) def test_periodic_general_energy(self, spatial_dimension, dtype, box_format): N = 16 R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \ make_periodic_general_test_system(N, spatial_dimension, dtype, box_format) self.assertAllClose(E(R), E_gf(R_f)) self.assertAllClose(E(R), E_g(R)) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}', 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in BOX_FORMATS)) def test_periodic_general_force(self, spatial_dimension, dtype, box_format): N = 16 R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \ make_periodic_general_test_system(N, spatial_dimension, dtype, box_format) self.assertAllClose(grad(E)(R), grad(E_gf)(R_f)) self.assertAllClose(grad(E)(R), grad(E_g)(R)) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}', 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format } for dim in SPATIAL_DIMENSION for dtype 
in POSITION_DTYPE for box_format in BOX_FORMATS)) def test_periodic_general_shift(self, spatial_dimension, dtype, box_format): N = 16 R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \ make_periodic_general_test_system(N, spatial_dimension, dtype, box_format) R_new = s(R, grad(E)(R)) R_gf_new = s_gf(R_f, grad(E_gf)(R_f)) R_g_new = s_g(R, grad(E_g)(R)) self.assertAllClose(R_new, space.transform(box, R_gf_new)) self.assertAllClose(R_new, R_g_new) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}', 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in BOX_FORMATS)) def test_periodic_general_deform(self, spatial_dimension, dtype, box_format): N = 16 R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \ make_periodic_general_test_system(N, spatial_dimension, dtype, box_format) deformed_box = box * 0.9 self.assertAllClose(E_gf(R_f, box=deformed_box), E_g(R, new_box=deformed_box)) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}', 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in BOX_FORMATS)) def test_periodic_general_deform_grad(self, spatial_dimension, dtype, box_format): N = 16 R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \ make_periodic_general_test_system(N, spatial_dimension, dtype, box_format) deformed_box = box * 0.9 self.assertAllClose(grad(E_gf)(R_f, box=deformed_box), grad(E_g)(R, new_box=deformed_box)) self.assertAllClose(jacfwd(E_gf)(R_f, box=deformed_box), jacfwd(E_g)(R, new_box=deformed_box)) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}', 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in BOX_FORMATS)) def test_periodic_general_deform_shift(self, spatial_dimension, dtype, box_format): N = 16 R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \ make_periodic_general_test_system(N, spatial_dimension, dtype, box_format) deformed_box = box * 0.9 R_new = s_g(R, grad(E_g)(R), new_box=deformed_box) R_gf_new = space.transform(deformed_box, s_gf(R_f, grad(E_gf)(R_f))) self.assertAllClose(R_new, R_gf_new) @parameterized.named_parameters(jtu.cases_from_list( { 'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}', 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format } for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in BOX_FORMATS)) def test_periodic_general_grad_box(self, spatial_dimension, dtype, box_format): if box_format == 'scalar': raise SkipTest('Scalar case fails due to JAX Issue #5849.') N = 16 R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \ make_periodic_general_test_system(N, spatial_dimension, dtype, box_format) @grad def box_energy_g_fn(box): return E_g(R, new_box=box) @grad def box_energy_gf_fn(box): return E_gf(R_f, box=box) self.assertAllClose(box_energy_g_fn(box), box_energy_gf_fn(box)) if __name__ == '__main__': absltest.main()
Python/hello-world-pt-BR.py
PushpneetSingh/Hello-world
1,428
1299
print(u"Ol<NAME>!")
shared/templates/grub2_bootloader_argument/template.py
justchris1/scap-security-guide
1,138
1319
import ssg.utils


def preprocess(data, lang):
    data["arg_name_value"] = data["arg_name"] + "=" + data["arg_value"]
    if lang == "oval":
        # escape dot, this is used in oval regex
        data["escaped_arg_name_value"] = data["arg_name_value"].replace(".", "\\.")
        # replace . with _, this is used in test / object / state ids
        data["sanitized_arg_name"] = ssg.utils.escape_id(data["arg_name"])
    return data
inverse_warp.py
ZephyrII/competitive_colaboration
357
1374
# Author: <NAME> # Copyright (c) 2019, <NAME> # All rights reserved. # based on github.com/ClementPinard/SfMLearner-Pytorch from __future__ import division import torch from torch.autograd import Variable pixel_coords = None def set_id_grid(depth): global pixel_coords b, h, w = depth.size() i_range = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w)).type_as(depth) # [1, H, W] j_range = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w)).type_as(depth) # [1, H, W] ones = Variable(torch.ones(1,h,w)).type_as(depth) pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W] def check_sizes(input, input_name, expected): condition = [input.ndimension() == len(expected)] for i,size in enumerate(expected): if size.isdigit(): condition.append(input.size(i) == int(size)) assert(all(condition)), "wrong size for {}, expected {}, got {}".format(input_name, 'x'.join(expected), list(input.size())) def pixel2cam(depth, intrinsics_inv): global pixel_coords """Transform coordinates in the pixel frame to the camera frame. Args: depth: depth maps -- [B, H, W] intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3] Returns: array of (u,v,1) cam coordinates -- [B, 3, H, W] """ b, h, w = depth.size() if (pixel_coords is None) or pixel_coords.size(2) != h or pixel_coords.size(3) != w: set_id_grid(depth) current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).contiguous().view(b, 3, -1) # [B, 3, H*W] cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b, 3, h, w) return cam_coords * depth.unsqueeze(1) def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode): """Transform coordinates in the camera frame to the pixel frame. Args: cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W] proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4] proj_c2p_tr: translation vectors of cameras -- [B, 3, 1] Returns: array of [-1,1] coordinates -- [B, 2, H, W] """ b, _, h, w = cam_coords.size() cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W] if proj_c2p_rot is not None: pcoords = proj_c2p_rot.bmm(cam_coords_flat) else: pcoords = cam_coords_flat if proj_c2p_tr is not None: pcoords = pcoords + proj_c2p_tr # [B, 3, H*W] X = pcoords[:, 0] Y = pcoords[:, 1] Z = pcoords[:, 2].clamp(min=1e-3) X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W] Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W] if padding_mode == 'zeros': X_mask = ((X_norm > 1)+(X_norm < -1)).detach() X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach() Y_norm[Y_mask] = 2 pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2] return pixel_coords.view(b,h,w,2) def euler2mat(angle): """Convert euler angles to rotation matrix. 
Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174 Args: angle: rotation angle along 3 axis (in radians) -- size = [B, 3] Returns: Rotation matrix corresponding to the euler angles -- size = [B, 3, 3] """ B = angle.size(0) x, y, z = angle[:,0], angle[:,1], angle[:,2] cosz = torch.cos(z) sinz = torch.sin(z) zeros = z.detach()*0 ones = zeros.detach()+1 zmat = torch.stack([cosz, -sinz, zeros, sinz, cosz, zeros, zeros, zeros, ones], dim=1).view(B, 3, 3) cosy = torch.cos(y) siny = torch.sin(y) ymat = torch.stack([cosy, zeros, siny, zeros, ones, zeros, -siny, zeros, cosy], dim=1).view(B, 3, 3) cosx = torch.cos(x) sinx = torch.sin(x) xmat = torch.stack([ones, zeros, zeros, zeros, cosx, -sinx, zeros, sinx, cosx], dim=1).view(B, 3, 3) rotMat = xmat.bmm(ymat).bmm(zmat) return rotMat def quat2mat(quat): """Convert quaternion coefficients to rotation matrix. Args: quat: first three coeff of quaternion of rotation. fourht is then computed to have a norm of 1 -- size = [B, 3] Returns: Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] """ norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1) norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True) w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3] B = quat.size(0) w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) wx, wy, wz = w*x, w*y, w*z xy, xz, yz = x*y, x*z, y*z rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz, 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx, 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3) return rotMat def pose_vec2mat(vec, rotation_mode='euler'): """ Convert 6DoF parameters to transformation matrix. Args:s vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6] Returns: A transformation matrix -- [B, 3, 4] """ translation = vec[:, :3].unsqueeze(-1) # [B, 3, 1] rot = vec[:,3:] if rotation_mode == 'euler': rot_mat = euler2mat(rot) # [B, 3, 3] elif rotation_mode == 'quat': rot_mat = quat2mat(rot) # [B, 3, 3] transform_mat = torch.cat([rot_mat, translation], dim=2) # [B, 3, 4] return transform_mat def flow_warp(img, flow, padding_mode='zeros'): """ Inverse warp a source image to the target image plane. 
Args: img: the source image (where to sample pixels) -- [B, 3, H, W] flow: flow map of the target image -- [B, 2, H, W] Returns: Source image warped to the target image plane """ check_sizes(img, 'img', 'BCHW') check_sizes(flow, 'flow', 'B2HW') bs, _, h, w = flow.size() u = flow[:,0,:,:] v = flow[:,1,:,:] grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(u).expand_as(u) # [bs, H, W] grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(v).expand_as(v) # [bs, H, W] X = grid_x + u Y = grid_y + v X = 2*(X/(w-1.0) - 0.5) Y = 2*(Y/(h-1.0) - 0.5) grid_tf = torch.stack((X,Y), dim=3) img_tf = torch.nn.functional.grid_sample(img, grid_tf, padding_mode=padding_mode) return img_tf def pose2flow(depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode=None): """ Converts pose parameters to rigid optical flow """ check_sizes(depth, 'depth', 'BHW') check_sizes(pose, 'pose', 'B6') check_sizes(intrinsics, 'intrinsics', 'B33') check_sizes(intrinsics_inv, 'intrinsics', 'B33') assert(intrinsics_inv.size() == intrinsics.size()) bs, h, w = depth.size() grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth) # [bs, H, W] grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth) # [bs, H, W] cam_coords = pixel2cam(depth, intrinsics_inv) # [B,3,H,W] pose_mat = pose_vec2mat(pose, rotation_mode) # [B,3,4] # Get projection matrix for tgt camera frame to source pixel frame proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4] src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2] X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5) - grid_x Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5) - grid_y return torch.stack((X,Y), dim=1) def flow2oob(flow): check_sizes(flow, 'flow', 'B2HW') bs, _, h, w = flow.size() u = flow[:,0,:,:] v = flow[:,1,:,:] grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(u).expand_as(u) # [bs, H, W] grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(v).expand_as(v) # [bs, H, W] X = grid_x + u Y = grid_y + v X = 2*(X/(w-1.0) - 0.5) Y = 2*(Y/(h-1.0) - 0.5) oob = (X.abs()>1).add(Y.abs()>1)>0 return oob def occlusion_mask(grid, depth): check_sizes(img, 'grid', 'BHW2') check_sizes(depth, 'depth', 'BHW') mask = grid return mask def inverse_warp(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros'): """ Inverse warp a source image to the target image plane. 
Args: img: the source image (where to sample pixels) -- [B, 3, H, W] depth: depth map of the target image -- [B, H, W] pose: 6DoF pose parameters from target to source -- [B, 6] intrinsics: camera intrinsic matrix -- [B, 3, 3] intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3] Returns: Source image warped to the target image plane """ check_sizes(img, 'img', 'B3HW') check_sizes(depth, 'depth', 'BHW') check_sizes(pose, 'pose', 'B6') check_sizes(intrinsics, 'intrinsics', 'B33') check_sizes(intrinsics_inv, 'intrinsics', 'B33') assert(intrinsics_inv.size() == intrinsics.size()) batch_size, _, img_height, img_width = img.size() cam_coords = pixel2cam(depth, intrinsics_inv) # [B,3,H,W] pose_mat = pose_vec2mat(pose, rotation_mode) # [B,3,4] # Get projection matrix for tgt camera frame to source pixel frame proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4] src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2] projected_img = torch.nn.functional.grid_sample(img, src_pixel_coords, padding_mode=padding_mode) return projected_img
examples/nn_cudamat.py
cloudspectatordevelopment/cudamat
526
1376
<filename>examples/nn_cudamat.py # This file shows how to implement a single hidden layer neural network for # performing binary classification on the GPU using cudamat. from __future__ import division import pdb import time import numpy as np import cudamat as cm from cudamat import learn as cl import util # initialize CUDA cm.cublas_init() # load data util.load('mnist49.dat', globals()) # Put training data onto the GPU. dat_train = dat_train/255. dat_train = dat_train - (np.mean(dat_train, 1)+10**-8)[:, np.newaxis] dev_train = cm.CUDAMatrix(dat_train) dev_lbl = cm.CUDAMatrix(lbl_train) # training parameters epsilon = 0.01 momentum = 0.9 num_epochs = 30 batch_size = 128 num_batches = dat_train.shape[1]//batch_size # model parameters dim_in = dat_train.shape[0] dim_out = 1 num_hid = 1024 # initialize weights w_w1 = cm.CUDAMatrix(dim_in ** -0.5 * np.random.randn(dim_in, num_hid)) w_b1 = cm.CUDAMatrix(np.zeros((num_hid, 1))) w_w2 = cm.CUDAMatrix(num_hid ** -0.5 * np.random.randn(num_hid, dim_out)) w_b2 = cm.CUDAMatrix(np.zeros((dim_out, 1))) # initialize weight update matrices wu_w1 = cm.empty(w_w1.shape).assign(0) wu_b1 = cm.empty(w_b1.shape).assign(0) wu_w2 = cm.empty(w_w2.shape).assign(0) wu_b2 = cm.empty(w_b2.shape).assign(0) # initialize temporary storage h = cm.empty((num_hid, batch_size)) out = cm.empty((dim_out, batch_size)) delta = cm.empty((num_hid, batch_size)) # Train neural network. start_time = time.time() for epoch in range(num_epochs): print("Epoch %i" % (epoch + 1)) err = [] for batch in range(num_batches): # get current minibatch inp = dev_train.slice(batch*batch_size,(batch + 1)*batch_size) target = dev_lbl.slice(batch*batch_size,(batch + 1)*batch_size) # forward pass cm.dot(w_w1.T, inp, target = h) h.add_col_vec(w_b1) h.apply_sigmoid() cm.dot(w_w2.T, h, target = out) out.add_col_vec(w_b2) out.apply_sigmoid() # back prop errors out.subtract(target) # compute error # gradients for w_w2 and w_b2 wu_w2.add_dot(h, out.T, beta = momentum) wu_b2.add_sums(out, axis = 1, beta = momentum) # compute delta cm.dot(w_w2, out, target = delta) # delta = delta * h * (1 - h) cl.mult_by_sigmoid_deriv(delta, h) # gradients for w_w1 and w_b1 wu_w1.add_dot(inp, delta.T, beta = momentum) wu_b1.add_sums(delta, axis = 1, beta = momentum) # update weights w_w1.subtract_mult(wu_w1, epsilon/batch_size) w_b1.subtract_mult(wu_b1, epsilon/batch_size) w_w2.subtract_mult(wu_w2, epsilon/batch_size) w_b2.subtract_mult(wu_b2, epsilon/batch_size) # calculate error on current minibatch err.append(np.abs(out.asarray())>0.5) print("Training misclassification rate: %f" % np.mean(err)) print("Time: %f" % (time.time() - start_time)) # Evaluate neural network on test data. # Load test data onto the GPU. dat_test = dat_test/255. dat_test = dat_test - np.mean(dat_test, 1)[:, np.newaxis] dev_test = cm.CUDAMatrix(dat_test) dev_lbl = cm.CUDAMatrix(lbl_test) # Initalize temporary storage. h = cm.empty((num_hid, dat_test.shape[1])) out = cm.empty((dim_out, dat_test.shape[1])) # forward pass cm.dot(w_w1.T, dev_test, target = h) h.add_col_vec(w_b1) h.apply_sigmoid() cm.dot(w_w2.T, h, target = out) out.add_col_vec(w_b2) out.apply_sigmoid() # compute error out.subtract(dev_lbl) print("Testing misclassification rate: %f" % np.mean(np.abs(out.asarray())>0.5)) cm.cublas_shutdown()
scripts/49-cat-logs.py
jmviz/xd
179
1391
#!/usr/bin/env python3
# Usage:
#   $0 -o log.txt products/
#
# concatenates .log files (even those in subdirs or .zip) and combines into a single combined.log

from xdfile.utils import find_files_with_time, open_output, get_args
import boto3
# from boto.s3.connection import S3Connection
import os


def main():
    args = get_args('aggregates all .log files')
    outf = open_output()

    s3 = boto3.resource('s3')
    s3path = "logs/"
    # bucket = conn.get_bucket(s3path)
    bucket = s3.Bucket(os.environ['DOMAIN'])

    for obj in sorted(bucket.objects.all(), key=lambda x: x.last_modified):  # last_modified
        if s3path in obj.key:
            print("Name: %s LastModified:%s" % (obj.key.encode('utf-8'), obj.last_modified))

    for fn, contents, dt in sorted(find_files_with_time(*args.inputs, ext=".log"), key=lambda x: x[2]):  # earliest first
        outf.write_file(fn, contents.decode("utf-8"))


main()
manuscript/link_checker.py
wuyang1002431655/tango_with_django_19
244
1392
# Checks for broken links in the book chapters, printing the status of each link found to stdout.
# The Python package 'requests' must be installed and available for this simple module to work.
# Author: <NAME>
# Date: 2017-02-14

import re
import requests


def main(chapters_list_filename, hide_success=True):
    """
    hide_success = a boolean switch that determines whether to show URLs that return a HTTP 200.
    If set to true, only URLs that fail will be printed.
    """
    chapters_f = open(chapters_list_filename, 'r')
    pattern = re.compile(r'\[([^]]+)]\(\s*(http[s]?://[^)]+)\s*\)')  # http://stackoverflow.com/a/23395483

    print 'filename\tline_no\ttitle\turl\tstatus_code'

    for filename in chapters_f:
        filename = filename.strip()

        if not filename or filename.startswith('{'):  # Skip non-filename lines
            continue

        chapter_f = open(filename, 'r')
        line_no = 1

        for line in chapter_f:
            line = line.strip()

            for match in re.findall(pattern, line):
                title = match[0]
                url = match[1]

                if '127.0.0.1' in url or 'localhost' in url:  # Don't check localhost URLs
                    continue

                request = None
                status_code = -1

                try:
                    request = requests.get(url)
                    status_code = request.status_code
                except requests.exceptions.ConnectionError:
                    request = None
                    status_code = 'FAILED_TO_CONNECT'

                if hide_success and status_code == 200:
                    continue

                title = title.replace('\t', ' ')
                print '{filename}\t{line_no}\t{title}\t{url}\t{status_code}'.format(filename=filename,
                                                                                    line_no=line_no,
                                                                                    title=title,
                                                                                    url=url,
                                                                                    status_code=status_code)

            line_no = line_no + 1

        chapter_f.close()

    chapters_f.close()


if __name__ == '__main__':
    main('Book.txt', hide_success=False)
pommerman/__init__.py
rmccann01/playground
725
1407
<filename>pommerman/__init__.py
'''Entry point into the pommerman module'''
import gym
import inspect

from . import agents
from . import configs
from . import constants
from . import forward_model
from . import helpers
from . import utility
from . import network

gym.logger.set_level(40)
REGISTRY = None


def _register():
    global REGISTRY
    REGISTRY = []
    for name, f in inspect.getmembers(configs, inspect.isfunction):
        if not name.endswith('_env'):
            continue
        config = f()
        gym.envs.registration.register(
            id=config['env_id'],
            entry_point=config['env_entry_point'],
            kwargs=config['env_kwargs']
        )
        REGISTRY.append(config['env_id'])


# Register environments with gym
_register()


def make(config_id, agent_list, game_state_file=None, render_mode='human'):
    '''Makes the pommerman env and registers it with gym'''
    assert config_id in REGISTRY, "Unknown configuration '{}'. " \
        "Possible values: {}".format(config_id, REGISTRY)
    env = gym.make(config_id)

    for id_, agent in enumerate(agent_list):
        assert isinstance(agent, agents.BaseAgent)
        # NOTE: This is IMPORTANT so that the agent character is initialized
        agent.init_agent(id_, env.spec._kwargs['game_type'])

    env.set_agents(agent_list)
    env.set_init_game_state(game_state_file)
    env.set_render_mode(render_mode)
    return env


from . import cli
tests/components/mysensors/conftest.py
liangleslie/core
30,023
1412
<filename>tests/components/mysensors/conftest.py """Provide common mysensors fixtures.""" from __future__ import annotations from collections.abc import AsyncGenerator, Callable, Generator import json from typing import Any from unittest.mock import AsyncMock, MagicMock, patch from mysensors import BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor import Sensor import pytest from homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from homeassistant.core import HomeAssistant from homeassistant.setup import async_setup_component from tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: """Mock out device tracker known devices storage.""" devices = mock_device_tracker_conf return devices @pytest.fixture(name="mqtt") def mock_mqtt_fixture(hass: HomeAssistant) -> None: """Mock the MQTT integration.""" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name="is_serial_port") def is_serial_port_fixture() -> Generator[MagicMock, None, None]: """Patch the serial port check.""" with patch("homeassistant.components.mysensors.gateway.cv.isdevice") as is_device: is_device.side_effect = lambda device: device yield is_device @pytest.fixture(name="gateway_nodes") def gateway_nodes_fixture() -> dict[int, Sensor]: """Return the gateway nodes dict.""" return {} @pytest.fixture(name="serial_transport") async def serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int, Sensor], None]: """Mock a serial transport.""" with patch( "mysensors.gateway_serial.AsyncTransport", autospec=True ) as transport_class, patch("mysensors.task.OTAFirmware", autospec=True), patch( "mysensors.task.load_fw", autospec=True ), patch( "mysensors.task.Persistence", autospec=True ) as persistence_class: persistence = persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class def mock_gateway_features( persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor] ) -> None: """Mock the gateway features.""" async def mock_schedule_save_sensors() -> None: """Load nodes from via persistence.""" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors ) # For some reason autospeccing does not recognize these methods. 
persistence.safe_load_sensors = MagicMock() persistence.save_sensors = MagicMock() async def mock_connect() -> None: """Mock the start method.""" transport.connect_task = MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task = None transport.connect.side_effect = mock_connect @pytest.fixture(name="transport") def transport_fixture(serial_transport: MagicMock) -> MagicMock: """Return the default mocked transport.""" return serial_transport @pytest.fixture def transport_write(transport: MagicMock) -> MagicMock: """Return the transport mock that accepts string messages.""" return transport.return_value.send @pytest.fixture(name="serial_entry") async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: """Create a config entry for a serial gateway.""" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: "2.3", CONF_DEVICE: "/test/device", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return entry @pytest.fixture(name="config_entry") def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: """Provide the config entry used for integration set up.""" return serial_entry @pytest.fixture(name="integration") async def integration_fixture( hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]: """Set up the mysensors integration with a config entry.""" config: dict[str, Any] = {} config_entry.add_to_hass(hass) with patch("homeassistant.components.mysensors.device.UPDATE_DELAY", new=0): await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() yield config_entry @pytest.fixture def receive_message( transport: MagicMock, integration: MockConfigEntry ) -> Callable[[str], None]: """Receive a message for the gateway.""" def receive_message_callback(message_string: str) -> None: """Receive a message with the transport. The message_string parameter is a string in the MySensors message format. 
""" gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name="gateway") def gateway_fixture( transport: MagicMock, integration: MockConfigEntry ) -> BaseSyncGateway: """Return a setup gateway.""" return transport.call_args[0][0] def load_nodes_state(fixture_path: str) -> dict: """Load mysensors nodes fixture.""" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor] ) -> dict: """Update the gateway nodes.""" gateway_nodes.update(nodes) return nodes @pytest.fixture(name="gps_sensor_state", scope="session") def gps_sensor_state_fixture() -> dict: """Load the gps sensor state.""" return load_nodes_state("mysensors/gps_sensor_state.json") @pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor: """Load the gps sensor.""" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1] return node @pytest.fixture(name="power_sensor_state", scope="session") def power_sensor_state_fixture() -> dict: """Load the power sensor state.""" return load_nodes_state("mysensors/power_sensor_state.json") @pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor: """Load the power sensor.""" nodes = update_gateway_nodes(gateway_nodes, power_sensor_state) node = nodes[1] return node @pytest.fixture(name="energy_sensor_state", scope="session") def energy_sensor_state_fixture() -> dict: """Load the energy sensor state.""" return load_nodes_state("mysensors/energy_sensor_state.json") @pytest.fixture def energy_sensor( gateway_nodes: dict[int, Sensor], energy_sensor_state: dict ) -> Sensor: """Load the energy sensor.""" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1] return node @pytest.fixture(name="sound_sensor_state", scope="session") def sound_sensor_state_fixture() -> dict: """Load the sound sensor state.""" return load_nodes_state("mysensors/sound_sensor_state.json") @pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor: """Load the sound sensor.""" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1] return node @pytest.fixture(name="distance_sensor_state", scope="session") def distance_sensor_state_fixture() -> dict: """Load the distance sensor state.""" return load_nodes_state("mysensors/distance_sensor_state.json") @pytest.fixture def distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state: dict ) -> Sensor: """Load the distance sensor.""" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1] return node @pytest.fixture(name="temperature_sensor_state", scope="session") def temperature_sensor_state_fixture() -> dict: """Load the temperature sensor state.""" return load_nodes_state("mysensors/temperature_sensor_state.json") @pytest.fixture def temperature_sensor( gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict ) -> Sensor: """Load the temperature sensor.""" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1] return node @pytest.fixture(name="text_node_state", scope="session") def text_node_state_fixture() -> dict: """Load the text node state.""" return load_nodes_state("mysensors/text_node_state.json") @pytest.fixture def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor: 
"""Load the text child node.""" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node = nodes[1] return node
nlpgnn/gnn/RGCNConv.py
ojipadeson/NLPGNN
263
1447
<gh_stars>100-1000
#! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@Author:<NAME>

Usage:
    node_embeddings = tf.random.normal(shape=(5, 3))
    adjacency_lists = [
        tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32),
        tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32)
    ]
    layer = RGraphConvolution(out_features=12)
    x = layer(GNNInput(node_embeddings, adjacency_lists))
"""
import tensorflow as tf

from nlpgnn.gnn.messagepassing import MessagePassing


class RGraphConvolution(MessagePassing):
    def __init__(self, out_features,
                 epsion=1e-7,
                 aggr="sum",
                 normalize=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 use_bias=True,
                 **kwargs):
        super(RGraphConvolution, self).__init__(aggr, **kwargs)
        self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
        self.bias_initializer = tf.keras.initializers.get(bias_initializer)
        self.use_bias = use_bias
        self.normalize = normalize
        self.out_features = out_features
        self.epsion = epsion

    def build(self, input_shapes):
        node_embedding_shapes = input_shapes.node_embeddings
        adjacency_list_shapes = input_shapes.adjacency_lists
        num_edge_type = len(adjacency_list_shapes)
        in_features = node_embedding_shapes[-1]
        self._edge_type_weights = []
        self._edge_type_bias = []
        for i in range(num_edge_type):
            weight = self.add_weight(
                shape=(in_features, self.out_features),
                initializer=self.kernel_initializer,
                name='wt_{}'.format(i),
            )
            self._edge_type_weights.append(weight)
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.out_features),
                initializer=self.bias_initializer,
                name='b',
            )
        else:
            self.bias = None
        self.weight_o = self.add_weight(
            shape=(in_features, self.out_features),
            initializer=self.kernel_initializer,
            name='wo',
        )
        self.built = True

    def message_function(self, edge_source_states, edge_target_states,
                         num_incoming_to_node_per_message,
                         num_outing_to_node_per_message,
                         edge_type_idx):
        """
        :param edge_source_states: [M,H]
        :param edge_target_states: [M,H]
        :param num_incoming_to_node_per_message:[M]
        :param edge_type_idx:
        :param training:
        :return:
        """
        weight_r = self._edge_type_weights[edge_type_idx]
        messages = tf.linalg.matmul(edge_source_states, weight_r)
        if self.normalize:
            messages = (
                tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1)
                * messages
            )
        return messages

    def call(self, inputs):
        aggr_out = self.propagate(inputs)  # message_passing + update
        aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o)
        if self.bias is not None:
            aggr_out += self.bias
        return aggr_out