max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M)
---|---|---|---|---|
test/IECore/BasicPreset.py | ericmehl/cortex | 386 | 27 | ##########################################################################
#
# Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import sys
import shutil
import unittest
import IECore
class TestBasicPreset( unittest.TestCase ) :
def testCopy( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
p = IECore.BasicPreset( testObj, testObj.parameters() )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
p2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) )
self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) )
p2( testObj2, testObj2.parameters() )
self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 )
def testLoad( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised1" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
p = IECore.BasicPreset( os.path.join( savePath, "basicPresetLoadTest", "basicPresetLoadTest-1.cob" ) )
self.assertEqual( len( messageHandler.messages ), 0 )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
def testSave( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised1" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
preset = IECore.BasicPreset( testObj, testObj.parameters() )
# Save for the classLoader and check it's there; we test the 'loadability' later...
preset.save( savePath, "basicPresetTest" )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.cob" ) ) )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.py" ) ) )
# save without the classLoader and check it's there
preset.save( savePath, "basicPresetTest", classLoadable=False )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest.cob" ) ) )
# reload
p = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest.cob" ) )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
preset2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) )
preset2.save( savePath, "basicPresetTest2", classLoadable=False )
#reload
p2 = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest2.cob" ) )
self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) )
p2( testObj2, testObj2.parameters() )
self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 )
def testClassLoader( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
preset = IECore.BasicPreset( testObj, testObj.parameters() )
preset.save( savePath, "basicPresetTestClassLoader" )
# make sure that no messages are emitted during loading
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
loader = IECore.ClassLoader( IECore.SearchPath( savePath ) )
p = loader.load( "basicPresetTestClassLoader" )()
self.assertEqual( len( messageHandler.messages ), 0 )
self.assertTrue( isinstance( p, IECore.BasicPreset ) )
p.metadata()
def testClasses( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.ClassParameter( "b", "", "IECORE_OP_PATHS", os.path.join( "maths", "multiply" ), 2 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.ClassParameter( "c", "", "IECORE_OP_PATHS" ),
]
)
classes1 = testObj.parameters()["b"].getClass( True )
classes2 = testObj2.parameters()["c"].getClass( True )
self.assertNotEqual( classes1[1:], classes2[1:] )
p = IECore.BasicPreset( testObj, testObj.parameters()["b"] )
self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) )
self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) )
p( testObj2, testObj2.parameters()["c"] )
classes1 = testObj.parameters()["b"].getClass( True )
classes2 = testObj2.parameters()["c"].getClass( True )
self.assertEqual( classes1[1:], classes2[1:] )
def testClassVectors( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.ClassVectorParameter( "b", "", "IECORE_OP_PATHS" ),
]
)
testObj.parameters()["b"].setClasses(
[
( "mult", os.path.join( "maths", "multiply" ), 2 ),
( "coIO", "compoundObjectInOut", 1 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.ClassVectorParameter( "c", "", "IECORE_OP_PATHS" ),
]
)
classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ]
classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ]
self.assertNotEqual( classes1, classes2 )
p = IECore.BasicPreset( testObj, testObj.parameters()["b"] )
self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) )
self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) )
p( testObj2, testObj2.parameters()["c"] )
classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ]
classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ]
self.assertEqual( classes1, classes2 )
def testCompoundVectorParameter( self ) :
p = IECore.Parameterised( "test" )
p.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.CompoundVectorParameter(
"c",
"",
members = [
IECore.StringVectorParameter( "s", "", IECore.StringVectorData() ),
IECore.BoolVectorParameter( "b", "", IECore.BoolVectorData() ),
]
)
]
)
p["c"]["s"].setValue( IECore.StringVectorData( [ "1", "2", "3" ] ) )
p["c"]["b"].setValue( IECore.BoolVectorData( [ True, False, True ] ) )
v = p.parameters().getValue().copy()
preset = IECore.BasicPreset( p, p.parameters() )
self.assertTrue( preset.applicableTo( p, p.parameters() ) )
p.parameters().setValue( p.parameters().defaultValue )
self.assertNotEqual( p.parameters().getValue(), v )
preset( p, p.parameters() )
self.assertEqual( p.parameters().getValue(), v )
def tearDown( self ) :
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
paths = (
os.path.join( savePath, "basicPresetTest" ),
os.path.join( savePath, "basicPresetTest.cob" ),
os.path.join( savePath, "basicPresetTest2.cob" ),
os.path.join( savePath, "basicPresetTestClassLoader" ),
)
for p in paths :
if os.path.isdir( p ) :
shutil.rmtree( p )
elif os.path.isfile( p ) :
os.remove( p )
if __name__ == "__main__":
unittest.main()
|
src/biotite/copyable.py | danijoo/biotite | 208 | 56 |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite"
__author__ = "<NAME>"
__all__ = ["Copyable"]
import abc
class Copyable(metaclass=abc.ABCMeta):
"""
Base class for all objects that should be copyable.
The public method `copy()` first creates a fresh instance of the
class of the instance to be copied, via the `__copy_create__()`
method. All variables that could not be set via the constructor
are then copied via `__copy_fill__()`, starting with the method in
the uppermost base class and ending with the class of the instance
to be copied.
This approach solves the problem of encapsulated variables in
superclasses.
"""
def copy(self):
"""
Create a deep copy of this object.
Returns
-------
copy
A copy of this object.
"""
clone = self.__copy_create__()
self.__copy_fill__(clone)
return clone
def __copy_create__(self):
"""
Instantiate a new object of this class.
Only the constructor should be called in this method.
All further attributes that need to be copied are handled
in `__copy_fill__()`.
Do not call the `super()` method here.
This method must be overridden if the constructor takes
parameters.
Returns
-------
copy
A freshly instantiated copy of *self*.
"""
return type(self)()
def __copy_fill__(self, clone):
"""
Copy all necessary attributes to the new object.
Always call the `super()` method as the first statement.
Parameters
----------
clone
The freshly instantiated copy of *self*.
"""
pass |
tests/keras/layers/wrappers_test.py | kalyc/keras-apache-mxnet | 300 | 59 |
import pytest
import numpy as np
import copy
from numpy.testing import assert_allclose
from keras.utils import CustomObjectScope
from keras.layers import wrappers, Input, Layer
from keras.layers import RNN
from keras import layers
from keras.models import Sequential, Model, model_from_json
from keras import backend as K
from keras.utils.generic_utils import object_list_uid, to_list
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support TimeDistributed and RNN yet')
def test_TimeDistributed():
# first, test with Dense layer
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 2)),
epochs=1,
batch_size=10)
# test config
model.get_config()
# test when specifying a batch_input_shape
test_input = np.random.random((1, 3, 4))
test_output = model.predict(test_input)
weights = model.layers[0].get_weights()
reference = Sequential()
reference.add(wrappers.TimeDistributed(layers.Dense(2),
batch_input_shape=(1, 3, 4)))
reference.add(layers.Activation('relu'))
reference.compile(optimizer='rmsprop', loss='mse')
reference.layers[0].set_weights(weights)
reference_output = reference.predict(test_input)
assert_allclose(test_output, reference_output, atol=1e-05)
# test with Embedding
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Embedding(5, 6),
batch_input_shape=(10, 3, 4),
dtype='int32'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'),
np.random.random((10, 3, 4, 6)), epochs=1, batch_size=10)
# compare to not using batch_input_shape
test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32')
test_output = model.predict(test_input)
weights = model.layers[0].get_weights()
reference = Sequential()
reference.add(wrappers.TimeDistributed(layers.Embedding(5, 6),
input_shape=(3, 4), dtype='int32'))
reference.compile(optimizer='rmsprop', loss='mse')
reference.layers[0].set_weights(weights)
reference_output = reference.predict(test_input)
assert_allclose(test_output, reference_output, atol=1e-05)
# test with Conv2D
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Conv2D(5, (2, 2),
padding='same'),
input_shape=(2, 4, 4, 3)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
np.random.random((1, 2, 4, 4, 5)))
model = model_from_json(model.to_json())
model.summary()
# test stacked layers
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
model.add(wrappers.TimeDistributed(layers.Dense(3)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 3)),
epochs=1, batch_size=10)
# test wrapping Sequential model
model = Sequential()
model.add(layers.Dense(3, input_dim=2))
outer_model = Sequential()
outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
outer_model.compile(optimizer='rmsprop', loss='mse')
outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)),
epochs=1, batch_size=10)
# test with functional API
x = Input(shape=(3, 2))
y = wrappers.TimeDistributed(model)(x)
outer_model = Model(x, y)
outer_model.compile(optimizer='rmsprop', loss='mse')
outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)),
epochs=1, batch_size=10)
# test with BatchNormalization
model = Sequential()
model.add(wrappers.TimeDistributed(
layers.BatchNormalization(center=True, scale=True),
name='bn', input_shape=(10, 2)))
model.compile(optimizer='rmsprop', loss='mse')
# Assert that mean and variance are 0 and 1.
td = model.layers[0]
assert np.array_equal(td.get_weights()[2], np.array([0, 0]))
assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Train
model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
# Assert that mean and variance changed.
assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Verify input_map has one mapping from inputs to reshaped inputs.
uid = object_list_uid(model.inputs)
assert len(td._input_map.keys()) == 1
assert uid in td._input_map
assert K.int_shape(td._input_map[uid]) == (None, 2)
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support TimeDistributed and RNN yet')
@pytest.mark.skipif((K.backend() == 'cntk'),
reason='Flaky with CNTK backend')
def test_TimeDistributed_learning_phase():
# test layers that need learning_phase to be set
np.random.seed(1234)
x = Input(shape=(3, 2))
y = wrappers.TimeDistributed(layers.Dropout(.999))(x, training=True)
model = Model(x, y)
y = model.predict(np.random.random((10, 3, 2)))
assert_allclose(np.mean(y), 0., atol=1e-1, rtol=1e-1)
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support TimeDistributed and RNN yet')
def test_TimeDistributed_trainable():
# test that toggling trainable toggles the wrapped layer's updates and trainable weights
x = Input(shape=(3, 2))
layer = wrappers.TimeDistributed(layers.BatchNormalization())
_ = layer(x)
assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2
layer.trainable = False
assert len(layer.updates) == 0
assert len(layer.trainable_weights) == 0
layer.trainable = True
assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2
@pytest.mark.skipif((K.backend() == 'cntk' or K.backend() == 'mxnet'),
reason='Unknown timesteps for RNN not supported in CNTK and MXNet.')
def test_TimeDistributed_with_masked_embedding_and_unspecified_shape():
# test with unspecified shape and Embeddings with mask_zero
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Embedding(5, 6, mask_zero=True),
input_shape=(None, None)))
# the shape so far: (N, t_1, t_2, 6)
model.add(wrappers.TimeDistributed(layers.SimpleRNN(7, return_sequences=True)))
model.add(wrappers.TimeDistributed(layers.SimpleRNN(8, return_sequences=False)))
model.add(layers.SimpleRNN(1, return_sequences=False))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4), dtype='int32')
for i in range(4):
model_input[i, i:, i:] = 0
model.fit(model_input,
np.random.random((10, 1)), epochs=1, batch_size=10)
mask_outputs = [model.layers[0].compute_mask(model.input)]
for layer in model.layers[1:]:
mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1]))
func = K.function([model.input], mask_outputs[:-1])
mask_outputs_val = func([model_input])
ref_mask_val_0 = model_input > 0 # embedding layer
ref_mask_val_1 = ref_mask_val_0 # first RNN layer
ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1) # second RNN layer
ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
for i in range(3):
assert np.array_equal(mask_outputs_val[i], ref_mask_val[i])
assert mask_outputs[-1] is None # final layer
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support TimeDistributed and RNN yet')
def test_TimeDistributed_with_masking_layer():
# test with Masking layer
model = Sequential()
model.add(wrappers.TimeDistributed(layers.Masking(mask_value=0.,),
input_shape=(None, 4)))
model.add(wrappers.TimeDistributed(layers.Dense(5)))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4))
for i in range(4):
model_input[i, i:, :] = 0.
model.compile(optimizer='rmsprop', loss='mse')
model.fit(model_input,
np.random.random((10, 3, 5)), epochs=1, batch_size=6)
mask_outputs = [model.layers[0].compute_mask(model.input)]
mask_outputs += [model.layers[1].compute_mask(model.layers[1].input,
mask_outputs[-1])]
func = K.function([model.input], mask_outputs)
mask_outputs_val = func([model_input])
assert np.array_equal(mask_outputs_val[0], np.any(model_input, axis=-1))
assert np.array_equal(mask_outputs_val[1], np.any(model_input, axis=-1))
def test_regularizers():
model = Sequential()
model.add(wrappers.TimeDistributed(
layers.Dense(2, kernel_regularizer='l1'), input_shape=(3, 4)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
assert len(model.layers[0].layer.losses) == 1
assert len(model.layers[0].losses) == 1
assert len(model.layers[0].get_losses_for(None)) == 1
assert len(model.losses) == 1
model = Sequential()
model.add(wrappers.TimeDistributed(
layers.Dense(2, activity_regularizer='l1'), input_shape=(3, 4)))
model.add(layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
assert len(model.losses) == 1
def test_Bidirectional():
rnn = layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
dropout_rate = 0.2
for mode in ['sum', 'concat']:
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = Sequential()
model.add(wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
recurrent_dropout=dropout_rate),
merge_mode=mode,
input_shape=(timesteps, dim)))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# test config
model.get_config()
model = model_from_json(model.to_json())
model.summary()
# test stacked bidirectional layers
model = Sequential()
model.add(wrappers.Bidirectional(rnn(output_dim,
return_sequences=True),
merge_mode=mode,
input_shape=(timesteps, dim)))
model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = Input((timesteps, dim))
outputs = wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
recurrent_dropout=dropout_rate),
merge_mode=mode)(inputs)
model = Model(inputs, outputs)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# Bidirectional and stateful
inputs = Input(batch_shape=(1, timesteps, dim))
outputs = wrappers.Bidirectional(rnn(output_dim, stateful=True),
merge_mode=mode)(inputs)
model = Model(inputs, outputs)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
@pytest.mark.skipif((K.backend() == 'cntk'),
reason='Unknown timesteps not supported in CNTK.')
def test_Bidirectional_dynamic_timesteps():
# test with functional API with dynamic length
rnn = layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
dropout_rate = 0.2
for mode in ['sum', 'concat']:
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
inputs = Input((None, dim))
outputs = wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
recurrent_dropout=dropout_rate),
merge_mode=mode)(inputs)
model = Model(inputs, outputs)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
@pytest.mark.parametrize('merge_mode', ['sum', 'mul', 'ave', 'concat', None])
def test_Bidirectional_merged_value(merge_mode):
rnn = layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
X = [np.random.rand(samples, timesteps, dim)]
if merge_mode == 'sum':
merge_func = lambda y, y_rev: y + y_rev
elif merge_mode == 'mul':
merge_func = lambda y, y_rev: y * y_rev
elif merge_mode == 'ave':
merge_func = lambda y, y_rev: (y + y_rev) / 2
elif merge_mode == 'concat':
merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
else:
merge_func = lambda y, y_rev: [y, y_rev]
# basic case
inputs = Input((timesteps, dim))
layer = wrappers.Bidirectional(rnn(units, return_sequences=True),
merge_mode=merge_mode)
f_merged = K.function([inputs], to_list(layer(inputs)))
f_forward = K.function([inputs], [layer.forward_layer.call(inputs)])
f_backward = K.function([inputs],
[K.reverse(layer.backward_layer.call(inputs), 1)])
y_merged = f_merged(X)
y_expected = to_list(merge_func(f_forward(X)[0], f_backward(X)[0]))
assert len(y_merged) == len(y_expected)
for x1, x2 in zip(y_merged, y_expected):
assert_allclose(x1, x2, atol=1e-5)
# test return_state
inputs = Input((timesteps, dim))
layer = wrappers.Bidirectional(rnn(units, return_state=True),
merge_mode=merge_mode)
f_merged = K.function([inputs], layer(inputs))
f_forward = K.function([inputs], layer.forward_layer.call(inputs))
f_backward = K.function([inputs], layer.backward_layer.call(inputs))
n_states = len(layer.layer.states)
y_merged = f_merged(X)
y_forward = f_forward(X)
y_backward = f_backward(X)
y_expected = to_list(merge_func(y_forward[0], y_backward[0]))
assert len(y_merged) == len(y_expected) + n_states * 2
for x1, x2 in zip(y_merged, y_expected):
assert_allclose(x1, x2, atol=1e-5)
# test if the state of a BiRNN is the concatenation of the underlying RNNs
y_merged = y_merged[-n_states * 2:]
y_forward = y_forward[-n_states:]
y_backward = y_backward[-n_states:]
for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
assert_allclose(state_birnn, state_inner, atol=1e-5)
@pytest.mark.skipif(K.backend() == 'theano' or K.backend() == 'mxnet', reason='Not supported.')
@pytest.mark.parametrize('merge_mode', ['sum', 'concat', None])
def test_Bidirectional_dropout(merge_mode):
rnn = layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
X = [np.random.rand(samples, timesteps, dim)]
inputs = Input((timesteps, dim))
wrapped = wrappers.Bidirectional(rnn(units, dropout=0.2, recurrent_dropout=0.2),
merge_mode=merge_mode)
outputs = to_list(wrapped(inputs, training=True))
assert all(not getattr(x, '_uses_learning_phase') for x in outputs)
inputs = Input((timesteps, dim))
wrapped = wrappers.Bidirectional(rnn(units, dropout=0.2, return_state=True),
merge_mode=merge_mode)
outputs = to_list(wrapped(inputs))
assert all(x._uses_learning_phase for x in outputs)
model = Model(inputs, outputs)
assert model.uses_learning_phase
y1 = to_list(model.predict(X))
y2 = to_list(model.predict(X))
for x1, x2 in zip(y1, y2):
assert_allclose(x1, x2, atol=1e-5)
def test_Bidirectional_state_reuse():
rnn = layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
input1 = Input((timesteps, dim))
layer = wrappers.Bidirectional(rnn(units, return_state=True,
return_sequences=True))
state = layer(input1)[1:]
# test passing invalid initial_state: passing a tensor
input2 = Input((timesteps, dim))
with pytest.raises(ValueError):
output = wrappers.Bidirectional(rnn(units))(input2, initial_state=state[0])
# test valid usage: passing a list
output = wrappers.Bidirectional(rnn(units))(input2, initial_state=state)
model = Model([input1, input2], output)
assert len(model.layers) == 4
assert isinstance(model.layers[-1].input, list)
inputs = [np.random.rand(samples, timesteps, dim),
np.random.rand(samples, timesteps, dim)]
outputs = model.predict(inputs)
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support custom RNN cell yet')
def test_Bidirectional_with_constants():
class RNNCellWithConstants(Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise TypeError('expects constants shape')
[input_shape, constant_shape] = input_shape
# will (and should) raise if more than one constant passed
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(constant_shape[-1], self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = K.dot(inputs, self.input_kernel)
h_state = K.dot(prev_output, self.recurrent_kernel)
h_const = K.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = Input((5, 5))
c = Input((3,))
cell = RNNCellWithConstants(32)
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with CustomObjectScope(custom_objects):
layer = wrappers.Bidirectional(RNN(cell))
y = layer(x, constants=c)
model = Model([x, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 64))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with CustomObjectScope(custom_objects):
layer = wrappers.Bidirectional.from_config(copy.deepcopy(config))
y = layer(x, constants=c)
model = Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
assert_allclose(y_np, y_np_2, atol=1e-4)
# test flat list inputs
with CustomObjectScope(custom_objects):
layer = wrappers.Bidirectional.from_config(copy.deepcopy(config))
y = layer([x, c])
model = Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
assert_allclose(y_np, y_np_3, atol=1e-4)
@pytest.mark.skipif(K.backend() == 'mxnet',
reason='MXNet backend does not support custom RNN cell yet')
def test_Bidirectional_with_constants_layer_passing_initial_state():
class RNNCellWithConstants(Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise TypeError('expects constants shape')
[input_shape, constant_shape] = input_shape
# will (and should) raise if more than one constant passed
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(constant_shape[-1], self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = K.dot(inputs, self.input_kernel)
h_state = K.dot(prev_output, self.recurrent_kernel)
h_const = K.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = Input((5, 5))
c = Input((3,))
s_for = Input((32,))
s_bac = Input((32,))
cell = RNNCellWithConstants(32)
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with CustomObjectScope(custom_objects):
layer = wrappers.Bidirectional(RNN(cell))
y = layer(x, initial_state=[s_for, s_bac], constants=c)
model = Model([x, s_for, s_bac, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 32)),
np.zeros((6, 32)), np.zeros((6, 3))],
np.zeros((6, 64))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_fw_np = np.random.random((6, 32))
s_bk_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with CustomObjectScope(custom_objects):
layer = wrappers.Bidirectional.from_config(copy.deepcopy(config))
y = layer(x, initial_state=[s_for, s_bac], constants=c)
model = Model([x, s_for, s_bac, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
assert_allclose(y_np, y_np_2, atol=1e-4)
# verify that state is used
y_np_2_different_s = model.predict([x_np, s_fw_np + 10., s_bk_np + 10., c_np])
with pytest.raises(AssertionError):
assert_allclose(y_np, y_np_2_different_s, atol=1e-4)
# test flat list inputs
with CustomObjectScope(custom_objects):
layer = wrappers.Bidirectional.from_config(copy.deepcopy(config))
y = layer([x, s_for, s_bac, c])
model = Model([x, s_for, s_bac, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
assert_allclose(y_np, y_np_3, atol=1e-4)
def test_Bidirectional_trainable():
# test that toggling trainable toggles the wrapped RNNs' trainable weights
x = Input(shape=(3, 2))
layer = wrappers.Bidirectional(layers.SimpleRNN(3))
_ = layer(x)
assert len(layer.trainable_weights) == 6
layer.trainable = False
assert len(layer.trainable_weights) == 0
layer.trainable = True
assert len(layer.trainable_weights) == 6
def test_Bidirectional_updates():
x = Input(shape=(3, 2))
layer = wrappers.Bidirectional(layers.SimpleRNN(3))
assert len(layer.updates) == 0
assert len(layer.get_updates_for(None)) == 0
assert len(layer.get_updates_for(x)) == 0
layer.forward_layer.add_update(0, inputs=x)
layer.forward_layer.add_update(1, inputs=None)
layer.backward_layer.add_update(0, inputs=x)
layer.backward_layer.add_update(1, inputs=None)
assert len(layer.updates) == 4
assert len(layer.get_updates_for(None)) == 2
assert len(layer.get_updates_for(x)) == 2
def test_Bidirectional_losses():
x = Input(shape=(3, 2))
layer = wrappers.Bidirectional(
layers.SimpleRNN(3, kernel_regularizer='l1', bias_regularizer='l1'))
_ = layer(x)
assert len(layer.losses) == 4
assert len(layer.get_losses_for(None)) == 4
assert len(layer.get_losses_for(x)) == 0
layer.forward_layer.add_loss(0, inputs=x)
layer.forward_layer.add_loss(1, inputs=None)
layer.backward_layer.add_loss(0, inputs=x)
layer.backward_layer.add_loss(1, inputs=None)
assert len(layer.losses) == 8
assert len(layer.get_losses_for(None)) == 6
assert len(layer.get_losses_for(x)) == 2
if __name__ == '__main__':
pytest.main([__file__])
|
src/tornado-3.2.2/tornado/platform/common.py | code-annotator/tornado-annotated | 645 | 60 |
"""Lowest-common-denominator implementations of platform functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import socket
from tornado.platform import interface
class Waker(interface.Waker):
"""Create an OS independent asynchronous pipe.
For use on platforms that don't have os.pipe() (or where pipes cannot
be passed to select()), but do have sockets. This includes Windows
and Jython.
"""
def __init__(self):
# Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
self.writer = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
a.listen(1)
connect_address = a.getsockname() # assigned (host, port) pair
try:
self.writer.connect(connect_address)
break # success
except socket.error as detail:
if (not hasattr(errno, 'WSAEADDRINUSE') or
detail[0] != errno.WSAEADDRINUSE):
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.reader_fd = self.reader.fileno()
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.send(b"x")
except (IOError, socket.error):
pass
def consume(self):
try:
while True:
result = self.reader.recv(1024)
if not result:
break
except (IOError, socket.error):
pass
def close(self):
self.reader.close()
self.writer.close()
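# Illustrative sketch (not part of Tornado): how an event-loop thread might
# block in select() and be woken from another thread via a Waker. The timer
# delay and the threading setup are invented purely for demonstration.
if __name__ == "__main__":
    import select
    import threading
    waker = Waker()
    # Wake the selector from a second thread shortly after it starts waiting.
    threading.Timer(0.1, waker.wake).start()
    readable, _, _ = select.select([waker.fileno()], [], [])
    assert waker.fileno() in readable
    waker.consume()  # drain the wake-up byte(s) so the fd is no longer readable
    waker.close()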
|
docs/source/auto_examples/plot_usage.py | ruhugu/brokenaxes | 362 | 81 | """
Basic usage
===========
This example presents the basic usage of brokenaxes
"""
import matplotlib.pyplot as plt
from brokenaxes import brokenaxes
import numpy as np
fig = plt.figure(figsize=(5,2))
bax = brokenaxes(xlims=((0, .1), (.4, .7)), ylims=((-1, .7), (.79, 1)), hspace=.05)
x = np.linspace(0, 1, 100)
bax.plot(x, np.sin(10 * x), label='sin')
bax.plot(x, np.cos(10 * x), label='cos')
bax.legend(loc=3)
bax.set_xlabel('time')
bax.set_ylabel('value')
|
clpy/sparse/util.py | fixstars/clpy | 142 | 116 |
import clpy
import clpy.sparse.base
_preamble_atomic_add = '''
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
unsigned long long* address_as_ull =
(unsigned long long*)address;
unsigned long long old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
'''
def isintlike(x):
try:
return bool(int(x) == x)
except (TypeError, ValueError):
return False
def isscalarlike(x):
return clpy.isscalar(x) or (clpy.sparse.base.isdense(x) and x.ndim == 0)
def isshape(x):
if not isinstance(x, tuple) or len(x) != 2:
return False
m, n = x
return isintlike(m) and isintlike(n)
|
test/test_cartesian.py | hwazni/discopy | 205 | 117 | from pytest import raises
from discopy.cartesian import *
def test_Box_repr():
f = Box('f', 1, 2, lambda x: (x, x))
assert "Box('f', 1, 2" in repr(f)
def test_Function_str():
f = Function(2, 1, lambda x, y: x + y)
assert 'Function(dom=2, cod=1,' in str(f)
def test_Function_call():
f = Swap(2, 1)
values = (2, 3)
with raises(TypeError) as err:
f(*values)
assert str(err.value) == messages.expected_input_length(f, values)
def test_Function_then():
f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
assert Function.id(2).then(*(f, g))(20, 21) == 42
def test_Function_then_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f >> g
assert str(err.value) == messages.type_err(Function, g)
g = Function.id(2)
with raises(AxiomError) as err:
f >> g
assert str(err.value) == messages.does_not_compose(f, g)
def test_Function_tensor():
assert Function.id(3)(1, 2, 3)\
== Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)
def test_Function_tensor_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f @ g
assert str(err.value) == messages.type_err(Function, g)
|
bsp/nrf5x/tools/sdk_dist.py | BreederBai/rt-thread | 7,482 | 136 | import os
import sys
import shutil
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))
# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
from mkdist import bsp_copy_files
import rtconfig
library_dir = os.path.join(dist_dir, 'libraries')
print("=> copy nrf52 bsp libraries")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
bsp_copy_files(library_path, library_dir)
|
splash/render_options.py | tashidexiaoL/splashnew | 3,612 | 178 |
# -*- coding: utf-8 -*-
import os
import json
from splash import defaults
from splash.utils import to_bytes, path_join_secure
from splash.errors import BadOption
class RenderOptions(object):
"""
Options that control how to render a response.
"""
_REQUIRED = object()
def __init__(self, data, max_timeout):
self.data = data
self.max_timeout = max_timeout
@classmethod
def raise_error(cls, argument, description, type='bad_argument', **kwargs):
params = {
'type': type,
'argument': argument,
'description': description
}
params.update(kwargs)
raise BadOption(params)
@classmethod
def fromrequest(cls, request, max_timeout):
"""
Initialize options from a Twisted Request.
"""
# 1. GET / POST data
data = {key.decode('utf-8'): values[0].decode('utf-8') for key, values
in request.args.items()}
if request.method == b'POST':
content_type = request.getHeader(b'content-type')
if content_type:
request.content.seek(0)
# 2. application/json POST data
if b'application/json' in content_type:
try:
content = request.content.read().decode('utf-8')
data.update(json.loads(content))
except ValueError as e:
raise BadOption({
'type': 'invalid_json',
'description': "Can't decode JSON",
'message': str(e),
})
# 3. js_source from application/javascript POST requests
if b'application/javascript' in content_type:
data['js_source'] = request.content.read().decode('utf-8')
request.content.seek(0)
data['uid'] = id(request)
return cls(data, max_timeout)
def get_expired_args(self, cache):
"""
Return a list of argument names from load_args which can't be loaded
"""
return cache.get_missing(self.get_load_args().items())
def save_args_to_cache(self, cache):
"""
Process save_args and put all values to cache.
Return a list of (name, key) pairs.
"""
save_args = self.get_save_args()
save_values = [self.data.get(name) for name in save_args]
keys = cache.add_many(save_values)
return list(zip(save_args, keys))
def load_cached_args(self, cache):
load_args = self.get_load_args()
for name, key in (load_args or {}).items():
self.data[name] = cache[key]
def get(self, name, default=_REQUIRED, type=str, range=None):
value = self.data.get(name)
if value is not None:
if type is not None:
try:
value = type(value)
except ValueError:
msg = "Argument %r has a wrong type" % (name,)
self.raise_error(name, msg, required_type=type.__name__)
if range is not None and not (range[0] <= value <= range[1]):
self.raise_error(name, 'Argument is out of the allowed range',
min=range[0], max=range[1], value=value)
return value
elif default is self._REQUIRED:
self.raise_error(name, 'Required argument is missing: %s' % name,
type='argument_required')
else:
return default
def _get_bool(self, name, default=_REQUIRED):
return self.get(name, default, type=int, range=(0, 1))
def _get_url(self, name, default=_REQUIRED):
url = self.get(name, default, type=None)
if isinstance(url, bytes):
url = url.decode('utf8')
return url
def get_uid(self):
return self.get('uid')
def get_url(self):
return self._get_url("url")
def get_baseurl(self):
return self._get_url("baseurl", default=None)
def get_wait(self):
return self.get("wait", defaults.WAIT_TIME, type=float,
range=(0, self.get_timeout()))
def get_timeout(self):
default = min(self.max_timeout, defaults.TIMEOUT)
return self.get("timeout", default, type=float,
range=(0, self.max_timeout))
def get_resource_timeout(self):
return self.get("resource_timeout", defaults.RESOURCE_TIMEOUT,
type=float, range=(0, 1e6))
def get_response_body(self):
return self._get_bool("response_body", defaults.RESPONSE_BODY_ENABLED)
def get_request_body(self):
return self._get_bool("request_body", defaults.REQUEST_BODY_ENABLED)
def get_images(self):
return self._get_bool("images", defaults.AUTOLOAD_IMAGES)
def get_proxy(self):
return self.get("proxy", default=None)
def get_js_source(self):
return self.get("js_source", default=None)
def get_width(self):
return self.get("width", None, type=int, range=(1, defaults.MAX_WIDTH))
def get_height(self):
return self.get("height", None, type=int,
range=(1, defaults.MAX_HEIGTH))
def get_scale_method(self):
scale_method = self.get("scale_method", defaults.IMAGE_SCALE_METHOD)
allowed_scale_methods = ['raster', 'vector']
if scale_method not in allowed_scale_methods:
self.raise_error(
argument='scale_method',
description="Invalid 'scale_method': %s" % scale_method,
allowed=allowed_scale_methods,
received=scale_method,
)
return scale_method
def get_quality(self):
return self.get("quality", defaults.JPEG_QUALITY, type=int, range=(0, 100))
def get_http_method(self):
method = self.get("http_method", "GET")
if method.upper() not in ["POST", "GET"]:
self.raise_error("http_method", "Unsupported HTTP method {}".format(method))
return method
def get_body(self):
body = self.get("body", None, to_bytes)
method = self.get("http_method", "GET").upper()
if method == 'GET' and body:
self.raise_error("body", "GET request should not have a body")
return body
def get_render_all(self, wait=None):
result = self._get_bool("render_all", False)
if result == 1 and wait == 0:
self.raise_error("render_all",
"Pass non-zero 'wait' to render full webpage")
return result
def get_lua_source(self):
return self.get("lua_source")
def get_js_profile(self, js_profiles_path):
js_profile = self.get("js", default=None)
if not js_profile:
return js_profile
if js_profiles_path is None:
self.raise_error('js',
'Javascript profiles are not enabled on server')
try:
profile_dir = path_join_secure(js_profiles_path, js_profile)
except ValueError as e:
# security check fails
print(e)
self.raise_error('js', 'Javascript profile does not exist')
if not os.path.isdir(profile_dir):
self.raise_error('js', 'Javascript profile does not exist')
return profile_dir
def get_headers(self):
headers = self.get("headers", default=None, type=None)
if headers is None:
return headers
if not isinstance(headers, (list, tuple, dict)):
self.raise_error(
argument='headers',
description="'headers' must be either a JSON array of "
"(name, value) pairs or a JSON object"
)
if isinstance(headers, (list, tuple)):
for el in headers:
string_only = all(isinstance(e, str) for e in el)
if not (isinstance(el, (list, tuple)) and len(el) == 2 and string_only):
self.raise_error(
argument='headers',
description="'headers' must be either a JSON array of "
"(name, value) pairs or a JSON object"
)
return headers
def get_save_args(self):
save_args = self.get("save_args", default=None, type=None)
if save_args is None:
return []
if isinstance(save_args, str):
# comma-separated string
save_args = save_args.split(',')
if not isinstance(save_args, list):
self.raise_error(
argument="save_args",
description="'save_args' should be either a comma-separated "
"string or a JSON array with argument names",
)
# JSON array
if not all(isinstance(a, str) for a in save_args):
self.raise_error(
argument="save_args",
description="'save_args' should be a list of strings",
)
return save_args
def get_load_args(self):
load_args = self.get("load_args", default=None, type=None)
if load_args is None:
return {}
if isinstance(load_args, str):
try:
load_args = dict(
kv.split("=", 1) for kv in load_args.split(';')
)
except ValueError:
self.raise_error(
argument="load_args",
description="'load_args' string value is not a "
"semicolon-separated list of name=hash pairs"
)
if not isinstance(load_args, dict):
self.raise_error(
argument="load_args",
description="'load_args' should be either a JSON object with "
"argument hashes or a semicolon-separated list "
"of name=hash pairs"
)
return load_args
def get_viewport(self, wait=None):
viewport = self.get("viewport", defaults.VIEWPORT_SIZE)
if viewport == 'full':
if wait == 0:
self.raise_error("viewport",
"Pass non-zero 'wait' to render full webpage")
else:
try:
validate_size_str(viewport)
except ValueError as e:
self.raise_error("viewport", str(e))
return viewport
def get_filters(self, pool=None, adblock_rules=None):
filter_names = self.get('filters', '')
filter_names = [f for f in filter_names.split(',') if f]
if pool is None and adblock_rules is None: # skip validation
return filter_names
if not filter_names:
return filter_names
if pool is not None:
adblock_rules = pool.network_manager_factory.adblock_rules
if adblock_rules is None:
self.raise_error(
"filters",
"Invalid filter names: %s" % (filter_names,)
)
if adblock_rules is not None:
unknown_filters = adblock_rules.get_unknown_filters(filter_names)
if unknown_filters:
self.raise_error(
"filters",
"Invalid filter names: %s" % (unknown_filters,)
)
return filter_names
def get_allowed_domains(self):
allowed_domains = self.get("allowed_domains", default=None)
if allowed_domains is not None:
return allowed_domains.split(',')
def get_allowed_content_types(self):
content_types = self.get("allowed_content_types", default=['*'])
if isinstance(content_types, str):
content_types = list(filter(None, content_types.split(',')))
return content_types
def get_forbidden_content_types(self):
content_types = self.get("forbidden_content_types", default=[])
if isinstance(content_types, str):
content_types = list(filter(None, content_types.split(',')))
return content_types
def get_html5_media(self):
return self._get_bool("html5_media", defaults.HTML5_MEDIA_ENABLED)
def get_engine(self, browser_engines_enabled=None):
engine = self.get("engine", default="webkit", type=str)
if engine not in {"webkit", "chromium"}:
self.raise_error("engine", "Unknown render engine {}".format(engine))
if browser_engines_enabled is not None:
if engine not in browser_engines_enabled:
self.raise_error("engine", "Disabled render engine {}".format(engine))
return engine
def get_http2(self):
engine = self.get_engine()
if self.get_engine() == "webkit":
default = defaults.WEBKIT_HTTP2_ENABLED
else:
assert engine == 'chromium'
default = defaults.CHROMIUM_HTTP2_ENABLED
return self._get_bool("http2", default)
def get_common_params(self, js_profiles_path):
wait = self.get_wait()
return {
'url': self.get_url(),
'baseurl': self.get_baseurl(),
'wait': wait,
'resource_timeout': self.get_resource_timeout(),
'viewport': self.get_viewport(wait),
'render_all': self.get_render_all(wait),
'images': self.get_images(),
'headers': self.get_headers(),
'proxy': self.get_proxy(),
'js_profile': self.get_js_profile(js_profiles_path),
'js_source': self.get_js_source(),
'http_method': self.get_http_method(),
'body': self.get_body(),
'html5_media': self.get_html5_media(),
'http2': self.get_http2(),
# 'lua': self.get_lua(),
}
def get_image_params(self):
return {
'width': self.get_width(),
'height': self.get_height(),
'scale_method': self.get_scale_method()
}
def get_png_params(self):
return self.get_image_params()
def get_jpeg_params(self):
params = {'quality': self.get_quality()}
params.update(self.get_image_params())
return params
def get_include_params(self):
return dict(
html=self._get_bool("html", defaults.DO_HTML),
iframes=self._get_bool("iframes", defaults.DO_IFRAMES),
png=self._get_bool("png", defaults.DO_PNG),
jpeg=self._get_bool("jpeg", defaults.DO_JPEG),
script=self._get_bool("script", defaults.SHOW_SCRIPT),
console=self._get_bool("console", defaults.SHOW_CONSOLE),
history=self._get_bool("history", defaults.SHOW_HISTORY),
har=self._get_bool("har", defaults.SHOW_HAR),
)
def validate_size_str(size_str):
"""
Validate size string in WxH format.
Can be used to validate both viewport and window size strings. Does not
special-case ``'full'`` viewport. Raises ``ValueError`` if anything goes
wrong.
:param size_str: string to validate
"""
max_width = defaults.VIEWPORT_MAX_WIDTH
max_heigth = defaults.VIEWPORT_MAX_HEIGTH
max_area = defaults.VIEWPORT_MAX_AREA
try:
w, h = map(int, size_str.split('x'))
except ValueError:
raise ValueError("Invalid viewport format: %s" % size_str)
else:
if not ((0 < w <= max_width) and (0 < h <= max_heigth) and
(w * h < max_area)):
raise ValueError("Viewport (%dx%d, area=%d) is out of range (%dx%d, area=%d)" %
(w, h, w * h, max_width, max_heigth, max_area))
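# Illustrative usage of the validator above (not part of Splash itself); the
# concrete size strings are arbitrary examples and the limits come from
# splash.defaults.
if __name__ == "__main__":
    validate_size_str("1024x768")  # passes silently when within the limits
    try:
        validate_size_str("full")  # 'full' is not special-cased here
    except ValueError as exc:
        print(exc)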
|
glue/__init__.py | HPLegion/glue | 550 | 186 |
# Set up configuration variables
__all__ = ['custom_viewer', 'qglue', 'test']
import os
import sys
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('glue-core').version
except DistributionNotFound:
__version__ = 'undefined'
from ._mpl_backend import MatplotlibBackendSetter
sys.meta_path.append(MatplotlibBackendSetter())
from glue.viewers.custom.helper import custom_viewer
# Load user's configuration file
from .config import load_configuration
env = load_configuration()
from .qglue import qglue
from .main import load_plugins # noqa
def test(no_optional_skip=False):
from pytest import main
root = os.path.abspath(os.path.dirname(__file__))
args = [root, '-x']
if no_optional_skip:
args.append('--no-optional-skip')
return main(args=args)
from glue._settings_helpers import load_settings
load_settings()
# In PyQt 5.5+, PyQt overrides the default exception catching and fatally
# crashes the Qt application without printing out any details about the error.
# Below we revert the exception hook to the original Python one. Note that we
# can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect
# the default excepthook is in place and override it.
def handle_exception(exc_type, exc_value, exc_traceback):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handle_exception
|
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/dropbox/views.py | DemarcusL/django_wiki_lab | 6,342 | 201 | import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import DropboxOAuth2Provider
class DropboxOAuth2Adapter(OAuth2Adapter):
provider_id = DropboxOAuth2Provider.id
access_token_url = "https://api.dropbox.com/oauth2/token"
authorize_url = "https://www.dropbox.com/oauth2/authorize"
profile_url = "https://api.dropbox.com/2/users/get_current_account"
redirect_uri_protocol = "https"
def complete_login(self, request, app, token, **kwargs):
response = requests.post(
self.profile_url,
headers={"Authorization": "Bearer %s" % (token.token,)},
)
response.raise_for_status()
return self.get_provider().sociallogin_from_response(request, response.json())
oauth_login = OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)
|
src/ros_comm/rosmsg/setup.py | jungleni/ros_code_reading | 742 | 212 |
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['rosmsg'],
package_dir={'': 'src'},
scripts=['scripts/rosmsg', 'scripts/rosmsg-proto', 'scripts/rossrv'],
requires=['genmsg', 'rosbag', 'roslib', 'rospkg']
)
setup(**d)
|
pixloc/visualization/viz_3d.py | jmorlana/pixloc | 457 | 227 |
"""
3D visualization primitives based on Plotly.
We might want to instead use a more powerful library like Open3D.
Plotly however supports animations, buttons and sliders.
1) Initialize a figure with `fig = init_figure()`
2) Plot points, cameras, lines, or create a slider animation.
3) Call `fig.show()` to render the figure.
"""
import plotly.graph_objects as go
import numpy as np
from ..pixlib.geometry.utils import to_homogeneous
def init_figure(height=800):
"""Initialize a 3D figure."""
fig = go.Figure()
fig.update_layout(
height=height,
scene_camera=dict(
eye=dict(x=0., y=-.1, z=-2), up=dict(x=0, y=-1., z=0)),
scene=dict(
xaxis=dict(showbackground=False),
yaxis=dict(showbackground=False),
aspectmode='data', dragmode='orbit'),
margin=dict(l=0, r=0, b=0, t=0, pad=0)) # noqa E741
return fig
def plot_points(fig, pts, color='rgba(255, 0, 0, 1)', ps=2):
"""Plot a set of 3D points."""
x, y, z = pts.T
tr = go.Scatter3d(
x=x, y=y, z=z, mode='markers', marker_size=ps,
marker_color=color, marker_line_width=.2)
fig.add_trace(tr)
def plot_camera(fig, R, t, K, color='rgb(0, 0, 255)'):
"""Plot a camera as a cone with camera frustum."""
x, y, z = t
u, v, w = R @ -np.array([0, 0, 1])
tr = go.Cone(
x=[x], y=[y], z=[z], u=[u], v=[v], w=[w], anchor='tip',
showscale=False, colorscale=[[0, color], [1, color]],
sizemode='absolute')
fig.add_trace(tr)
W, H = K[0, 2]*2, K[1, 2]*2
corners = np.array([[0, 0], [W, 0], [W, H], [0, H], [0, 0]])
corners = to_homogeneous(corners) @ np.linalg.inv(K).T
corners = (corners/2) @ R.T + t
x, y, z = corners.T
tr = go.Scatter3d(
x=x, y=y, z=z, line=dict(color='rgba(0, 0, 0, .5)'),
marker=dict(size=0.0001), showlegend=False)
fig.add_trace(tr)
def create_slider_animation(fig, traces):
"""Create a slider that animates a list of traces (e.g. 3D points)."""
slider = {'steps': []}
frames = []
fig.add_trace(traces[0])
idx = len(fig.data) - 1
for i, tr in enumerate(traces):
frames.append(go.Frame(name=str(i), traces=[idx], data=[tr]))
step = {"args": [
[str(i)],
{"frame": {"redraw": True},
"mode": "immediate"}],
"label": i,
"method": "animate"}
slider['steps'].append(step)
fig.frames = tuple(frames)
fig.layout.sliders = (slider,)
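# A minimal usage sketch of the workflow described in the module docstring.
# It is an added illustration, not part of the original pixloc code; the random
# point cloud and the pinhole intrinsics K below are arbitrary assumptions.
# Run it as `python -m pixloc.visualization.viz_3d` so that the relative import
# above resolves.
if __name__ == '__main__':
    fig = init_figure()
    pts = np.random.randn(100, 3)  # 100 random 3D points
    plot_points(fig, pts)
    # an identity-rotation camera at the origin with an assumed 640x480 intrinsic
    K = np.array([[500., 0., 320.],
                  [0., 500., 240.],
                  [0., 0., 1.]])
    plot_camera(fig, np.eye(3), np.zeros(3), K)
    fig.show()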
|
tests/test_subpixel_upsample.py | Project-MONAI/MONAI | 2,971 | 244 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
import torch.nn as nn
from parameterized import parameterized
from monai.networks import eval_mode
from monai.networks.blocks import SubpixelUpsample
from monai.networks.layers.factories import Conv
TEST_CASE_SUBPIXEL = []
for inch in range(1, 5):
for dim in range(1, 4):
for factor in range(1, 3):
test_case = [
{"dimensions": dim, "in_channels": inch, "scale_factor": factor},
(2, inch, *([8] * dim)),
(2, inch, *([8 * factor] * dim)),
]
TEST_CASE_SUBPIXEL.append(test_case)
TEST_CASE_SUBPIXEL_2D_EXTRA = [
{"dimensions": 2, "in_channels": 2, "scale_factor": 3},
(2, 2, 8, 4), # different size for H and W
(2, 2, 24, 12),
]
TEST_CASE_SUBPIXEL_3D_EXTRA = [
{"dimensions": 3, "in_channels": 1, "scale_factor": 2},
(2, 1, 16, 8, 4), # different size for H, W and D
(2, 1, 32, 16, 8),
]
conv_block = nn.Sequential(
Conv[Conv.CONV, 3](1, 4, kernel_size=1), Conv[Conv.CONV, 3](4, 8, kernel_size=3, stride=1, padding=1)
)
TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA = [
{"dimensions": 3, "in_channels": 1, "scale_factor": 2, "conv_block": conv_block},
(2, 1, 16, 8, 4), # different size for H, W and D
(2, 1, 32, 16, 8),
]
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_2D_EXTRA)
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_3D_EXTRA)
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA)
# add every test back with the pad/pool sequential component omitted
for tests in list(TEST_CASE_SUBPIXEL):
args: dict = tests[0] # type: ignore
args = dict(args)
args["apply_pad_pool"] = False
TEST_CASE_SUBPIXEL.append([args, tests[1], tests[2]])
class TestSUBPIXEL(unittest.TestCase):
@parameterized.expand(TEST_CASE_SUBPIXEL)
def test_subpixel_shape(self, input_param, input_shape, expected_shape):
net = SubpixelUpsample(**input_param)
with eval_mode(net):
result = net.forward(torch.randn(input_shape))
self.assertEqual(result.shape, expected_shape)
if __name__ == "__main__":
unittest.main()
|
nemo/collections/asr/parts/numba/rnnt_loss/rnnt_numpy.py | madhukarkm/NeMo | 4,145 | 257 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.autograd import Function, Variable
from torch.nn import Module
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int32, "labels")
check_type(label_lengths, torch.int32, "label_lengths")
check_type(lengths, torch.int32, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
f"Must have a length per example. "
f"Given lengths dim: {lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
"Must have a label length per example. "
f"Given label lengths dim : {label_lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
    check_dim(lengths, 1, "lengths")
    check_dim(label_lengths, 1, "label_lengths")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")
if U != max_U + 1:
raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")
def _assert_no_grad(tensor):
assert not tensor.requires_grad, (
"gradients only computed for log_probs - please " "mark other tensors as not requiring gradients"
)
def forward_pass(log_probs, labels, blank):
"""
Computes probability of the forward variable alpha.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the forward variable probabilities - alpha of shape [T, U]
and the log likelihood of this forward step.
"""
T, U, _ = log_probs.shape
alphas = np.zeros((T, U), dtype='f')
for t in range(1, T):
alphas[t, 0] = alphas[t - 1, 0] + log_probs[t - 1, 0, blank]
for u in range(1, U):
alphas[0, u] = alphas[0, u - 1] + log_probs[0, u - 1, labels[u - 1]]
for t in range(1, T):
for u in range(1, U):
no_emit = alphas[t - 1, u] + log_probs[t - 1, u, blank]
emit = alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]]
alphas[t, u] = np.logaddexp(emit, no_emit)
loglike = alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]
return alphas, loglike
def backward_pass(log_probs, labels, blank):
"""
Computes probability of the backward variable beta.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the backward variable probabilities - beta of shape [T, U]
and the log likelihood of this backward step.
"""
T, U, _ = log_probs.shape
betas = np.zeros((T, U), dtype='f')
betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank]
for t in reversed(range(T - 1)):
betas[t, U - 1] = betas[t + 1, U - 1] + log_probs[t, U - 1, blank]
for u in reversed(range(U - 1)):
betas[T - 1, u] = betas[T - 1, u + 1] + log_probs[T - 1, u, labels[u]]
for t in reversed(range(T - 1)):
for u in reversed(range(U - 1)):
no_emit = betas[t + 1, u] + log_probs[t, u, blank]
emit = betas[t, u + 1] + log_probs[t, u, labels[u]]
betas[t, u] = np.logaddexp(emit, no_emit)
return betas, betas[0, 0]
def compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda):
"""
    Computes the gradients of the log_probs with respect to the log probability of this step occurring.
    Args:
log_probs: Tensor of shape [T, U, V+1]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Tensor of shape [T, U] which represents the backward variable.
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
Gradients of shape [T, U, V+1] with respect to the forward log probability
"""
T, U, _ = log_probs.shape
grads = np.full(log_probs.shape, -float("inf"))
log_like = betas[0, 0] # == alphas[T - 1, U - 1] + betas[T - 1, U - 1]
# // grad to last blank transition
grads[T - 1, U - 1, blank] = alphas[T - 1, U - 1]
grads[: T - 1, :, blank] = alphas[: T - 1, :] + betas[1:, :]
# // grad to label transition
for u, l in enumerate(labels):
grads[:, u, l] = alphas[:, u] + betas[:, u + 1]
grads = -np.exp(grads + log_probs - log_like)
if fastemit_lambda > 0.0:
for u, l in enumerate(labels):
grads[:, u, l] = (1.0 + fastemit_lambda) * grads[:, u, l]
return grads
def fastemit_regularization(log_probs, labels, alphas, betas, blank, fastemit_lambda):
"""
Describes the computation of FastEmit regularization from the paper -
[FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148)
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Unused. Labels of shape [B, U]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Unused. Tensor of shape [T, U] which represents the backward variable.
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
The regularized negative log likelihood - lambda * P˜(At, u|x)
"""
# General calculation of the fastemit regularization alignments
T, U, _ = log_probs.shape
# alignment = np.zeros((T, U), dtype='float32')
#
# for t in range(0, T):
# alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1]
#
# for t in range(0, T):
# for u in range(0, U - 1):
# emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1]
# alignment[t, u] = emit
# reg = fastemit_lambda * (alignment[T - 1, U - 1])
# The above is equivalent to below, without need of computing above
# reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1])
# The above is also equivalent to below, without need of computing the betas alignment matrix
reg = fastemit_lambda * (alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank])
return -reg
def transduce(log_probs, labels, blank=0, fastemit_lambda=0.0):
"""
Args:
log_probs: 3D array with shape
[input len, output len + 1, vocab size]
labels: 1D array with shape [output time steps]
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
float: The negative log-likelihood
3D array: Gradients with respect to the
            unnormalized input activations
2d arrays: Alphas matrix (TxU)
2d array: Betas matrix (TxU)
"""
alphas, ll_forward = forward_pass(log_probs, labels, blank)
betas, ll_backward = backward_pass(log_probs, labels, blank)
grads = compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda)
return -ll_forward, grads, alphas, betas
def transduce_batch(log_probs, labels, flen, glen, blank=0, fastemit_lambda=0.0):
"""
Compute the transducer loss of the batch.
Args:
log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax.
labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning.
flen: Length vector of the acoustic sequence.
glen: Length vector of the target sequence.
blank: Id of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix.
"""
grads = np.zeros_like(log_probs)
costs = []
for b in range(log_probs.shape[0]):
t = int(flen[b])
u = int(glen[b]) + 1
ll, g, alphas, betas = transduce(log_probs[b, :t, :u, :], labels[b, : u - 1], blank, fastemit_lambda)
grads[b, :t, :u, :] = g
reg = fastemit_regularization(
log_probs[b, :t, :u, :], labels[b, : u - 1], alphas, betas, blank, fastemit_lambda
)
ll += reg
costs.append(ll)
return costs, grads
class _RNNT(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, fastemit_lambda):
costs, grads = transduce_batch(
acts.detach().cpu().numpy(),
labels.cpu().numpy(),
act_lens.cpu().numpy(),
label_lens.cpu().numpy(),
blank,
fastemit_lambda,
)
costs = torch.FloatTensor([sum(costs)])
grads = torch.Tensor(grads).to(acts)
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
return ctx.grads, None, None, None, None, None
class RNNTLoss(Module):
"""
Parameters:
`blank_label` (int): default 0 - label index of blank token
fastemit_lambda: Float scaling factor for FastEmit regularization.
"""
def __init__(self, blank: int = 0, fastemit_lambda: float = 0.0):
super(RNNTLoss, self).__init__()
self.blank = blank
self.fastemit_lambda = fastemit_lambda
self.rnnt = _RNNT.apply
def forward(self, acts, labels, act_lens, label_lens):
assert len(labels.size()) == 2
_assert_no_grad(labels)
_assert_no_grad(act_lens)
_assert_no_grad(label_lens)
certify_inputs(acts, labels, act_lens, label_lens)
acts = torch.nn.functional.log_softmax(acts, -1)
return self.rnnt(acts, labels, act_lens, label_lens, self.blank, self.fastemit_lambda)
if __name__ == '__main__':
loss = RNNTLoss(fastemit_lambda=0.01)
torch.manual_seed(0)
acts = torch.randn(1, 2, 5, 3)
labels = torch.tensor([[0, 2, 1, 2]], dtype=torch.int32)
act_lens = torch.tensor([2], dtype=torch.int32)
label_lens = torch.tensor([len(labels[0])], dtype=torch.int32)
loss_val = loss(acts, labels, act_lens, label_lens)
|
gfirefly/dbentrust/dbutils.py | handsome3163/H2Dgame-Firefly | 675 | 270 | #coding:utf8
'''
Created on 2013-8-21
@author: lan (www.9miao.com)
'''
import itertools
import datetime
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if isinstance(obj, unicode): obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""
if isinstance(lst, basestring):
return lst
else:
return ', '.join(lst)
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
def _interpolate(sformat):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
from tokenize import tokenprog
tokenprog = tokenprog
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = sformat.find("$", pos)
if dollar < 0:
break
nextchar = sformat[dollar + 1]
if nextchar == "{":
chunks.append((0, sformat[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(sformat, pos)
tstart, tend = match.regs[3]
token = sformat[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, sformat[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, sformat[pos:dollar]))
match, pos = matchorfail(sformat, dollar + 1)
while pos < len(sformat):
if sformat[pos] == "." and \
pos + 1 < len(sformat) and sformat[pos + 1] in namechars:
match, pos = matchorfail(sformat, pos + 1)
elif sformat[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(sformat, pos)
tstart, tend = match.regs[3]
token = sformat[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, sformat[dollar + 1:pos]))
else:
chunks.append((0, sformat[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(sformat):
chunks.append((0, sformat[pos:]))
return chunks
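# Illustrative note (added comment, not original code): _interpolate is expected
# to split "a $x!" into [(0, 'a '), (1, 'x'), (0, '!')] - plain text chunks are
# tagged with 0 and expressions to be evaluated are tagged with 1.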
def sqlwhere(dictionary, grouping=' AND '):
"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
dictionary = dictionary.copy() # eval mucks with it
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, paramstyle
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, basestring):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, basestring):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (basestring, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query looks already escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
class SQLProducer:
"""Database"""
def __init__(self):
"""Creates a database.
"""
pass
def query(self, sql_query,processed=False, svars=None):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if svars is None:
svars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, svars)
return sql_query
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, svars):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, svars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def _where(self, where, svars):
if isinstance(where, (int, long)):
where = "id = " + sqlparam(where)
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, svars)
return where
def select(self, tables, svars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
"""
if svars is None: svars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, svars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
_keys = SQLQuery.join(values.keys(), ', ')
_values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
return sql_query
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "<EMAIL>"}, {"name": "bar", "email": "<EMAIL>"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', '<EMAIL>'), ('bar', '<EMAIL>')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
# make sure all rows have same keys.
for v in values:
if v.keys() != keys:
raise ValueError, 'Bad data'
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, svars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if svars is None: svars = {}
where = self._where(where, svars)
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, svars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if svars is None:
svars = {}
where = self._where(where, svars)
q = 'DELETE FROM ' + table
if using:
q += ' USING ' + sqllist(using)
if where:
q += ' WHERE ' + where
return q
sqlproducer = SQLProducer()
|
Arrays/LeftRotation.py | anand722000/algo_ds_101 | 175 | 286 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the rotLeft function below.
def rotLeft(a, d):
alist = list(a)
b = alist[d:]+alist[:d]
return b
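# Example (illustrative): rotLeft([1, 2, 3, 4, 5], 2) returns [3, 4, 5, 1, 2],
# i.e. the first d elements are moved to the end of the list.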
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nd = input().split()
n = int(nd[0])
d = int(nd[1])
a = list(map(int, input().rstrip().split()))
result = rotLeft(a, d)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
nearpy/examples/example2.py | samyoo78/NearPy | 624 | 289 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import scipy
import unittest
import time
from nearpy import Engine
from nearpy.distances import CosineDistance
from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper
def example2():
# Dimension of feature space
DIM = 100
# Number of data points (dont do too much because of exact search)
POINTS = 20000
##########################################################
print('Performing indexing with HashPermutations...')
t0 = time.time()
# Create permutations meta-hash
permutations = HashPermutations('permut')
# Create binary hash as child hash
rbp_perm = RandomBinaryProjections('rbp_perm', 14)
rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100}
# Add rbp as child hash of permutations hash
permutations.add_child_hash(rbp_perm, rbp_conf)
# Create engine
engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm.store_vector(v)
# Then update permuted index
permutations.build_permuted_index()
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 3
print('\nNeighbour distances with HashPermutations:')
print(' -> Candidate count is %d' % engine_perm.candidate_count(query))
results = engine_perm.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix, query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with HashPermutationMapper...')
t0 = time.time()
# Create permutations meta-hash
permutations2 = HashPermutationMapper('permut2')
# Create binary hash as child hash
rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14)
# Add rbp as child hash of permutations hash
permutations2.add_child_hash(rbp_perm2)
# Create engine
engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm2.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with HashPermutationMapper:')
print(' -> Candidate count is %d' % engine_perm2.candidate_count(query))
results = engine_perm2.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with multiple binary hashes...')
t0 = time.time()
hashes = []
for k in range(20):
hashes.append(RandomBinaryProjections('rbp_%d' % k, 10))
# Create engine
engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_rbps.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with multiple binary hashes:')
print(' -> Candidate count is %d' % engine_rbps.candidate_count(query))
results = engine_rbps.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
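# The file above only defines example2(); the entry-point guard below is an
# added assumption about how the example would typically be run.
if __name__ == '__main__':
    example2()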
|
src/mem/slicc/ast/TypeDeclAST.py | qianlong4526888/haha | 135 | 299 | # Copyright (c) 1999-2008 <NAME> and <NAME>
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols.Type import Type
class TypeDeclAST(DeclAST):
def __init__(self, slicc, type_ast, pairs, field_asts):
super(TypeDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.field_asts = field_asts
def __repr__(self):
return "[TypeDecl: %r]" % (self.type_ast)
def files(self, parent=None):
if "external" in self:
return set()
if parent:
ident = "%s_%s" % (parent, self.type_ast.ident)
else:
ident = self.type_ast.ident
return set(("%s.hh" % ident, "%s.cc" % ident))
def generate(self):
ident = str(self.type_ast)
machine = self.symtab.state_machine
# Make the new type
new_type = Type(self.symtab, ident, self.location, self.pairs,
self.state_machine)
if machine:
machine.addType(new_type)
self.symtab.newSymbol(new_type)
self.symtab.pushFrame()
# Add all of the fields of the type to it
for field in self.field_asts:
field.generate(new_type)
self.symtab.popFrame()
|
src/biotite/file.py | danijoo/biotite | 208 | 308 | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite"
__author__ = "<NAME>"
__all__ = ["File", "TextFile", "InvalidFileError"]
import abc
import io
import warnings
from .copyable import Copyable
import copy
class File(Copyable, metaclass=abc.ABCMeta):
"""
Base class for all file classes.
    The constructor creates an empty file that can be filled with data
using the class specific setter methods.
Conversely, the class method :func:`read()` reads a file from disk
(or a file-like object from other sources).
In order to write the instance content into a file the
:func:`write()` method is used.
"""
def __init__(self):
# Support for deprecated instance method 'read()':
# When creating an instance, the 'read()' class method is
# replaced by the instance method, so that subsequent
# 'read()' calls are delegated to the instance method
self.read = self._deprecated_read
@classmethod
@abc.abstractmethod
def read(cls, file):
"""
Parse a file (or file-like object).
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Returns
-------
file_object : File
An instance from the respective :class:`File` subclass
representing the parsed file.
"""
pass
def _deprecated_read(self, file, *args, **kwargs):
"""
Support for deprecated instance method :func:`read()`.
Internally this calls the :func:`read()` class method and
replaces the data in `self` with the data from the newly created
:class:`File` object
"""
warnings.warn(
"Instance method 'read()' is deprecated, "
"use class method instead",
DeprecationWarning
)
cls = type(self)
new_file = cls.read(file, *args, **kwargs)
self.__dict__.update(new_file.__dict__)
@abc.abstractmethod
def write(self, file):
"""
Write the contents of this :class:`File` object into a file.
Parameters
----------
file_name : file-like object or str
The file to be written to.
Alternatively a file path can be supplied.
"""
pass
class TextFile(File, metaclass=abc.ABCMeta):
"""
Base class for all line based text files.
When reading a file, the text content is saved as list of strings,
one for each line.
When writing a file, this list is written into the file.
Attributes
----------
lines : list
        List of strings representing the lines in the text file.
PROTECTED: Do not modify from outside.
"""
def __init__(self):
super().__init__()
self.lines = []
@classmethod
def read(cls, file, *args, **kwargs):
# File name
if isinstance(file, str):
with open(file, "r") as f:
lines = f.read().splitlines()
# File object
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
lines = file.read().splitlines()
file_object = cls(*args, **kwargs)
file_object.lines = lines
return file_object
@staticmethod
def read_iter(file):
"""
Create an iterator over each line of the given text file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Yields
------
line : str
The current line in the file.
"""
# File name
if isinstance(file, str):
with open(file, "r") as f:
while True:
line = f.readline()
if not line:
break
yield line
# File object
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
while True:
line = file.readline()
if not line:
break
yield line
def write(self, file):
"""
Write the contents of this object into a file
(or file-like object).
Parameters
----------
file_name : file-like object or str
The file to be written to.
Alternatively a file path can be supplied.
"""
if isinstance(file, str):
with open(file, "w") as f:
f.write("\n".join(self.lines) + "\n")
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
file.write("\n".join(self.lines) + "\n")
def __copy_fill__(self, clone):
super().__copy_fill__(clone)
clone.lines = copy.copy(self.lines)
def __str__(self):
return("\n".join(self.lines))
class InvalidFileError(Exception):
"""
Indicates that the file is not suitable for the requested action,
either because the file does not contain the required data or
because the file is malformed.
"""
pass
def wrap_string(text, width):
"""
A much simpler and hence much more efficient version of
`textwrap.wrap()`.
This function simply wraps the given `text` after `width`
characters, ignoring sentences, whitespaces, etc.
"""
lines = []
for i in range(0, len(text), width):
lines.append(text[i : i+width])
return lines
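# For example: wrap_string("biotite", 3) returns ['bio', 'tit', 'e'].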
def is_binary(file):
if isinstance(file, io.BufferedIOBase):
return True
# for file wrappers, e.g. 'TemporaryFile'
elif hasattr(file, "file") and isinstance(file.file, io.BufferedIOBase):
return True
else:
return False
def is_text(file):
if isinstance(file, io.TextIOBase):
return True
# for file wrappers, e.g. 'TemporaryFile'
elif hasattr(file, "file") and isinstance(file.file, io.TextIOBase):
return True
else:
return False
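# Hedged usage sketch (added comment, not part of the original module): TextFile
# already implements read() and write(), so line-oriented files can be handled
# with a trivial subclass; the file names below are placeholders.
#
# class PlainTextFile(TextFile):
#     pass
#
# text_file = PlainTextFile.read("input.txt")
# print(text_file.lines[:3])
# text_file.write("copy.txt")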
|
electrum/dnssec.py | Jesusown/electrum | 5,905 | 321 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import dns
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from .logging import get_logger
_logger = get_logger(__name__)
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
    dns.rrset.from_text('.', 1, 'IN', 'DNSKEY', '<KEY>'),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W<KEY>S Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def _check_query(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise Exception('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
def _get_and_validate(ns, url, _type):
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
break
except dns.dnssec.ValidationFailure:
# It's OK as long as one key validates
continue
if not root_rrset:
raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
keys = {dns.name.root: root_rrset}
# top-down verification
parts = url.split('.')
for i in range(len(parts), 0, -1):
sub = '.'.join(parts[i-1:])
name = dns.name.from_text(sub)
# If server is authoritative, don't fetch DNSKEY
query = dns.message.make_query(sub, dns.rdatatype.NS)
response = dns.query.udp(query, ns, 3)
assert response.rcode() == dns.rcode.NOERROR, "query error"
rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
rr = rrset[0]
if rr.rdtype == dns.rdatatype.SOA:
continue
# get DNSKEY (self-signed)
rrset = _check_query(ns, sub, dns.rdatatype.DNSKEY, None)
# get DS (signed by parent)
ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys)
# verify that a signed DS validates DNSKEY
for ds in ds_rrset:
for dnskey in rrset:
htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
good_ds = dns.dnssec.make_ds(name, dnskey, htype)
if ds == good_ds:
break
else:
continue
break
else:
raise Exception("DS does not match DNSKEY")
# set key for next iteration
keys = {name: rrset}
# get TXT record (signed by zone)
rrset = _check_query(ns, url, _type, keys)
return rrset
def query(url, rtype):
# 8.8.8.8 is Google's public DNS server
nameservers = ['8.8.8.8']
ns = nameservers[0]
try:
out = _get_and_validate(ns, url, rtype)
validated = True
except Exception as e:
_logger.info(f"DNSSEC error: {repr(e)}")
out = dns.resolver.resolve(url, rtype)
validated = False
return out, validated
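# Hedged usage sketch (added comment): 'electrum.org' is only a placeholder
# domain used for illustration.
#
# answer, validated = query('electrum.org', dns.rdatatype.TXT)
# for rr in answer:
#     print(rr, 'DNSSEC validated:', validated)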
|
TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py | ckamtsikis/cmssw | 852 | 340 | import FWCore.ParameterSet.Config as cms
#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## maximum number of jets to be considered
maxNJets = cms.int32(4),
## nominal WMass parameter (in GeV)
wMass = cms.double(80.4),
    ## use b-tagging to distinguish between light and b jets
useBTagging = cms.bool(False),
## choose algorithm for b-tagging
bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets = cms.double(1.0),
maxBDiscLightJets = cms.double(3.0)
)
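# Hedged usage sketch (added comment, not part of the original cfi): in a CMSSW
# configuration this producer would typically be loaded and scheduled in a path,
# for example:
#
# process.load("TopQuarkAnalysis.TopJetCombination.TtSemiLepJetCombMaxSumPtWMass_cfi")
# process.p = cms.Path(process.findTtSemiLepJetCombMaxSumPtWMass)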
|
libsaas/services/twilio/applications.py | MidtownFellowship/libsaas | 155 | 344 | from libsaas import http, parsers
from libsaas.services import base
from libsaas.services.twilio import resource
class ApplicationsBase(resource.TwilioResource):
path = 'Applications'
class Application(ApplicationsBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class Applications(ApplicationsBase):
@base.apimethod
def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Applications belonging to an account.
:var FriendlyName: Only return the Account resources with friendly
names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class ConnectAppsBase(resource.TwilioResource):
path = 'ConnectApps'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class ConnectApp(ConnectAppsBase):
pass
class ConnectApps(ConnectAppsBase):
@base.apimethod
def get(self, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class AuthorizedConnectAppsBase(resource.TwilioResource):
path = 'AuthorizedConnectApps'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class AuthorizedConnectApp(AuthorizedConnectAppsBase):
pass
class AuthorizedConnectApps(AuthorizedConnectAppsBase):
@base.apimethod
def get(self, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Authorized Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
|
tests/ast/nodes/test_from_node.py | upgradvisor/vyper | 1,471 | 351 | from vyper import ast as vy_ast
def test_output_class():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert isinstance(new_node, vy_ast.Int)
def test_source():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.src == new_node.src
assert old_node.node_source_code == new_node.node_source_code
def test_kwargs():
old_node = vy_ast.parse_to_ast("42").body[0].value
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.value == 42
assert new_node.value == 666
def test_compare_nodes():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert not vy_ast.compare_nodes(old_node, new_node)
def test_new_node_has_no_parent():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert new_node._parent is None
assert new_node._depth == 0
|
Python/Examples/Macros/SettingsAxesOptimization.py | archformco/RoboDK-API | 161 | 425 | # This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string.
# You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings.
# It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API.
#
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# For more information visit:
# https://robodk.com/doc/en/PythonAPI/robolink.html
from robolink import * # RoboDK API
# JSON tools
import json
# Start the RoboDK API
RDK = Robolink()
# Ask the user to select a robot arm (6 axis robot which can have external axes)
robot = RDK.ItemUserPick("Select a robot arm",ITEM_TYPE_ROBOT_ARM)
# Default optimization settings test template
AxesOptimSettings = {
# Optimization parameters:
"Active": 1, # Use generic axes optimization: 0=Disabled or 1=Enabled
"Algorithm": 2, # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead
"MaxIter": 650, # Max. number of iterations
"Tol": 0.0016, # Tolerance to stop iterations
# Absolute Reference joints (double):
"AbsJnt_1": 104.17,
"AbsJnt_2": 11.22,
"AbsJnt_3": 15.97,
"AbsJnt_4": -87.48,
"AbsJnt_5": -75.36,
"AbsJnt_6": 63.03,
"AbsJnt_7": 174.13,
"AbsJnt_8": 173.60,
"AbsJnt_9": 0,
# Using Absolute reference joints (0: No, 1: Yes):
"AbsOn_1": 1,
"AbsOn_2": 1,
"AbsOn_3": 1,
"AbsOn_4": 1,
"AbsOn_5": 1,
"AbsOn_6": 1,
"AbsOn_7": 1,
"AbsOn_8": 1,
"AbsOn_9": 1,
# Weight for absolute reference joints (double):
"AbsW_1": 100,
"AbsW_2": 100,
"AbsW_3": 100,
"AbsW_4": 89,
"AbsW_5": 90,
"AbsW_6": 92,
"AbsW_7": 92,
"AbsW_8": 96,
"AbsW_9": 50,
# Using for relative joint motion smoothing (0: No, 1: Yes):
"RelOn_1": 1,
"RelOn_2": 1,
"RelOn_3": 1,
"RelOn_4": 1,
"RelOn_5": 1,
"RelOn_6": 1,
"RelOn_7": 1,
"RelOn_8": 1,
"RelOn_9": 1,
# Weight for relative joint motion (double):
"RelW_1": 5,
"RelW_2": 47,
"RelW_3": 44,
"RelW_4": 43,
"RelW_5": 36,
"RelW_6": 47,
"RelW_7": 53,
"RelW_8": 59,
"RelW_9": 0,
}
# Update one value, for example, make it active:
ToUpdate = {}
ToUpdate["Active"] = 1
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Example to make a partial or full update
count = 1
while True:
for i in range(7):
# Partial update
ToUpdate = {}
ToUpdate["AbsJnt_" + str(i+1)] = (count+i)*4
ToUpdate["AbsOn_" + str(i+1)] = count % 2
ToUpdate["AbsW_" + str(i+1)] = (count+i)
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Full update
#AxesOptimSettings["AbsJnt_" + str(i+1)] = (count+i)*4
#AxesOptimSettings["AbsW_" + str(i+1)] = (count+i)
#AxesOptimSettings["AbsOn_" + str(i+1)] = count % 2
# Full update
#print(robot.setParam("OptimAxes", str(AxesOptimSettings)))
count = count + 1
# Read settings
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
# Example to read the current axes optimization settings:
while True:
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
|
tests/test.py | kjanik70/tflearn | 10,882 | 433 | '''
This file contains test cases for tflearn
'''
import tensorflow.compat.v1 as tf
import tflearn
import unittest
class TestActivations(unittest.TestCase):
'''
This class contains test cases for the functions in tflearn/activations.py
'''
PLACES = 4 # Number of places to match when testing floating point values
def test_linear(self):
f = tflearn.linear
# Case 1
x = tf.placeholder(tf.float32, shape=())
self.assertEqual(f(x), x)
# Case 2
x = tf.placeholder(tf.int64, shape=())
self.assertEqual(f(x), x)
def test_tanh(self):
f = tflearn.tanh
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:0.5}),
0.4621, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-0.25}),
-0.2449, places=TestActivations.PLACES)
def test_leaky_relu(self):
f = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:1}),
1, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-1}),
-0.2, places=TestActivations.PLACES)
# Case 4
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-5}),
-1, places=TestActivations.PLACES)
def test_apply_activation(self):
lrelu_02 = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.constant(-0.25, tf.float32)
with tf.Session() as sess:
# Case 1: 'linear'
self.assertEqual(
sess.run(tflearn.activation(x, 'linear')),
-0.25)
# Case 2: 'relu'
self.assertEqual(
sess.run(tflearn.activation(x, 'relu')),
0)
# Case 3: 'leaky_relu'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'leaky_relu')),
-0.025, places=TestActivations.PLACES)
# Case 4: 'tanh'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'tanh')),
-0.2449, places=TestActivations.PLACES)
# Case 5: lrelu_02 (callable)
self.assertAlmostEqual(
sess.run(tflearn.activation(x, lrelu_02)),
-0.05, places=TestActivations.PLACES)
if __name__ == "__main__":
unittest.main() |
venv/Lib/site-packages/patsy/test_regressions.py | EkremBayar/bayar | 710 | 443 | # This file is part of Patsy
# Copyright (C) 2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)
from patsy import (EvalEnvironment, dmatrix, build_design_matrices,
PatsyError, Origin)
def test_issue_11():
# Give a sensible error message for level mismatches
# (At some points we've failed to put an origin= on these errors)
env = EvalEnvironment.capture()
data = {"X" : [0,1,2,3], "Y" : [1,2,3,4]}
formula = "C(X) + Y"
new_data = {"X" : [0,0,1,2,3,3,4], "Y" : [1,2,3,4,5,6,7]}
info = dmatrix(formula, data)
try:
build_design_matrices([info.design_info], new_data)
except PatsyError as e:
assert e.origin == Origin(formula, 0, 4)
else:
assert False
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py | ciskoinch8/vimrc | 463 | 447 | <filename>vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
class Foo:
pass
class Bar(Foo):
def __init__(self):
super(Bar, self).__init__() # [super-with-arguments]
class Baz(Foo):
def __init__(self):
super().__init__()
class Qux(Foo):
def __init__(self):
super(Bar, self).__init__()
class NotSuperCall(Foo):
def __init__(self):
super.test(Bar, self).__init__()
class InvalidSuperCall(Foo):
def __init__(self):
super(InvalidSuperCall.__class__, self).__init__()
def method_accepting_cls(cls, self):
# Using plain `super()` is not valid here, since there's no `__class__` cell found
# (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
# Instead, we expect to *not* see a warning about `super-with-arguments`.
# Explicitly passing `cls`, and `self` to `super()` is what's required.
super(cls, self).__init__()
|
examples/cmrc2018_example/main.trainer.py | fangd123/TextBrewer | 1,121 | 480 | <filename>examples/cmrc2018_example/main.trainer.py<gh_stars>1000+
import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os,random
import numpy as np
import torch
from processing import convert_examples_to_features, read_squad_examples
from processing import ChineseFullTokenizer
from pytorch_pretrained_bert.my_modeling import BertConfig
from optimization import BERTAdam
import config
from utils import read_and_convert, divide_parameters
from modeling import BertForQASimple, BertForQASimpleAdaptorTraining
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from functools import partial
from train_eval import predict
def args_check(args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
logger.warning("Output directory (%s) already exists and is not empty.", args.output_dir)
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
def main():
#parse arguments
config.parse()
args = config.args
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
#arguments check
device, n_gpu = args_check(args)
os.makedirs(args.output_dir, exist_ok=True)
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
#load bert config
bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
assert args.max_seq_length <= bert_config_S.max_position_embeddings
#read data
train_examples = None
train_features = None
eval_examples = None
eval_features = None
num_train_steps = None
tokenizer = ChineseFullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
convert_fn = partial(convert_examples_to_features,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length)
if args.do_train:
train_examples,train_features = read_and_convert(args.train_file,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
if args.fake_file_1:
fake_examples1,fake_features1 = read_and_convert(args.fake_file_1,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
train_examples += fake_examples1
train_features += fake_features1
if args.fake_file_2:
fake_examples2, fake_features2 = read_and_convert(args.fake_file_2,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
train_examples += fake_examples2
train_features += fake_features2
num_train_steps = int(len(train_features)/args.train_batch_size) * args.num_train_epochs
if args.do_predict:
eval_examples,eval_features = read_and_convert(args.predict_file,is_training=False, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
#Build Model and load checkpoint
model_S = BertForQASimple(bert_config_S,args)
#Load student
if args.load_model_type=='bert':
assert args.init_checkpoint_S is not None
state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
assert len(missing_keys)==0
elif args.load_model_type=='all':
assert args.tuned_checkpoint_S is not None
state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')
model_S.load_state_dict(state_dict_S)
else:
logger.info("Model is randomly initialized.")
model_S.to(device)
if args.local_rank != -1 or n_gpu > 1:
if args.local_rank != -1:
raise NotImplementedError
elif n_gpu > 1:
model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)
if args.do_train:
#parameters
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("Length of all_trainable_params: %d", len(all_trainable_params))
optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate,
warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule,
s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
logger.info(" Forward batch size = %d", forward_batch_size)
logger.info(" Num backward steps = %d", num_train_steps)
########### DISTILLATION ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
ckpt_frequency = args.ckpt_frequency,
log_dir = args.output_dir,
output_dir = args.output_dir,
device = args.device)
distiller = BasicTrainer(train_config = train_config,
model = model_S,
adaptor = BertForQASimpleAdaptorTraining)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_doc_mask = torch.tensor([f.doc_mask for f in train_features], dtype=torch.float)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_dataset = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_doc_mask,
all_start_positions, all_end_positions)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
raise NotImplementedError
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
callback_func = partial(predict,
eval_examples=eval_examples,
eval_features=eval_features,
args=args)
with distiller:
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=args.num_train_epochs, callback=callback_func)
if not args.do_train and args.do_predict:
res = predict(model_S,eval_examples,eval_features,step=0,args=args)
print (res)
if __name__ == "__main__":
main()
|
gn/gn_to_bp.py | despairblue/esy-skia | 2,151 | 485 | <reponame>despairblue/esy-skia
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Generate Android.bp for Skia from GN configuration.
import json
import os
import pprint
import string
import subprocess
import tempfile
import gn_to_bp_utils
# First we start off with a template for Android.bp,
# with holes for source lists and include directories.
bp = string.Template('''// This file is autogenerated by gn_to_bp.py.
cc_library_static {
name: "libskia",
cflags: [
$cflags
],
cppflags:[
$cflags_cc
],
export_include_dirs: [
$export_includes
],
local_include_dirs: [
$local_includes
],
srcs: [
$srcs
],
arch: {
arm: {
srcs: [
$arm_srcs
],
neon: {
srcs: [
$arm_neon_srcs
],
},
},
arm64: {
srcs: [
$arm64_srcs
],
},
mips: {
srcs: [
$none_srcs
],
},
mips64: {
srcs: [
$none_srcs
],
},
x86: {
srcs: [
$x86_srcs
],
cflags: [
// Clang seems to think new/malloc will only be 4-byte aligned
// on x86 Android. We're pretty sure it's actually 8-byte
// alignment. tests/OverAlignedTest.cpp has more information,
// and should fail if we're wrong.
"-Wno-over-aligned"
],
},
x86_64: {
srcs: [
$x86_srcs
],
},
},
defaults: ["skia_deps",
"skia_pgo",
],
}
// Build libskia with PGO by default.
// Location of PGO profile data is defined in build/soong/cc/pgo.go
// and is separate from skia.
// To turn it off, set ANDROID_PGO_NO_PROFILE_USE environment variable
// or set enable_profile_use property to false.
cc_defaults {
name: "skia_pgo",
pgo: {
instrumentation: true,
profile_file: "hwui/hwui.profdata",
benchmarks: ["hwui", "skia"],
enable_profile_use: true,
},
}
// "defaults" property to disable profile use for Skia tools and benchmarks.
cc_defaults {
name: "skia_pgo_no_profile_use",
defaults: [
"skia_pgo",
],
pgo: {
enable_profile_use: false,
},
}
cc_defaults {
name: "skia_deps",
shared_libs: [
"libEGL",
"libGLESv2",
"libdng_sdk",
"libexpat",
"libft2",
"libheif",
"libicui18n",
"libicuuc",
"libjpeg",
"liblog",
"libpiex",
"libpng",
"libvulkan",
"libz",
"libcutils",
"libnativewindow",
],
static_libs: [
"libarect",
"libsfntly",
"libwebp-decode",
"libwebp-encode",
],
group_static_libs: true,
}
cc_defaults {
name: "skia_tool_deps",
defaults: [
"skia_deps",
"skia_pgo_no_profile_use"
],
static_libs: [
"libjsoncpp",
"libskia",
],
cflags: [
"-Wno-unused-parameter",
"-Wno-unused-variable",
],
}
cc_test {
name: "skia_dm",
defaults: [
"skia_tool_deps"
],
local_include_dirs: [
$dm_includes
],
srcs: [
$dm_srcs
],
shared_libs: [
"libbinder",
"libutils",
],
}
cc_test {
name: "skia_nanobench",
defaults: [
"skia_tool_deps"
],
local_include_dirs: [
$nanobench_includes
],
srcs: [
$nanobench_srcs
],
data: [
"resources/*",
],
}''')
# We'll run GN to get the main source lists and include directories for Skia.
gn_args = {
'is_official_build': 'true',
'skia_enable_tools': 'true',
'skia_enable_skottie': 'false', # requires rapidjson third-party
'skia_use_libheif': 'true',
'skia_use_vulkan': 'true',
'target_cpu': '"none"',
'target_os': '"android"',
'skia_vulkan_header': '"Skia_Vulkan_Android.h"',
}
js = gn_to_bp_utils.GenerateJSONFromGN(gn_args)
def strip_slashes(lst):
return {str(p.lstrip('/')) for p in lst}
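# For illustration (comment added here, not in the original script):
# strip_slashes({"//src/core/SkCanvas.cpp"}) -> {"src/core/SkCanvas.cpp"}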
srcs = strip_slashes(js['targets']['//:skia']['sources'])
cflags = strip_slashes(js['targets']['//:skia']['cflags'])
cflags_cc = strip_slashes(js['targets']['//:skia']['cflags_cc'])
local_includes = strip_slashes(js['targets']['//:skia']['include_dirs'])
export_includes = strip_slashes(js['targets']['//:public']['include_dirs'])
defines = [str(d) for d in js['targets']['//:skia']['defines']]
dm_srcs = strip_slashes(js['targets']['//:dm']['sources'])
dm_includes = strip_slashes(js['targets']['//:dm']['include_dirs'])
nanobench_target = js['targets']['//:nanobench']
nanobench_srcs = strip_slashes(nanobench_target['sources'])
nanobench_includes = strip_slashes(nanobench_target['include_dirs'])
gn_to_bp_utils.GrabDependentValues(js, '//:skia', 'sources', srcs, None)
gn_to_bp_utils.GrabDependentValues(js, '//:dm', 'sources', dm_srcs, 'skia')
gn_to_bp_utils.GrabDependentValues(js, '//:nanobench', 'sources',
nanobench_srcs, 'skia')
# skcms is a little special, kind of a second-party library.
srcs .add("third_party/skcms/skcms.c")
local_includes.add("third_party/skcms")
dm_includes .add("third_party/skcms")
# No need to list headers.
srcs = {s for s in srcs if not s.endswith('.h')}
dm_srcs = {s for s in dm_srcs if not s.endswith('.h')}
nanobench_srcs = {s for s in nanobench_srcs if not s.endswith('.h')}
cflags = gn_to_bp_utils.CleanupCFlags(cflags)
cflags_cc = gn_to_bp_utils.CleanupCCFlags(cflags_cc)
# We need to add the include path to the vulkan defines and header file set in
# the skia_vulkan_header gn arg that is used for framework builds.
local_includes.add("platform_tools/android/vulkan")
export_includes.add("platform_tools/android/vulkan")
here = os.path.dirname(__file__)
defs = gn_to_bp_utils.GetArchSources(os.path.join(here, 'opts.gni'))
gn_to_bp_utils.WriteUserConfig('include/config/SkUserConfig.h', defines)
# Turn a list of strings into the style bpfmt outputs.
def bpfmt(indent, lst, sort=True):
if sort:
lst = sorted(lst)
return ('\n' + ' '*indent).join('"%s",' % v for v in lst)
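# For illustration (comment added here, not in the original script):
# bpfmt(8, ["b", "a"]) returns '"a",\n        "b",' -- sorted, quoted entries
# joined by a newline plus 8 spaces, ready to substitute into the template.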
# OK! We have everything to fill in Android.bp...
with open('Android.bp', 'w') as f:
print >>f, bp.substitute({
'export_includes': bpfmt(8, export_includes),
'local_includes': bpfmt(8, local_includes),
'srcs': bpfmt(8, srcs),
'cflags': bpfmt(8, cflags, False),
'cflags_cc': bpfmt(8, cflags_cc),
'arm_srcs': bpfmt(16, defs['armv7']),
'arm_neon_srcs': bpfmt(20, defs['neon']),
'arm64_srcs': bpfmt(16, defs['arm64'] +
defs['crc32']),
'none_srcs': bpfmt(16, defs['none']),
'x86_srcs': bpfmt(16, defs['sse2'] +
defs['ssse3'] +
defs['sse41'] +
defs['sse42'] +
defs['avx' ] +
defs['hsw' ]),
'dm_includes' : bpfmt(8, dm_includes),
'dm_srcs' : bpfmt(8, dm_srcs),
'nanobench_includes' : bpfmt(8, nanobench_includes),
'nanobench_srcs' : bpfmt(8, nanobench_srcs),
})
|
python/ray/autoscaler/tags.py | firebolt55439/ray | 21,382 | 486 | """The Ray autoscaler uses tags/labels to associate metadata with instances."""
# Tag for the name of the node
TAG_RAY_NODE_NAME = "ray-node-name"
# Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag
# value says 'type' instead of 'kind'.
TAG_RAY_NODE_KIND = "ray-node-type"
NODE_KIND_HEAD = "head"
NODE_KIND_WORKER = "worker"
NODE_KIND_UNMANAGED = "unmanaged"
# Tag for user defined node types (e.g., m4xl_spot). This is used for multi
# node type clusters.
TAG_RAY_USER_NODE_TYPE = "ray-user-node-type"
# Tag for autofilled node types for legacy cluster yamls without multi
# node type defined in the cluster configs.
NODE_TYPE_LEGACY_HEAD = "ray-legacy-head-node-type"
NODE_TYPE_LEGACY_WORKER = "ray-legacy-worker-node-type"
# Tag that reports the current state of the node (e.g. Updating, Up-to-date)
TAG_RAY_NODE_STATUS = "ray-node-status"
STATUS_UNINITIALIZED = "uninitialized"
STATUS_WAITING_FOR_SSH = "waiting-for-ssh"
STATUS_SYNCING_FILES = "syncing-files"
STATUS_SETTING_UP = "setting-up"
STATUS_UPDATE_FAILED = "update-failed"
STATUS_UP_TO_DATE = "up-to-date"
# Tag uniquely identifying all nodes of a cluster
TAG_RAY_CLUSTER_NAME = "ray-cluster-name"
# Hash of the node launch config, used to identify out-of-date nodes
TAG_RAY_LAUNCH_CONFIG = "ray-launch-config"
# Hash of the node runtime config, used to determine if updates are needed
TAG_RAY_RUNTIME_CONFIG = "ray-runtime-config"
# Hash of the contents of the directories specified by the file_mounts config
# if the node is a worker, this also hashes content of the directories
# specified by the cluster_synced_files config
TAG_RAY_FILE_MOUNTS_CONTENTS = "ray-file-mounts-contents"
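# Illustrative example (not part of the original module): a worker node's tags
# might look like {TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
# TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, TAG_RAY_CLUSTER_NAME: "default"},
# where "default" is a hypothetical cluster name.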
|