# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008-2010, Frank Scholz <dev@coherence-project.org>
import os.path
import urllib
from twisted.python import failure
import rhythmdb
from coherence.upnp.core.soap_service import errorCode
from coherence.upnp.core import DIDLLite
import coherence.extern.louie as louie
from coherence.extern.simple_plugin import Plugin
from coherence import log
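# Offset added to Rhythmbox DB entry ids when building UPnP item ids/URIs
# below, presumably to keep them clear of the ids exposed by the companion
# Rhythmbox MediaServer backend.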
TRACK_COUNT = 1000000
class RhythmboxPlayer(log.Loggable):
""" a backend to the Rhythmbox
"""
logCategory = 'rb_media_renderer'
implements = ['MediaRenderer']
vendor_value_defaults = {'RenderingControl': {'A_ARG_TYPE_Channel':'Master'},
'AVTransport': {'A_ARG_TYPE_SeekMode':('ABS_TIME','REL_TIME','TRACK_NR')}}
vendor_range_defaults = {'RenderingControl': {'Volume': {'maximum':100}}}
def __init__(self, device, **kwargs):
self.warning("__init__ RhythmboxPlayer %r", kwargs)
self.shell = kwargs['shell']
self.server = device
self.rb_mediaserver = kwargs['rb_mediaserver']
self.player = None
self.entry = None
self.metadata = None
try:
self.name = kwargs['name']
except KeyError:
self.name = "Rhythmbox on %s" % self.server.coherence.hostname
self.player = self.shell.get_player()
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
self.playing = False
self.state = None
self.duration = None
self.volume = 1.0
self.muted_volume = None
self.view = []
self.tags = {}
def __repr__(self):
return str(self.__class__).split('.')[-1]
def volume_changed(self, player, parameter):
self.volume = self.player.props.volume
self.info('volume_changed to %r', self.volume)
if self.volume > 0:
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Volume', self.volume*100)
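# Signal handler for Rhythmbox's 'playing-song-changed': builds DIDL-Lite
# metadata for the new entry and pushes it into the AVTransport variables.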
def playing_song_changed(self, player, entry):
self.info("playing_song_changed %r", entry)
if self.server != None:
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
if entry == None:
self.update('STOPPED')
self.playing = False
#self.entry = None
self.metadata = None
self.duration = None
else:
id = self.shell.props.db.entry_get (entry, rhythmdb.PROP_ENTRY_ID)
bitrate = self.shell.props.db.entry_get(entry, rhythmdb.PROP_BITRATE) * 1024 / 8
# PROP_DURATION is in seconds; convert it to HH:MM:SS for UPnP
seconds = self.shell.props.db.entry_get(entry, rhythmdb.PROP_DURATION)
hours = seconds / 3600
seconds = seconds - hours * 3600
minutes = seconds / 60
seconds = seconds - minutes * 60
self.duration = "%02d:%02d:%02d" % (hours, minutes, seconds)
mimetype = self.shell.props.db.entry_get(entry, rhythmdb.PROP_MIMETYPE)
# This isn't a real mime-type
if mimetype == "application/x-id3":
mimetype = "audio/mpeg"
size = self.shell.props.db.entry_get(entry, rhythmdb.PROP_FILE_SIZE)
# create item
item = DIDLLite.MusicTrack(id + TRACK_COUNT,'101')
item.album = self.shell.props.db.entry_get(entry, rhythmdb.PROP_ALBUM)
item.artist = self.shell.props.db.entry_get(entry, rhythmdb.PROP_ARTIST)
item.genre = self.shell.props.db.entry_get(entry, rhythmdb.PROP_GENRE)
item.originalTrackNumber = str(self.shell.props.db.entry_get (entry, rhythmdb.PROP_TRACK_NUMBER))
item.title = self.shell.props.db.entry_get(entry, rhythmdb.PROP_TITLE) # much nicer if it was entry.title
cover = self.shell.props.db.entry_request_extra_metadata(entry, "rb:coverArt-uri")
if cover != None:
_,ext = os.path.splitext(cover)
item.albumArtURI = ''.join((self.server.coherence.urlbase+str(self.rb_mediaserver.uuid)[5:]+'/'+ str(int(id) + TRACK_COUNT),'?cover',ext))
item.res = []
location = self.shell.props.db.entry_get(entry, rhythmdb.PROP_LOCATION)
if location.startswith("file://"):
location = unicode(urllib.unquote(location[len("file://"):]))
uri = ''.join((self.server.coherence.urlbase+str(self.rb_mediaserver.uuid)[5:]+'/'+ str(int(id) + TRACK_COUNT)))
res = DIDLLite.Resource(uri, 'http-get:*:%s:*' % mimetype)
if size > 0:
res.size = size
if self.duration is not None:
res.duration = self.duration
if bitrate > 0:
res.bitrate = str(bitrate)
item.res.append(res)
# add internal resource
res = DIDLLite.Resource('track-%d' % id, 'rhythmbox:%s:%s:*' % (self.server.coherence.hostname, mimetype))
if size > 0:
res.size = size
if self.duration is not None:
res.duration = str(self.duration)
if bitrate > 0:
res.bitrate = str(bitrate)
item.res.append(res)
elt = DIDLLite.DIDLElement()
elt.addItem(item)
self.metadata = elt.toString()
self.entry = entry
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURIMetaData',self.metadata)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackMetaData',self.metadata)
self.info("playing_song_changed %r", self.metadata)
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'CurrentTransportActions','PLAY,STOP,PAUSE,SEEK,NEXT,PREVIOUS')
self.server.av_transport_server.set_variable(connection_id, 'RelativeTimePosition', '00:00:00')
self.server.av_transport_server.set_variable(connection_id, 'AbsoluteTimePosition', '00:00:00')
def playing_changed(self, player, state):
self.info("playing_changed", state)
if state is True:
transport_state = 'PLAYING'
else:
if self.playing is False:
transport_state = 'STOPPED'
else:
transport_state = 'PAUSED_PLAYBACK'
self.update(transport_state)
try:
position = player.get_playing_time()
except:
position = None
try:
duration = player.get_playing_song_duration()
except:
duration = None
self.update_position(position,duration)
self.info("playing_changed %r %r ", position, duration)
def elapsed_changed(self, player, time):
self.info("elapsed_changed %r %r", player, time)
try:
duration = player.get_playing_song_duration()
except:
duration = None
self.update_position(time,duration)
def update(self, state):
self.info("update %r", state)
if state in ('STOPPED','READY'):
transport_state = 'STOPPED'
if state == 'PLAYING':
transport_state = 'PLAYING'
if state == 'PAUSED_PLAYBACK':
transport_state = 'PAUSED_PLAYBACK'
if self.state != transport_state:
self.state = transport_state
if self.server != None:
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
self.server.av_transport_server.set_variable(connection_id,
'TransportState',
transport_state)
def update_position(self, position,duration):
self.info("update_position %r %r", position,duration)
if self.server != None:
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrack', 1)
if position is not None:
m,s = divmod( position, 60)
h,m = divmod(m,60)
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'RelativeTimePosition', '%02d:%02d:%02d' % (h,m,s))
self.server.av_transport_server.set_variable(connection_id, 'AbsoluteTimePosition', '%02d:%02d:%02d' % (h,m,s))
if duration <= 0:
duration = None
if duration is not None:
m,s = divmod( duration, 60)
h,m = divmod(m,60)
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackDuration', '%02d:%02d:%02d' % (h,m,s))
self.server.av_transport_server.set_variable(connection_id, 'CurrentMediaDuration', '%02d:%02d:%02d' % (h,m,s))
if self.duration is None:
if self.metadata is not None:
self.info("update_position %r", self.metadata)
elt = DIDLLite.DIDLElement.fromString(self.metadata)
for item in elt:
for res in item.findall('res'):
res.attrib['duration'] = "%d:%02d:%02d" % (h,m,s)
self.metadata = elt.toString()
if self.server != None:
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURIMetaData',self.metadata)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackMetaData',self.metadata)
self.duration = duration
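# Maps the given URI (plus optional DIDL-Lite metadata) onto a Rhythmbox DB
# entry, looking it up by 'track-<id>' or by location and creating one when
# needed, then primes the AVTransport variables for playback.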
def load( self, uri, metadata):
self.info("player load %r %r", uri, metadata)
#self.shell.load_uri(uri,play=False)
self.duration = None
self.metadata = metadata
self.tags = {}
was_playing = self.playing
if was_playing == True:
self.stop()
if len(metadata)>0:
elt = DIDLLite.DIDLElement.fromString(metadata)
if elt.numItems() == 1:
item = elt.getItems()[0]
if uri.startswith('track-'):
self.entry = self.shell.props.db.entry_lookup_by_id(int(uri[6:]))
else:
self.entry = self.shell.props.db.entry_lookup_by_location(uri)
self.info("check for entry %r %r %r", self.entry,item.server_uuid,uri)
if self.entry == None:
if item.server_uuid is not None:
entry_type = self.shell.props.db.entry_register_type("CoherenceUpnp:" + item.server_uuid)
self.entry = self.shell.props.db.entry_new(entry_type, uri)
self.info("create new entry %r", self.entry)
else:
entry_type = self.shell.props.db.entry_register_type("CoherencePlayer")
self.entry = self.shell.props.db.entry_new(entry_type, uri)
self.info("load and check for entry %r", self.entry)
duration = None
size = None
bitrate = None
for res in item.res:
if res.data == uri:
duration = res.duration
size = res.size
bitrate = res.bitrate
break
self.shell.props.db.set(self.entry, rhythmdb.PROP_TITLE, item.title)
try:
if item.artist is not None:
self.shell.props.db.set(self.entry, rhythmdb.PROP_ARTIST, item.artist)
except AttributeError:
pass
try:
if item.album is not None:
self.shell.props.db.set(self.entry, rhythmdb.PROP_ALBUM, item.album)
except AttributeError:
pass
try:
self.info("%r %r", item.title,item.originalTrackNumber)
if item.originalTrackNumber is not None:
self.shell.props.db.set(self.entry, rhythmdb.PROP_TRACK_NUMBER, int(item.originalTrackNumber))
except AttributeError:
pass
if duration is not None:
h,m,s = duration.split(':')
seconds = int(h)*3600 + int(m)*60 + int(s)
self.info("%r %r:%r:%r %r", duration, h, m , s, seconds)
self.shell.props.db.set(self.entry, rhythmdb.PROP_DURATION, seconds)
if size is not None:
self.shell.props.db.set(self.entry, rhythmdb.PROP_FILE_SIZE,int(size))
else:
if uri.startswith('track-'):
self.entry = self.shell.props.db.entry_lookup_by_id(int(uri[6:]))
else:
#self.shell.load_uri(uri,play=False)
#self.entry = self.shell.props.db.entry_lookup_by_location(uri)
entry_type = self.shell.props.db.entry_register_type("CoherencePlayer")
self.entry = self.shell.props.db.entry_new(entry_type, uri)
self.playing = False
self.metadata = metadata
connection_id = self.server.connection_manager_server.lookup_avt_id(self.current_connection_id)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTransportActions','PLAY,STOP,PAUSE,SEEK,NEXT,PREVIOUS')
self.server.av_transport_server.set_variable(connection_id, 'NumberOfTracks',1)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'AVTransportURIMetaData',metadata)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackURI',uri)
self.server.av_transport_server.set_variable(connection_id, 'CurrentTrackMetaData',metadata)
if was_playing == True:
self.play()
def start(self, uri):
self.load(uri, '')
self.play()
def stop(self):
self.info("player stop")
self.player.stop()
self.playing = False
#self.server.av_transport_server.set_variable( \
# self.server.connection_manager_server.lookup_avt_id(self.current_connection_id),\
# 'TransportState', 'STOPPED')
def play(self):
self.info("player play")
if self.playing == False:
if self.entry:
self.player.play_entry(self.entry)
else:
self.player.playpause()
self.playing = True
else:
self.player.playpause()
#self.server.av_transport_server.set_variable( \
# self.server.connection_manager_server.lookup_avt_id(self.current_connection_id),\
# 'TransportState', 'PLAYING')
def pause(self):
self.player.pause()
#self.server.av_transport_server.set_variable( \
# self.server.connection_manager_server.lookup_avt_id(self.current_connection_id),\
# 'TransportState', 'PAUSED_PLAYBACK')
def seek(self, location, old_state):
"""
@param location: +nL = relative seek forward n seconds
-nL = relative seek backwards n seconds
"""
self.info("player seek %r", location)
self.player.seek(location)
self.server.av_transport_server.set_variable(0, 'TransportState', old_state)
def mute(self):
self.muted_volume = self.volume
self.player.set_volume(0)
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Mute', 'True')
def unmute(self):
if self.muted_volume is not None:
self.player.set_volume(self.muted_volume)
self.muted_volume = None
self.player.set_mute(False)
rcs_id = self.server.connection_manager_server.lookup_rcs_id(self.current_connection_id)
self.server.rendering_control_server.set_variable(rcs_id, 'Mute', 'False')
def get_mute(self):
return self.player.get_mute()
def get_volume(self):
self.volume = self.player.get_volume()
self.info("get_volume %r", self.volume)
return self.volume * 100
def set_volume(self, volume):
self.info("set_volume %r", volume)
volume = int(volume)
if volume < 0:
volume=0
if volume > 100:
volume=100
self.player.set_volume(float(volume/100.0))
def upnp_init(self):
self.player.connect('playing-song-changed', self.playing_song_changed)
self.player.connect('playing-changed', self.playing_changed)
self.player.connect('elapsed-changed', self.elapsed_changed)
self.player.connect("notify::volume", self.volume_changed)
self.current_connection_id = None
self.server.connection_manager_server.set_variable(0, 'SinkProtocolInfo',
['rhythmbox:%s:audio/mpeg:*' % self.server.coherence.hostname,
'http-get:*:audio/mpeg:*',
'rhythmbox:%s:application/ogg:*' % self.server.coherence.hostname,
'http-get:*:application/ogg:*',
'rhythmbox:%s:audio/ogg:*' % self.server.coherence.hostname,
'http-get:*:audio/ogg:*',
'rhythmbox:%s:audio/x-flac:*' % self.server.coherence.hostname,
'http-get:*:audio/x-flac:*',
'rhythmbox:%s:audio/flac:*' % self.server.coherence.hostname,
'http-get:*:audio/flac:*',
'rhythmbox:%s:audio/x-wav:*' % self.server.coherence.hostname,
'http-get:*:audio/x-wav:*',
'rhythmbox:%s:audio/L16;rate=44100;channels=2:*' % self.server.coherence.hostname,
'http-get:*:audio/L16;rate=44100;channels=2:*',
'rhythmbox:%s:audio/x-m4a:*' % self.server.coherence.hostname,
'http-get:*:audio/x-m4a:*'],
default=True)
self.server.av_transport_server.set_variable(0, 'TransportState', 'NO_MEDIA_PRESENT', default=True)
self.server.av_transport_server.set_variable(0, 'TransportStatus', 'OK', default=True)
self.server.av_transport_server.set_variable(0, 'CurrentPlayMode', 'NORMAL', default=True)
self.server.av_transport_server.set_variable(0, 'CurrentTransportActions', '', default=True)
self.server.rendering_control_server.set_variable(0, 'Volume', self.get_volume())
self.server.rendering_control_server.set_variable(0, 'Mute', self.get_mute())
def upnp_Play(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Speed = int(kwargs['Speed'])
self.play()
return {}
def upnp_Previous(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.player.do_previous()
return {}
def upnp_Next(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.player.do_next()
return {}
def upnp_Pause(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.pause()
return {}
def upnp_Stop(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
self.stop()
return {}
def upnp_Seek(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Unit = kwargs['Unit']
Target = kwargs['Target']
if Unit in ['ABS_TIME','REL_TIME']:
old_state = self.server.av_transport_server.get_variable('TransportState').value
self.server.av_transport_server.set_variable(0, 'TransportState', 'TRANSITIONING')
sign = 1
if Target[0] == '+':
Target = Target[1:]
if Target[0] == '-':
Target = Target[1:]
sign = -1
h,m,s = Target.split(':')
seconds = int(h)*3600 + int(m)*60 + int(s)
if Unit == 'ABS_TIME':
position = self.player.get_playing_time()
self.seek(seconds-position, old_state)
elif Unit == 'REL_TIME':
self.seek(seconds*sign, old_state)
return {}
def upnp_SetAVTransportURI(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
CurrentURI = kwargs['CurrentURI']
CurrentURIMetaData = kwargs['CurrentURIMetaData']
local_protocol_infos=self.server.connection_manager_server.get_variable('SinkProtocolInfo').value.split(',')
#print '>>>', local_protocol_infos
if len(CurrentURIMetaData)==0:
self.load(CurrentURI,CurrentURIMetaData)
return {}
else:
elt = DIDLLite.DIDLElement.fromString(CurrentURIMetaData)
#import pdb; pdb.set_trace()
if elt.numItems() == 1:
item = elt.getItems()[0]
res = item.res.get_matching(local_protocol_infos, protocol_type='rhythmbox')
if len(res) == 0:
res = item.res.get_matching(local_protocol_infos)
if len(res) > 0:
res = res[0]
remote_protocol,remote_network,remote_content_format,_ = res.protocolInfo.split(':')
self.load(res.data,CurrentURIMetaData)
return {}
return failure.Failure(errorCode(714))
def upnp_SetMute(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Channel = kwargs['Channel']
DesiredMute = kwargs['DesiredMute']
if DesiredMute in ['TRUE', 'True', 'true', '1','Yes','yes']:
self.mute()
else:
self.unmute()
return {}
def upnp_SetVolume(self, *args, **kwargs):
InstanceID = int(kwargs['InstanceID'])
Channel = kwargs['Channel']
DesiredVolume = int(kwargs['DesiredVolume'])
self.set_volume(DesiredVolume)
return {}
''' Tests for frequently used functions in sharppy.sharptab.utils '''
import numpy as np
import numpy.ma as ma
import numpy.testing as npt
from sharppy.sharptab.constants import MISSING, TOL
from sharppy.sharptab import utils as utils
# vec2comp Tests
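# The expected values below follow the usual meteorological convention,
# where wdir is the direction the wind blows *from*:
#   u = -wspd * sin(radians(wdir))
#   v = -wspd * cos(radians(wdir))
# e.g. wdir=225, wspd=7.0710678 gives u=5, v=5 (the single-value test).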
def test_vec2comp_single():
input_wdir = 225
input_wspd = 7.0710678118654755
correct_u = 5
correct_v = 5
returned_u, returned_v = utils.vec2comp(input_wdir, input_wspd)
npt.assert_almost_equal(returned_u, correct_u)
npt.assert_almost_equal(returned_v, correct_v)
def test_vec2comp_array_like():
input_wdir = [0, 45, 90, 135, 180, 225, 270, 315, 360]
input_wspd = [5, 10, 15, 20, 25, 30, 35, 40, 45]
correct_u = [0, -7.0710678118654746, -15, -14.142135623730951, 0,
21.213203435596423, 35, 28.284271247461909, 0]
correct_v = [-5, -7.0710678118654746, 0, 14.142135623730951, 25,
21.213203435596423, 0, -28.284271247461909, -45]
correct_u = np.asanyarray(correct_u).astype(np.float64)
correct_v = np.asanyarray(correct_v).astype(np.float64)
returned_u, returned_v = utils.vec2comp(input_wdir, input_wspd)
npt.assert_almost_equal(returned_u, correct_u)
npt.assert_almost_equal(returned_v, correct_v)
def test_vec2comp_zeros():
input_wdir = [0, 90, 180, 270, 360]
input_wspd = [10, 20, 30, 40, 50]
correct_u = [0, -20, 0, 40, 0]
correct_v = [-10, 0, 30, 0, -50]
correct_u = np.asanyarray(correct_u).astype(np.float64)
correct_v = np.asanyarray(correct_v).astype(np.float64)
returned_u, returned_v = utils.vec2comp(input_wdir, input_wspd)
npt.assert_equal(returned_u, correct_u)
npt.assert_equal(returned_v, correct_v)
def test_vec2comp_default_missing_val_single():
input_wdir = MISSING
input_wspd = 30
returned_u, returned_v = utils.vec2comp(input_wdir, input_wspd)
npt.assert_(type(returned_u) == type(ma.masked))
npt.assert_(type(returned_v) == type(ma.masked))
def test_vec2comp_default_missing_val_array():
input_wdir = [0, 90, 180, MISSING]
input_wspd = [MISSING, 10, 20, 30]
correct_u = [MISSING, -10, 0, MISSING]
correct_v = [MISSING, 0, 20, MISSING]
correct_u = ma.asanyarray(correct_u).astype(np.float64)
correct_v = ma.asanyarray(correct_v).astype(np.float64)
correct_u[correct_u == MISSING] = ma.masked
correct_v[correct_v == MISSING] = ma.masked
correct_u[correct_v.mask] = ma.masked
correct_v[correct_u.mask] = ma.masked
correct_u.set_fill_value(MISSING)
correct_v.set_fill_value(MISSING)
returned_u, returned_v = utils.vec2comp(input_wdir, input_wspd)
npt.assert_almost_equal(returned_u, correct_u)
npt.assert_almost_equal(returned_v, correct_v)
def test_vec2comp_user_missing_val_single():
missing = 50
input_wdir = missing
input_wspd = 30
returned_u, returned_v = utils.vec2comp(input_wdir, input_wspd, missing)
npt.assert_(type(returned_u) == type(ma.masked))
npt.assert_(type(returned_v) == type(ma.masked))
def test_vec2comp_user_missing_val_array():
missing = 50
input_wdir = [0, 90, 180, missing]
input_wspd = [missing, 10, 20, 30]
correct_u = [missing, -10, 0, missing]
correct_v = [missing, 0, 20, missing]
correct_u = ma.asanyarray(correct_u).astype(np.float64)
correct_v = ma.asanyarray(correct_v).astype(np.float64)
correct_u[correct_u == missing] = ma.masked
correct_v[correct_v == missing] = ma.masked
correct_u[correct_v.mask] = ma.masked
correct_v[correct_u.mask] = ma.masked
correct_u.set_fill_value(missing)
correct_v.set_fill_value(missing)
returned_u, returned_v = utils.vec2comp(input_wdir, input_wspd, missing)
npt.assert_almost_equal(returned_u, correct_u)
npt.assert_almost_equal(returned_v, correct_v)
# comp2vec Tests
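# comp2vec is the inverse mapping, (u, v) -> (wdir, wspd); the tests below
# expect a direction of 0 (not 360) for winds from due north.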
def test_comp2vec_single():
input_u = 5
input_v = 5
correct_wdir = 225
correct_wspd = 7.0710678118654755
returned_wdir, returned_wspd = utils.comp2vec(input_u, input_v)
npt.assert_almost_equal(returned_wdir, correct_wdir)
npt.assert_almost_equal(returned_wspd, correct_wspd)
def test_comp2vec_array():
input_u = [0, -7.0710678118654746, -15, -14.142135623730951, 0,
21.213203435596423, 35, 28.284271247461909, 0]
input_v = [-5, -7.0710678118654746, 0, 14.142135623730951, 25,
21.213203435596423, 0, -28.284271247461909, -45]
correct_wdir = [0, 45, 90, 135, 180, 225, 270, 315, 0]
correct_wspd = [5, 10, 15, 20, 25, 30, 35, 40, 45]
correct_wdir = np.asanyarray(correct_wdir).astype(np.float64)
correct_wspd = np.asanyarray(correct_wspd).astype(np.float64)
returned_wdir, returned_wspd = utils.comp2vec(input_u, input_v)
npt.assert_almost_equal(correct_wdir, returned_wdir)
npt.assert_almost_equal(correct_wspd, returned_wspd)
def test_comp2vec_zeros():
input_u = [0, -20, 0, 40, 0]
input_v = [-10, 0, 30, 0, -50]
correct_wdir = [0, 90, 180, 270, 0]
correct_wspd = [10, 20, 30, 40, 50]
correct_wdir = np.asanyarray(correct_wdir).astype(np.float64)
correct_wspd = np.asanyarray(correct_wspd).astype(np.float64)
returned_wdir, returned_wspd = utils.comp2vec(input_u, input_v)
npt.assert_equal(returned_wdir, correct_wdir)
npt.assert_equal(returned_wspd, correct_wspd)
def test_comp2vec_default_missing_val_single():
input_u = MISSING
input_v = 30
returned_wdir, returned_wspd = utils.comp2vec(input_u, input_v)
npt.assert_(type(returned_wdir) == type(ma.masked))
npt.assert_(type(returned_wspd) == type(ma.masked))
def test_comp2vec_default_missing_val_array():
input_u = [MISSING, -10, 0, MISSING]
input_v = [MISSING, 0, 20, MISSING]
correct_wdir = [0, 90, 180, MISSING]
correct_wspd = [MISSING, 10, 20, 30]
correct_wdir = ma.asanyarray(correct_wdir).astype(np.float64)
correct_wspd = ma.asanyarray(correct_wspd).astype(np.float64)
correct_wdir[correct_wdir == MISSING] = ma.masked
correct_wspd[correct_wspd == MISSING] = ma.masked
correct_wdir[correct_wspd.mask] = ma.masked
correct_wspd[correct_wdir.mask] = ma.masked
correct_wdir.set_fill_value(MISSING)
correct_wspd.set_fill_value(MISSING)
returned_wdir, returned_wspd = utils.comp2vec(input_u, input_v)
npt.assert_almost_equal(returned_wdir, correct_wdir)
npt.assert_almost_equal(returned_wspd, correct_wspd)
def test_comp2vec_user_missing_val_single():
missing = 50
input_u = missing
input_v = 30
returned_wdir, returned_wspd = utils.comp2vec(input_u, input_v, missing)
npt.assert_(type(returned_wdir) == type(ma.masked))
npt.assert_(type(returned_wspd) == type(ma.masked))
def test_comp2vec_user_missing_val_array():
missing = 50
input_u = [missing, -10, 0, missing]
input_v = [missing, 0, 20, missing]
correct_wdir = [0, 90, 180, missing]
correct_wspd = [missing, 10, 20, 30]
correct_wdir = ma.asanyarray(correct_wdir).astype(np.float64)
correct_wspd = ma.asanyarray(correct_wspd).astype(np.float64)
correct_wdir[correct_wdir == missing] = ma.masked
correct_wspd[correct_wspd == missing] = ma.masked
correct_wdir[correct_wspd.mask] = ma.masked
correct_wspd[correct_wdir.mask] = ma.masked
correct_wdir.set_fill_value(missing)
correct_wspd.set_fill_value(missing)
returned_wdir, returned_wspd = utils.comp2vec(input_u, input_v, missing)
npt.assert_almost_equal(returned_wdir, correct_wdir)
npt.assert_almost_equal(returned_wspd, correct_wspd)
# mag Tests
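# mag(u, v) is expected to return the vector magnitude sqrt(u**2 + v**2),
# masking elements where either component equals the missing value.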
def test_mag_single():
input_u = 5
input_v = 5
correct_answer = np.sqrt(input_u**2 + input_v**2)
returned_answer = utils.mag(input_u, input_v)
npt.assert_almost_equal(returned_answer, correct_answer)
def test_mag_zero():
input_u = 0
input_v = 0
correct_answer = 0
returned_answer = utils.mag(input_u, input_v)
npt.assert_almost_equal(returned_answer, correct_answer)
def test_mag_array():
rt2 = np.sqrt(2)
input_u = [5, 10, 15]
input_v = [5, 10, 15]
correct_answer = [5*rt2, 10*rt2, 15*rt2]
correct_answer = ma.asanyarray(correct_answer)
returned_answer = utils.mag(input_u, input_v)
npt.assert_almost_equal(returned_answer, correct_answer)
def test_mag_default_missing_single():
input_u = MISSING
input_v = 10
correct_answer = ma.masked
returned_answer = utils.mag(input_u, input_v)
npt.assert_(type(returned_answer) == type(correct_answer))
def test_mag_default_missing_array():
rt2 = np.sqrt(2)
input_u = [MISSING, 10, 20, 30, 40]
input_v = [0, 10, 20, 30, MISSING]
correct_answer = [MISSING, 10*rt2, 20*rt2, 30*rt2, MISSING]
correct_answer = ma.asanyarray(correct_answer).astype(np.float64)
correct_answer[correct_answer == MISSING] = ma.masked
returned_answer = utils.mag(input_u, input_v)
npt.assert_almost_equal(returned_answer, correct_answer)
def test_mag_user_missing_single():
missing = 50
input_u = missing
input_v = 10
correct_answer = ma.masked
returned_answer = utils.mag(input_u, input_v, missing)
npt.assert_(type(returned_answer) == type(correct_answer))
def test_mag_user_missing_array():
missing = 50
rt2 = np.sqrt(2)
input_u = [missing, 10, 20, 30, 40]
input_v = [0, 10, 20, 30, missing]
correct_answer = [missing, 10*rt2, 20*rt2, 30*rt2, missing]
correct_answer = ma.asanyarray(correct_answer).astype(np.float64)
correct_answer[correct_answer == missing] = ma.masked
returned_answer = utils.mag(input_u, input_v, missing)
npt.assert_almost_equal(returned_answer, correct_answer)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.contrib.rnn.python.ops import rnn_cell as contrib_rnn_cell
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import util as checkpointable_utils
# pylint: enable=protected-access
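# _Linear is private to core_rnn_cell; alias it so the tests below can
# exercise it directly.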
Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
class RNNCellTest(test.TestCase, parameterized.TestCase):
def testLinear(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0)):
x = array_ops.zeros([1, 2])
l = Linear([x], 2, False)([x])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([l], {x.name: np.array([[1., 2.]])})
self.assertAllClose(res[0], [[3.0, 3.0]])
# Checks prevent you from accidentally creating a shared function.
with self.assertRaises(ValueError):
l1 = Linear([x], 2, False)([x])
# But you can create a new one in a new scope and share the variables.
with variable_scope.variable_scope("l1") as new_scope:
l1 = Linear([x], 2, False)([x])
with variable_scope.variable_scope(new_scope, reuse=True):
Linear([l1], 2, False)([l1])
self.assertEqual(len(variables_lib.trainable_variables()), 2)
def testBasicRNNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellNotTrainable(self):
with self.cached_session() as sess:
def not_trainable_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"root",
initializer=init_ops.constant_initializer(0.5),
custom_getter=not_trainable_getter):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertFalse(cell.trainable_variables)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.non_trainable_variables])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testIndRNNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = contrib_rnn_cell.IndRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/ind_rnn_cell/%s_w:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/ind_rnn_cell/%s_u:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/ind_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testGRUCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test GRUCell with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
def testIndyGRUCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.185265, 0.17704]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test IndyGRUCell with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.155127, 0.157328]])
def testSRUCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.SRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.509682, 0.509682]])
def testSRUCellKerasRNN(self):
"""Tests that SRUCell works with keras RNN layer."""
cell = contrib_rnn_cell.SRUCell(10)
seq_input = ops.convert_to_tensor(
np.random.rand(2, 3, 5), name="seq_input", dtype=dtypes.float32)
rnn_layer = keras_layers.RNN(cell=cell)
rnn_outputs_keras = rnn_layer(seq_input)
with self.cached_session() as sess:
sess.run([variables_lib.global_variables_initializer()])
self.assertEqual(sess.run(rnn_outputs_keras).shape, (2, 10))
def testSRUCellBiasType(self):
"""Tests that the bias' dtype is properly set."""
cell = contrib_rnn_cell.SRUCell(10)
cell.build((2, 3, 5))
self.assertEqual(cell._bias.dtype, dtypes.float32_ref)
cell = contrib_rnn_cell.SRUCell(10, dtype=dtypes.int32)
cell.build((2, 3, 5))
self.assertEqual(cell._bias.dtype, dtypes.int32_ref)
cell_input = ops.convert_to_tensor(
np.random.rand(2, 5), name="cell_input", dtype=dtypes.float16)
cell_state = ops.convert_to_tensor(
np.random.rand(2, 10), name="cell_state", dtype=dtypes.float16)
cell = contrib_rnn_cell.SRUCell(10)
cell(cell_input, [cell_state])
self.assertEqual(cell._bias.dtype, dtypes.float16_ref)
def testSRUCellWithDiffSize(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.SRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.55255556, 0.55255556]])
def testBasicLSTMCell(self):
for dtype in [dtypes.float16, dtypes.float32]:
np_dtype = dtype.as_numpy_dtype
with self.session(graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2], dtype=dtype)
m = array_ops.zeros([1, 8], dtype=dtype)
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=False)
self.assertEqual(cell.dtype, None)
self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name)
self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name)
cell.get_config() # Should not throw an error
g, out_m = cell(x, m)
# Layer infers the input type.
self.assertEqual(cell.dtype, dtype.name)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(expected_variable_names,
[v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1.]]),
m.name: 0.1 * np.ones([1, 8])
})
self.assertEqual(len(res), 2)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# The numbers in results were not calculated, this is just a
# smoke test.
self.assertAllClose(res[0], np.array(
[[0.240, 0.240]], dtype=np_dtype), 1e-2)
expected_mem = np.array(
[[0.689, 0.689, 0.448, 0.448, 0.398, 0.398, 0.240, 0.240]],
dtype=np_dtype)
self.assertAllClose(res[1], expected_mem, 1e-2)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test BasicLSTMCell with input_size != num_units.
x = array_ops.zeros([1, 3], dtype=dtype)
m = array_ops.zeros([1, 4], dtype=dtype)
g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
m.name: 0.1 * np.ones([1, 4], dtype=np_dtype)
})
self.assertEqual(len(res), 2)
def testBasicLSTMCellDimension0Error(self):
"""Tests that dimension 0 in both(x and m) shape must be equal."""
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size - 1, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run(
[g, out_m], {
x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size - 1, state_size])
})
def testBasicLSTMCellStateSizeError(self):
"""Tests that state_size must be num_units * 2."""
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 3 # state_size must be num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run(
[g, out_m], {
x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size, state_size])
})
def testBasicLSTMCellStateTupleType(self):
with self.cached_session():
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = (array_ops.zeros([1, 2]),) * 2
m1 = (array_ops.zeros([1, 2]),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)],
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(
isinstance(cell.state_size[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(cell.state_size[1], rnn_cell_impl.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
# Pass in LSTMStateTuples
variable_scope.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, dtypes.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(isinstance(zero_state[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(zero_state[1], rnn_cell_impl.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
def testBasicLSTMCellWithStateTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 4])
m1 = array_ops.zeros([1, 4])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 4]),
m1.name: 0.1 * np.ones([1, 4])
})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array(
[[0.68967271, 0.68967271, 0.44848421, 0.44848421]])
expected_mem1 = np.array(
[[0.39897051, 0.39897051, 0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
def testIndyLSTMCell(self):
for dtype in [dtypes.float16, dtypes.float32]:
np_dtype = dtype.as_numpy_dtype
with self.session(graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2], dtype=dtype)
state_0 = (array_ops.zeros([1, 2], dtype=dtype),) * 2
state_1 = (array_ops.zeros([1, 2], dtype=dtype),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[contrib_rnn_cell.IndyLSTMCell(2) for _ in range(2)])
self.assertEqual(cell.dtype, None)
self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name)
self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name)
cell.get_config() # Should not throw an error
g, (out_state_0, out_state_1) = cell(x, (state_0, state_1))
# Layer infers the input type.
self.assertEqual(cell.dtype, dtype.name)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_w:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_u:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_w:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_u:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(expected_variable_names,
[v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_state_0, out_state_1], {
x.name: np.array([[1., 1.]]),
state_0[0].name: 0.1 * np.ones([1, 2]),
state_0[1].name: 0.1 * np.ones([1, 2]),
state_1[0].name: 0.1 * np.ones([1, 2]),
state_1[1].name: 0.1 * np.ones([1, 2]),
})
self.assertEqual(len(res), 3)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# Only check the range of outputs as this is just a smoke test.
self.assertAllInRange(res[0], -1.0, 1.0)
self.assertAllInRange(res[1], -1.0, 1.0)
self.assertAllInRange(res[2], -1.0, 1.0)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test IndyLSTMCell with input_size != num_units.
x = array_ops.zeros([1, 3], dtype=dtype)
state = (array_ops.zeros([1, 2], dtype=dtype),) * 2
g, out_state = contrib_rnn_cell.IndyLSTMCell(2)(x, state)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_state], {
x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
state[0].name: 0.1 * np.ones([1, 2], dtype=np_dtype),
state[1].name: 0.1 * np.ones([1, 2], dtype=np_dtype),
})
self.assertEqual(len(res), 2)
def testLSTMCell(self):
with self.cached_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
output, state = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[output, state], {
x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testLSTMCellVariables(self):
with self.cached_session():
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
cell(x, m) # Execute to create variables
variables = variables_lib.global_variables()
self.assertEquals(variables[0].op.name, "root/lstm_cell/kernel")
self.assertEquals(variables[1].op.name, "root/lstm_cell/bias")
self.assertEquals(variables[2].op.name,
"root/lstm_cell/projection/kernel")
def testLSTMCellLayerNorm(self):
with self.cached_session() as sess:
num_units = 2
num_proj = 3
batch_size = 1
input_size = 4
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
c = array_ops.zeros([batch_size, num_units])
h = array_ops.zeros([batch_size, num_proj])
state = rnn_cell_impl.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormLSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
layer_norm=True,
norm_gain=1.0,
norm_shift=0.0)
g, out_m = cell(x, state)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.ones((batch_size, input_size)),
c.name: 0.1 * np.ones((batch_size, num_units)),
h.name: 0.1 * np.ones((batch_size, num_proj))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1][0].shape, (batch_size, num_units))
self.assertEqual(res[1][1].shape, (batch_size, num_proj))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) < 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) < 1e-6)
@test_util.run_in_graph_and_eager_modes
def testWrapperCheckpointing(self):
for wrapper_type in [
rnn_cell_impl.DropoutWrapper,
rnn_cell_impl.ResidualWrapper,
lambda cell: rnn_cell_impl.MultiRNNCell([cell])]:
cell = rnn_cell_impl.BasicRNNCell(1)
wrapper = wrapper_type(cell)
wrapper(array_ops.ones([1, 1]),
state=wrapper.zero_state(batch_size=1, dtype=dtypes.float32))
self.evaluate([v.initializer for v in cell.variables])
checkpoint = checkpointable_utils.Checkpoint(wrapper=wrapper)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(cell._bias.assign([40.]))
save_path = checkpoint.save(prefix)
self.evaluate(cell._bias.assign([0.]))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertAllEqual([40.], self.evaluate(cell._bias))
def testOutputProjectionWrapper(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.OutputProjectionWrapper(rnn_cell_impl.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.InputProjectionWrapper(
rnn_cell_impl.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
@parameterized.parameters(
[rnn_cell_impl.ResidualWrapper, rnn_cell_impl.ResidualWrapperV2])
@test_util.run_in_graph_and_eager_modes
def testResidualWrapper(self, wrapper_type):
x = ops.convert_to_tensor(np.array([[1., 1., 1.]]))
m = ops.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]))
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
g, m_new = base_cell(x, m)
wrapper_object = wrapper_type(base_cell)
(name, dep), = wrapper_object._checkpoint_dependencies
wrapper_object.get_config() # Should not throw an error
self.assertIs(dep, base_cell)
self.assertEqual("cell", name)
g_res, m_new_res = wrapper_object(x, m)
self.evaluate([variables_lib.global_variables_initializer()])
res = self.evaluate([g, g_res, m_new, m_new_res])
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
@parameterized.parameters(
[rnn_cell_impl.ResidualWrapper, rnn_cell_impl.ResidualWrapperV2])
@test_util.run_in_graph_and_eager_modes
def testResidualWrapperWithSlice(self, wrapper_type):
x = ops.convert_to_tensor(np.array([[1., 1., 1., 1., 1.]]))
m = ops.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]))
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
g, m_new = base_cell(x, m)
def residual_with_slice_fn(inp, out):
inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
return inp_sliced + out
g_res, m_new_res = wrapper_type(
base_cell, residual_with_slice_fn)(x, m)
self.evaluate([variables_lib.global_variables_initializer()])
res_g, res_g_res, res_m_new, res_m_new_res = self.evaluate(
[g, g_res, m_new, m_new_res])
# Residual connections
self.assertAllClose(res_g_res, res_g + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res_m_new, res_m_new_res)
def testDeviceWrapper(self):
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
wrapped = rnn_cell_impl.GRUCell(3)
cell = rnn_cell_impl.DeviceWrapper(wrapped, "/cpu:14159")
(name, dep), = cell._checkpoint_dependencies
cell.get_config() # Should not throw an error
self.assertIs(dep, wrapped)
self.assertEqual("cell", name)
outputs, _ = cell(x, m)
self.assertTrue("cpu:14159" in outputs.device.lower())
def _retrieve_cpu_gpu_stats(self, run_metadata):
cpu_stats = None
gpu_stats = None
step_stats = run_metadata.step_stats
for ds in step_stats.dev_stats:
if "cpu:0" in ds.device[-5:].lower():
cpu_stats = ds.node_stats
if "gpu:0" == ds.device[-5:].lower():
gpu_stats = ds.node_stats
return cpu_stats, gpu_stats
def testDeviceWrapperDynamicExecutionNodesAreAllProperlyLocated(self):
if not test.is_gpu_available():
# Can't perform this test w/o a GPU
return
gpu_dev = test.gpu_device_name()
with self.session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), gpu_dev)
with ops.device("/cpu:0"):
outputs, _ = rnn.dynamic_rnn(
cell=cell, inputs=x, dtype=dtypes.float32)
run_metadata = config_pb2.RunMetadata()
opts = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run([variables_lib.global_variables_initializer()])
_ = sess.run(outputs, options=opts, run_metadata=run_metadata)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
self.assertFalse([s for s in cpu_stats if "gru_cell" in s.node_name])
self.assertTrue([s for s in gpu_stats if "gru_cell" in s.node_name])
def testEmbeddingWrapper(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1], dtype=dtypes.int32)
m = array_ops.zeros([1, 2])
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.GRUCell(2), embedding_classes=3, embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root"):
inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)
input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.BasicLSTMCell(1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = rnn.dynamic_rnn(
cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=dtypes.float32)
sess.run([variables_lib.global_variables_initializer()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
def testMultiRNNCell(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
multi_rnn_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=False)
_, ml = multi_rnn_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])
self.assertEqual(len(multi_rnn_cell.weights), 2 * 4)
self.assertTrue(
[x.dtype == dtypes.float32 for x in multi_rnn_cell.weights])
def testMultiRNNCellWithStateTuple(self):
with self.cached_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_good)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
@parameterized.parameters(
[[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2],
[rnn_cell_impl.ResidualWrapper, rnn_cell_impl.ResidualWrapperV2]])
@test_util.run_in_graph_and_eager_modes
def testWrapperKerasStyle(self, wrapper, wrapper_v2):
"""Tests if wrapper cell is instantiated in keras style scope."""
wrapped_cell_v2 = wrapper_v2(rnn_cell_impl.BasicRNNCell(1))
self.assertTrue(wrapped_cell_v2._keras_style)
wrapped_cell = wrapper(rnn_cell_impl.BasicRNNCell(1))
self.assertFalse(wrapped_cell._keras_style)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapperV2, rnn_cell_impl.ResidualWrapperV2])
@test_util.run_in_graph_and_eager_modes
def testWrapperV2VariableNames(self, wrapper):
"""Tests that variables names do not depend on wrapper in RNN layer."""
def _rnn_input(apply_wrapper, name):
"""Creates a RNN layer with/without wrapper and returns built rnn cell."""
with base_layer.keras_style_scope():
base_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicRNNCell(1, name="basic_rnn_cell")
for _ in range(2)])
if apply_wrapper:
rnn_cell = wrapper(base_cell)
else:
rnn_cell = base_cell
rnn_layer = keras_layers.RNN(rnn_cell, name=name)
inputs = ops.convert_to_tensor([[[1]]], dtype=dtypes.float32)
_ = rnn_layer(inputs)
return base_cell._cells[0]
rnn_1 = _rnn_input(True, name="rnn_0")
rnn_2 = _rnn_input(False, name="rnn_1")
for i, cell in enumerate([rnn_1, rnn_2]):
var_prefix = "rnn_{}/cell_0/basic_rnn_cell/".format(i)
self.assertCountEqual([v.name for v in cell.weights],
(var_prefix + "kernel:0", var_prefix + "bias:0"))
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapperV2, rnn_cell_impl.ResidualWrapperV2])
@test_util.run_in_graph_and_eager_modes
def testWrapperWeights(self, wrapper):
"""Tests that wrapper weights contain wrapped cells weights."""
with base_layer.keras_style_scope():
base_cell = rnn_cell_impl.BasicRNNCell(1, name="basic_rnn_cell")
rnn_cell = wrapper(base_cell)
rnn_layer = keras_layers.RNN(rnn_cell)
inputs = ops.convert_to_tensor([[[1]]], dtype=dtypes.float32)
rnn_layer(inputs)
expected_weights = ["rnn/" + var for var in ("kernel:0", "bias:0")]
self.assertEqual(len(rnn_cell.weights), 2)
self.assertCountEqual([v.name for v in rnn_cell.weights], expected_weights)
self.assertCountEqual([v.name for v in rnn_cell.trainable_variables],
expected_weights)
self.assertCountEqual([v.name for v in rnn_cell.non_trainable_variables],
[])
self.assertCountEqual([v.name for v in rnn_cell._cell.weights],
expected_weights)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapperV2, rnn_cell_impl.ResidualWrapperV2])
@test_util.run_in_graph_and_eager_modes
def testWrapperV2Caller(self, wrapper):
"""Tests that wrapper V2 is using the LayerRNNCell's caller."""
with base_layer.keras_style_scope():
base_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicRNNCell(1) for _ in range(2)])
rnn_cell = wrapper(base_cell)
inputs = ops.convert_to_tensor([[1]], dtype=dtypes.float32)
state = ops.convert_to_tensor([[1]], dtype=dtypes.float32)
_ = rnn_cell(inputs, [state, state])
weights = base_cell._cells[0].weights
self.assertLen(weights, expected_len=2)
self.assertTrue(all(["_wrapper" in v.name for v in weights]))
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapperV2, rnn_cell_impl.ResidualWrapperV2])
@test_util.run_in_graph_and_eager_modes
def testWrapperV2Build(self, wrapper):
cell = rnn_cell_impl.LSTMCell(10)
wrapper = wrapper(cell)
wrapper.build((1,))
self.assertTrue(cell.built)
@test_util.run_all_in_graph_and_eager_modes
class DropoutWrapperTest(test.TestCase, parameterized.TestCase):
def _testDropoutWrapper(self,
batch_size=None,
time_steps=None,
parallel_iterations=None,
wrapper_type=None,
scope="root",
**kwargs):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32)] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
m = rnn_cell_impl.LSTMStateTuple(*[
constant_op.
constant([[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=wrapper_type(
rnn_cell_impl.LSTMCell(
3, initializer=init_ops.constant_initializer(0.5)),
dtype=x.dtype, **kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x,
initial_state=m,
scope=scope)
self.evaluate([variables_lib.global_variables_initializer()])
res = self.evaluate([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperProperties(self, wrapper_type):
cell = rnn_cell_impl.BasicRNNCell(10)
wrapper = wrapper_type(cell)
# Github issue 15810
self.assertEqual(wrapper.wrapped_cell, cell)
self.assertEqual(wrapper.state_size, 10)
self.assertEqual(wrapper.output_size, 10)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperZeroState(self, wrapper_type):
class _Cell(rnn_cell_impl.BasicRNNCell):
def zero_state(self, batch_size=None, dtype=None):
return "wrapped_cell_zero_state"
wrapper = wrapper_type(_Cell(10))
self.assertEqual(wrapper.zero_state(10, dtypes.float32),
"wrapped_cell_zero_state")
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperKeepAllConstantInput(self, wrapper_type):
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperKeepAll(self, wrapper_type):
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperWithSeed(self, wrapper_type):
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1,
wrapper_type=wrapper_type,
scope="root_1")
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1,
wrapper_type=wrapper_type,
scope="root_2")
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperKeepNoOutput(self, wrapper_type):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_none,
state_keep_prob=keep_all,
wrapper_type=wrapper_type)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperKeepNoStateExceptLSTMCellMemory(self, wrapper_type):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
    # Even though we drop out the state, by default DropoutWrapper never
    # drops out the memory ("c") term of an LSTMStateTuple.
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_all,
state_keep_prob=keep_none,
wrapper_type=wrapper_type)
true_c_state = np.array([[1.713925, 1.713925, 1.713925]], dtype=np.float32)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
# h state has been set to zero
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
# c state of an LSTMStateTuple is NEVER modified.
self.assertAllClose(true_c_state, res[1].c)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperKeepNoInput(self, wrapper_type):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-6)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none,
output_keep_prob=keep_all,
state_keep_prob=keep_all,
wrapper_type=wrapper_type)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperRecurrentOutput(self, wrapper_type):
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_some,
state_keep_prob=keep_all,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperRecurrentStateInputAndOutput(self, wrapper_type):
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
@parameterized.parameters(
[rnn_cell_impl.DropoutWrapper, rnn_cell_impl.DropoutWrapperV2])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(
self, wrapper_type):
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987,
scope="root_0")
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
wrapper_type=wrapper_type,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987,
scope="root_1")
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
batch_size = inputs.get_shape()[0]
dtype = inputs.dtype
else:
batch_size = 0
dtype = dtypes.float32
init_output = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_state = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_output.set_shape([batch_size, num_units])
init_state.set_shape([batch_size, num_units])
return init_output, init_state
else:
with variable_scope.variable_scope(scope, "basic_rnn_cell",
[inputs, state]):
output = math_ops.tanh(
Linear([inputs, state], num_units, True)([inputs, state]))
return output, output
if __name__ == "__main__":
test.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A microbenchmark for measuring performance of coders.
This runs a sequence of encode-decode operations on random inputs
to measure the performance of various coders.
To evaluate coder performance we approximate how coders are used
on PCollections: we encode and decode a list of elements. An element
can be a string, a list of integers, a windowed value, or any other
object we want a coder to process.
Run as:
python -m apache_beam.tools.coders_microbenchmark
"""
from __future__ import absolute_import
from __future__ import print_function
import random
import string
import sys
from past.builtins import unicode
from apache_beam.coders import coders
from apache_beam.tools import utils
from apache_beam.transforms import window
from apache_beam.utils import windowed_value
def coder_benchmark_factory(coder, generate_fn):
"""Creates a benchmark that encodes and decodes a list of elements.
Args:
coder: coder to use to encode an element.
generate_fn: a callable that generates an element.
"""
class CoderBenchmark(object):
def __init__(self, num_elements_per_benchmark):
self._coder = coders.IterableCoder(coder)
self._list = [generate_fn()
for _ in range(num_elements_per_benchmark)]
def __call__(self):
      # Calling coder operations on a single element at a time may incur
      # irrelevant overhead. To compensate, we use a list of elements.
_ = self._coder.decode(self._coder.encode(self._list))
CoderBenchmark.__name__ = "%s, %s" % (
generate_fn.__name__, str(coder))
return CoderBenchmark
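# A minimal usage sketch for the factory above, kept as an uncalled helper so
# it does not change the benchmark suite itself. The local names here are
# illustrative only; run_coder_benchmarks() below remains the real entry point.
def _example_single_benchmark_run():
  benchmark_cls = coder_benchmark_factory(
      coders.FastPrimitivesCoder(), small_int)
  benchmark = benchmark_cls(1000)  # pre-generates 1000 random elements
  benchmark()  # one encode/decode round trip over the generated list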
def small_int():
return random.randint(0, 127)
def large_int():
return random.randint(sys.maxsize >> 2, sys.maxsize)
def random_string(length):
return unicode(''.join(random.choice(
string.ascii_letters + string.digits) for _ in range(length)))
def small_string():
return random_string(4)
def large_string():
return random_string(100)
def list_int(size):
return [small_int() for _ in range(size)]
def dict_int_int(size):
return {i: i for i in list_int(size)}
def small_list():
return list_int(10)
def large_list():
return list_int(1000)
def small_tuple():
# Benchmark a common case of 2-element tuples.
return tuple(list_int(2))
def large_tuple():
return tuple(large_list())
def small_dict():
return {i: i for i in small_list()}
def large_dict():
return {i: i for i in large_list()}
def random_windowed_value(num_windows):
return windowed_value.WindowedValue(
value=small_int(),
timestamp=12345678,
windows=tuple(
window.IntervalWindow(i * 10, i * 10 + small_int())
for i in range(num_windows)
))
def wv_with_one_window():
return random_windowed_value(num_windows=1)
def wv_with_multiple_windows():
return random_windowed_value(num_windows=32)
def run_coder_benchmarks(num_runs, input_size, seed, verbose):
random.seed(seed)
# TODO(BEAM-4441): Pick coders using type hints, for example:
# tuple_coder = typecoders.registry.get_coder(typehints.Tuple[int, ...])
benchmarks = [
coder_benchmark_factory(
coders.FastPrimitivesCoder(), small_int),
coder_benchmark_factory(
coders.FastPrimitivesCoder(), large_int),
coder_benchmark_factory(
coders.FastPrimitivesCoder(), small_string),
coder_benchmark_factory(
coders.FastPrimitivesCoder(), large_string),
coder_benchmark_factory(
coders.FastPrimitivesCoder(),
small_list),
coder_benchmark_factory(
coders.IterableCoder(coders.FastPrimitivesCoder()),
small_list),
coder_benchmark_factory(
coders.FastPrimitivesCoder(),
large_list),
coder_benchmark_factory(
coders.IterableCoder(coders.FastPrimitivesCoder()),
large_list),
coder_benchmark_factory(
coders.FastPrimitivesCoder(),
small_tuple),
coder_benchmark_factory(
coders.FastPrimitivesCoder(),
large_tuple),
coder_benchmark_factory(
coders.FastPrimitivesCoder(),
small_dict),
coder_benchmark_factory(
coders.FastPrimitivesCoder(),
large_dict),
coder_benchmark_factory(
coders.WindowedValueCoder(coders.FastPrimitivesCoder()),
wv_with_one_window),
coder_benchmark_factory(
coders.WindowedValueCoder(coders.FastPrimitivesCoder()),
wv_with_multiple_windows),
]
suite = [utils.BenchmarkConfig(b, input_size, num_runs) for b in benchmarks]
utils.run_benchmarks(suite, verbose=verbose)
if __name__ == "__main__":
utils.check_compiled("apache_beam.coders.coder_impl")
num_runs = 20
num_elements_per_benchmark = 1000
seed = 42 # Fix the seed for better consistency
run_coder_benchmarks(num_runs, num_elements_per_benchmark, seed,
verbose=True)
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on activity summaries."""
from __future__ import annotations
from core import utils
from core.constants import constants
from core.domain import activity_services
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import search_services
from core.domain import stats_services
from core.domain import user_services
_LIBRARY_INDEX_GROUPS = [{
'header_i18n_id': 'I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS',
'search_categories': [
'Mathematics', 'Algebra', 'Arithmetic', 'Calculus', 'Combinatorics',
'Geometry', 'Graph Theory', 'Logic', 'Probability', 'Statistics',
'Trigonometry',
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_COMPUTING',
'search_categories': ['Algorithms', 'Computing', 'Programming'],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_SCIENCE',
'search_categories': [
'Astronomy', 'Biology', 'Chemistry', 'Engineering', 'Environment',
'Medicine', 'Physics',
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_HUMANITIES',
'search_categories': [
'Architecture', 'Art', 'Music', 'Philosophy', 'Poetry'
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_LANGUAGES',
'search_categories': [
'Languages', 'Reading', 'English', 'Latin', 'Spanish', 'Gaulish'
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_SOCIAL_SCIENCE',
'search_categories': [
'Business', 'Economics', 'Geography', 'Government', 'History', 'Law'
],
}]
def get_human_readable_contributors_summary(contributors_summary):
"""Gets contributors summary in human readable form.
Args:
contributors_summary: dict. The keys are user ids and
the values are the number of commits made by that user.
Returns:
dict. Dicts of contributors in human readable form; the keys are
usernames and the values are a dict. Example:
{
'albert': {
'num_commits': 10,
},
}
"""
contributor_ids = list(contributors_summary.keys())
contributor_usernames = user_services.get_human_readable_user_ids(
contributor_ids, strict=False)
return {
contributor_usernames[ind]: {
'num_commits': contributors_summary[contributor_ids[ind]],
}
for ind in range(len(contributor_ids))
}
def get_learner_collection_dict_by_id(
collection_id, user, strict=True,
allow_invalid_explorations=False, version=None):
"""Gets a dictionary representation of a collection given by the provided
collection ID. This dict includes user-specific playthrough information.
Args:
collection_id: str. The id of the collection.
user: UserActionsInfo. Object having user_id, role and actions for
given user.
strict: bool. Whether to fail noisily if no collection with the given
id exists in the datastore.
allow_invalid_explorations: bool. Whether to also return explorations
that are invalid, such as deleted/private explorations.
version: str or None. The version number of the collection to be
retrieved. If it is None, the latest version will be retrieved.
Returns:
dict. A dictionary that contains extra information along with the dict
returned by collection_domain.Collection.to_dict() which includes useful
data for the collection learner view. The information includes progress
in the collection, information about explorations referenced within the
collection, and a slightly nicer data structure for frontend work.
Raises:
ValidationError. If the collection retrieved using the given
ID references non-existent explorations.
"""
collection = collection_services.get_collection_by_id(
collection_id, strict=strict, version=version)
exp_ids = collection.exploration_ids
exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids(
exp_ids, user=user)
exp_summaries_dict_map = {
exp_summary_dict['id']: exp_summary_dict
for exp_summary_dict in exp_summary_dicts
}
# TODO(bhenning): Users should not be recommended explorations they have
# completed outside the context of a collection (see #1461).
next_exploration_id = None
completed_exp_ids = None
if user.user_id:
completed_exp_ids = (
collection_services.get_valid_completed_exploration_ids(
user.user_id, collection))
next_exploration_id = collection.get_next_exploration_id(
completed_exp_ids)
else:
# If the user is not logged in or they have not completed any of
# the explorations yet within the context of this collection,
# recommend the initial exploration.
next_exploration_id = collection.first_exploration_id
completed_exp_ids = []
collection_dict = collection.to_dict()
collection_dict['nodes'] = [
node.to_dict() for node in collection.nodes]
collection_dict['playthrough_dict'] = {
'next_exploration_id': next_exploration_id,
'completed_exploration_ids': completed_exp_ids
}
collection_dict['version'] = collection.version
collection_is_public = rights_manager.is_collection_public(collection_id)
# Insert an 'exploration' dict into each collection node, where the
# dict includes meta information about the exploration (ID and title).
for collection_node in collection_dict['nodes']:
exploration_id = collection_node['exploration_id']
summary_dict = exp_summaries_dict_map.get(exploration_id)
if not allow_invalid_explorations:
if not summary_dict:
raise utils.ValidationError(
'Expected collection to only reference valid '
'explorations, but found an exploration with ID: %s (was '
'the exploration deleted or is it a private exploration '
'that you do not have edit access to?)'
% exploration_id)
if collection_is_public and rights_manager.is_exploration_private(
exploration_id):
raise utils.ValidationError(
'Cannot reference a private exploration within a public '
'collection, exploration ID: %s' % exploration_id)
if summary_dict:
collection_node['exploration_summary'] = summary_dict
else:
collection_node['exploration_summary'] = None
return collection_dict
def get_displayable_collection_summary_dicts_matching_ids(collection_ids):
"""Returns a list of collection summary dicts corresponding to the given
collection ids.
Args:
collection_ids: list(str). A list of collection ids.
Returns:
list(dict). Each element in this list is a collection summary dict.
These elements are returned in the same order as that given
in collection_ids.
"""
collection_summaries = (
collection_services.get_collection_summaries_matching_ids(
collection_ids))
return _get_displayable_collection_summary_dicts(collection_summaries)
def get_exp_metadata_dicts_matching_query(query_string, search_offset, user):
"""Given a query string and a search offset, returns a list of exploration
metadata dicts that satisfy the search query.
Args:
query_string: str. The search query for which the search is to be
performed.
search_offset: int or None. The offset location to start the search
from. If None, the returned values are from the beginning
of the results list.
user: UserActionsInfo. Object having user_id, role and actions for
given user.
Returns:
2-tuple of (exploration_list, new_search_offset). Where:
- exploration_list list(dict). A list of metadata dicts for
explorations matching the query.
- new_search_offset (int). New search offset location.
"""
exp_ids, new_search_offset = (
exp_services.get_exploration_ids_matching_query(
query_string, [], [], offset=search_offset))
exploration_list = get_exploration_metadata_dicts(
exp_ids, user)
return exploration_list, new_search_offset
def get_exploration_metadata_dicts(exploration_ids, user):
"""Given a list of exploration ids, optionally filters the list for
explorations that are currently non-private and not deleted, and returns a
list of dicts of the corresponding exploration summaries for collection
node search.
Args:
exploration_ids: list(str). A list of exploration ids for which
exploration metadata dicts are to be returned.
user: UserActionsInfo. Object having user_id, role and actions for
given user.
Returns:
list(dict). A list of metadata dicts corresponding to the given
exploration ids. Each dict has three keys:
'id': the exploration id;
'title': the exploration title;
'objective': the exploration objective.
"""
exploration_summaries = (
exp_fetchers.get_exploration_summaries_matching_ids(exploration_ids))
exploration_rights_objects = (
rights_manager.get_multiple_exploration_rights_by_ids(exploration_ids))
filtered_exploration_summaries = []
for (exploration_summary, exploration_rights) in (
zip(exploration_summaries, exploration_rights_objects)):
if exploration_summary is not None and exploration_rights is not None:
if exploration_summary.status == (
rights_domain.ACTIVITY_STATUS_PRIVATE):
if user.user_id is None:
continue
if not rights_manager.check_can_edit_activity(
user, exploration_rights):
continue
filtered_exploration_summaries.append(exploration_summary)
return [
summary.to_metadata_dict()
for summary in filtered_exploration_summaries]
def get_displayable_exp_summary_dicts_matching_ids(exploration_ids, user=None):
"""Gets a summary of explorations in human readable form from
exploration ids.
Given a list of exploration ids, optionally filters the list for
explorations that are currently non-private and not deleted, and returns a
    list of dicts of the corresponding exploration summaries. Private
    explorations are only included if the given user has edit access to them.
    Please use this function when summary information is needed to display on
    exploration summary tiles in the frontend.
Args:
exploration_ids: list(str). List of exploration ids.
user: UserActionsInfo or None. Object having user_id, role and actions
for given user.
Returns:
list(dict). A list of exploration summary dicts in human readable form.
Example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
exploration_summaries = (
exp_fetchers.get_exploration_summaries_matching_ids(exploration_ids))
exploration_rights_objects = (
rights_manager.get_multiple_exploration_rights_by_ids(exploration_ids))
filtered_exploration_summaries = []
for (exploration_summary, exploration_rights) in (
zip(exploration_summaries, exploration_rights_objects)):
if exploration_summary is not None and exploration_rights is not None:
if exploration_summary.status == (
rights_domain.ACTIVITY_STATUS_PRIVATE):
if user is None:
continue
if not rights_manager.check_can_edit_activity(
user, exploration_rights):
continue
filtered_exploration_summaries.append(exploration_summary)
return get_displayable_exp_summary_dicts(filtered_exploration_summaries)
def get_displayable_exp_summary_dicts(exploration_summaries):
"""Gets a summary of explorations in human readable form.
Given a list of exploration summary domain objects, returns a list,
with the same number of elements, of the corresponding human-readable
exploration summary dicts.
This assumes that all the exploration summary domain objects passed in are
valid (i.e., none of them are None).
Args:
exploration_summaries: list(ExplorationSummary). List of exploration
summary objects.
Returns:
list(dict). A list of exploration summary dicts in human readable form.
Example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
exp_version_references = [
exp_domain.ExpVersionReference(exp_summary.id, exp_summary.version)
for exp_summary in exploration_summaries]
exp_stats_list = stats_services.get_exploration_stats_multi(
exp_version_references)
view_counts = [exp_stats.num_starts for exp_stats in exp_stats_list]
displayable_exp_summaries = []
for ind, exploration_summary in enumerate(exploration_summaries):
if exploration_summary:
summary_dict = {
'id': exploration_summary.id,
'title': exploration_summary.title,
'activity_type': constants.ACTIVITY_TYPE_EXPLORATION,
'category': exploration_summary.category,
'created_on_msec': utils.get_time_in_millisecs(
exploration_summary.exploration_model_created_on),
'objective': exploration_summary.objective,
'language_code': exploration_summary.language_code,
'last_updated_msec': utils.get_time_in_millisecs(
exploration_summary.exploration_model_last_updated
),
'human_readable_contributors_summary': (
get_human_readable_contributors_summary(
exploration_summary.contributors_summary)
),
'status': exploration_summary.status,
'ratings': exploration_summary.ratings,
'community_owned': exploration_summary.community_owned,
'tags': exploration_summary.tags,
'thumbnail_icon_url': utils.get_thumbnail_icon_url_for_category(
exploration_summary.category),
'thumbnail_bg_color': utils.get_hex_color_for_category(
exploration_summary.category),
'num_views': view_counts[ind],
}
displayable_exp_summaries.append(summary_dict)
return displayable_exp_summaries
def _get_displayable_collection_summary_dicts(collection_summaries):
"""Gets a summary of collections in human readable form.
Args:
collection_summaries: list(CollectionSummary). List of collection
summary domain object.
Returns:
        list(dict). A list of collection summary dicts in human readable form.
Example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
displayable_collection_summaries = []
for collection_summary in collection_summaries:
if collection_summary and collection_summary.status != (
rights_domain.ACTIVITY_STATUS_PRIVATE):
displayable_collection_summaries.append({
'id': collection_summary.id,
'title': collection_summary.title,
'category': collection_summary.category,
'activity_type': constants.ACTIVITY_TYPE_COLLECTION,
'objective': collection_summary.objective,
'language_code': collection_summary.language_code,
'tags': collection_summary.tags,
'node_count': collection_summary.node_count,
'last_updated_msec': utils.get_time_in_millisecs(
collection_summary.collection_model_last_updated),
'thumbnail_icon_url': (
utils.get_thumbnail_icon_url_for_category(
collection_summary.category)),
'thumbnail_bg_color': utils.get_hex_color_for_category(
collection_summary.category)})
return displayable_collection_summaries
def get_library_groups(language_codes):
"""Returns a list of groups for the library index page. Each group has a
header and a list of dicts representing activity summaries.
Args:
language_codes: list(str). A list of language codes. Only explorations
with these languages will be returned.
Returns:
list(dict). A list of groups for the library index page. Each group is
represented by a dict with the following keys and values:
- activity_summary_dicts: list(dict). A list of dicts representing
activity summaries.
- categories: list(str). The list of group categories.
- header_i18n_id: str. The i18n id for the header of the category.
- has_full_results_page: bool. Whether the group header links to
a "full results" page. This is always True for the
"exploration category" groups.
- full_results_url: str. The URL to the corresponding "full results"
page.
"""
# Collect all collection ids so that the summary details can be retrieved
# with a single get_multi() call.
all_collection_ids = []
header_id_to_collection_ids = {}
for group in _LIBRARY_INDEX_GROUPS:
collection_ids = search_services.search_collections(
'', group['search_categories'], language_codes, 8)[0]
header_id_to_collection_ids[group['header_i18n_id']] = collection_ids
all_collection_ids += collection_ids
collection_summaries = [
summary for summary in
collection_services.get_collection_summaries_matching_ids(
all_collection_ids)
if summary is not None]
collection_summary_dicts = {
summary_dict['id']: summary_dict
for summary_dict in _get_displayable_collection_summary_dicts(
collection_summaries)
}
# Collect all exp ids so that the summary details can be retrieved with a
# single get_multi() call.
all_exp_ids = []
header_to_exp_ids = {}
for group in _LIBRARY_INDEX_GROUPS:
exp_ids = search_services.search_explorations(
'', group['search_categories'], language_codes, 8)[0]
header_to_exp_ids[group['header_i18n_id']] = exp_ids
all_exp_ids += exp_ids
exp_summaries = [
summary for summary in
exp_fetchers.get_exploration_summaries_matching_ids(all_exp_ids)
if summary is not None]
exp_summary_dicts = {
summary_dict['id']: summary_dict
for summary_dict in get_displayable_exp_summary_dicts(exp_summaries)
}
results = []
for group in _LIBRARY_INDEX_GROUPS:
summary_dicts = []
collection_ids_to_display = (
header_id_to_collection_ids[group['header_i18n_id']])
summary_dicts = [
collection_summary_dicts[collection_id]
for collection_id in collection_ids_to_display
if collection_id in collection_summary_dicts]
exp_ids_to_display = header_to_exp_ids[group['header_i18n_id']]
summary_dicts += [
exp_summary_dicts[exp_id] for exp_id in exp_ids_to_display
if exp_id in exp_summary_dicts]
if not summary_dicts:
continue
results.append({
'header_i18n_id': group['header_i18n_id'],
'categories': group['search_categories'],
'activity_summary_dicts': summary_dicts,
'has_full_results_page': True,
'full_results_url': None,
})
return results
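# Illustrative shape of one element of the list returned by get_library_groups()
# above. The values here are placeholders drawn from _LIBRARY_INDEX_GROUPS, not
# real datastore contents:
#
#     {
#         'header_i18n_id': 'I18N_LIBRARY_GROUPS_COMPUTING',
#         'categories': ['Algorithms', 'Computing', 'Programming'],
#         'activity_summary_dicts': [...],
#         'has_full_results_page': True,
#         'full_results_url': None,
#     }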
def require_activities_to_be_public(activity_references):
"""Raises an exception if any activity reference in the list does not
exist, or is not public.
Args:
activity_references: list(ActivityReference). A list of
ActivityReference domain objects.
Raises:
Exception. Any activity reference in the list does not
exist, or is not public.
"""
exploration_ids, collection_ids = activity_services.split_by_type(
activity_references)
activity_summaries_by_type = [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'ids': exploration_ids,
'summaries': exp_fetchers.get_exploration_summaries_matching_ids(
exploration_ids),
}, {
'type': constants.ACTIVITY_TYPE_COLLECTION,
'ids': collection_ids,
'summaries': collection_services.get_collection_summaries_matching_ids(
collection_ids),
}]
for activities_info in activity_summaries_by_type:
for index, summary in enumerate(activities_info['summaries']):
if summary is None:
raise Exception(
'Cannot feature non-existent %s with id %s' %
(activities_info['type'], activities_info['ids'][index]))
if summary.status == rights_domain.ACTIVITY_STATUS_PRIVATE:
raise Exception(
'Cannot feature private %s with id %s' %
(activities_info['type'], activities_info['ids'][index]))
def get_featured_activity_summary_dicts(language_codes):
"""Returns a list of featured activities with the given language codes.
The return value is sorted according to the list stored in the datastore.
Args:
language_codes: list(str). A list of language codes. Only explorations
with these languages will be returned.
Returns:
list(dict). Each dict in this list represents a featured activity.
For example:
[ {
'status': 'public',
'thumbnail_bg_color': '#a33f40',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'id': 'eid2',
'category': 'A category',
'ratings': feconf.get_empty_ratings(),
'title': 'A title',
'num_views': 0,
'objective': 'An objective',
}, ]
"""
activity_references = activity_services.get_featured_activity_references()
exploration_ids, collection_ids = activity_services.split_by_type(
activity_references)
exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids(
exploration_ids)
col_summary_dicts = get_displayable_collection_summary_dicts_matching_ids(
collection_ids)
summary_dicts_by_id = {
constants.ACTIVITY_TYPE_EXPLORATION: {
summary_dict['id']: summary_dict
for summary_dict in exp_summary_dicts
},
constants.ACTIVITY_TYPE_COLLECTION: {
summary_dict['id']: summary_dict
for summary_dict in col_summary_dicts
},
}
featured_summary_dicts = []
for reference in activity_references:
if reference.id in summary_dicts_by_id[reference.type]:
summary_dict = summary_dicts_by_id[reference.type][reference.id]
if summary_dict and summary_dict['language_code'] in language_codes:
featured_summary_dicts.append(summary_dict)
return featured_summary_dicts
def get_top_rated_exploration_summary_dicts(language_codes, limit):
"""Returns a list of top rated explorations with the given language codes.
The return value is sorted in decreasing order of average rating.
Args:
language_codes: list(str). A list of language codes. Only explorations
with these languages will be returned.
limit: int. The maximum number of explorations to return.
Returns:
        list(dict). Each dict in this list represents an exploration summary in
human readable form. The list is sorted in decreasing order of average
rating. For example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
filtered_exp_summaries = [
exp_summary for exp_summary in
exp_services.get_top_rated_exploration_summaries(limit).values()
if exp_summary.language_code in language_codes and
sum(exp_summary.ratings.values()) > 0]
sorted_exp_summaries = sorted(
filtered_exp_summaries,
key=lambda exp_summary: exp_summary.scaled_average_rating,
reverse=True)
return get_displayable_exp_summary_dicts(sorted_exp_summaries)
def get_recently_published_exp_summary_dicts(limit):
"""Returns a list of recently published explorations.
Args:
limit: int. The maximum number of explorations to return.
Returns:
        list(dict). Each dict in this list represents a recently published
        exploration summary in human readable form. For example:
[ {
'category': u'A category',
'community_owned': False,
'id': 'eid2',
'language_code': 'en',
'num_views': 0,
'objective': u'An objective',
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}, ]
"""
recently_published_exploration_summaries = list(
exp_services.get_recently_published_exp_summaries(limit).values())
    # Arrange the recently published exploration summaries in reverse
    # chronological order (most recently published first).
summaries = sorted(
recently_published_exploration_summaries,
key=lambda exp_summary: exp_summary.first_published_msec,
reverse=True)
return get_displayable_exp_summary_dicts(summaries)
|
|
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for XenServer or Xen Cloud Platform.
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import math
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import versionutils
import six
import six.moves.urllib.parse as urlparse
import nova.conf
from nova.i18n import _, _LE, _LW
from nova import exception
from nova.virt import driver
from nova.virt.xenapi.client import session
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
OVERHEAD_BASE = 3
OVERHEAD_PER_MB = 0.00781
OVERHEAD_PER_VCPU = 1.5
def invalid_option(option_name, recommended_value):
LOG.exception(_LE('Current value of '
'CONF.xenserver.%(option)s option incompatible with '
'CONF.xenserver.independent_compute=True. '
'Consider using "%(recommended)s"') % {
'option': option_name,
'recommended': recommended_value})
raise exception.NotSupportedWithOption(
operation=option_name,
option='CONF.xenserver.independent_compute')
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
url = CONF.xenserver.connection_url
username = CONF.xenserver.connection_username
password = CONF.xenserver.connection_password
if not url or password is None:
raise Exception(_('Must specify connection_url, '
'connection_username (optionally), and '
'connection_password to use '
'compute_driver=xenapi.XenAPIDriver'))
self._session = session.XenAPISession(url, username, password)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session, self.virtapi)
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
self._pool = pool.ResourcePool(self._session, self.virtapi)
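    # A minimal nova.conf sketch for wiring up this driver; the values below
    # are placeholders, not recommendations:
    #
    #     [DEFAULT]
    #     compute_driver = xenapi.XenAPIDriver
    #
    #     [xenserver]
    #     connection_url = https://my-xenserver-host
    #     connection_username = root
    #     connection_password = my-password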
@property
def host_state(self):
if not self._host_state:
self._host_state = host.HostState(self._session)
return self._host_state
def init_host(self, host):
if CONF.xenserver.independent_compute:
# Check various options are in the correct state:
if CONF.xenserver.check_host:
invalid_option('CONF.xenserver.check_host', False)
if CONF.flat_injected:
invalid_option('CONF.flat_injected', False)
if CONF.default_ephemeral_format and \
CONF.default_ephemeral_format != 'ext3':
invalid_option('CONF.default_ephemeral_format', 'ext3')
if CONF.xenserver.check_host:
vm_utils.ensure_correct_host(self._session)
if not CONF.xenserver.independent_compute:
try:
vm_utils.cleanup_attached_vdis(self._session)
except Exception:
LOG.exception(_LE('Failure while cleaning up attached VDIs'))
def instance_exists(self, instance):
"""Checks existence of an instance on the host.
:param instance: The instance to lookup
Returns True if supplied instance exists on the host, False otherwise.
NOTE(belliott): This is an override of the base method for
efficiency.
"""
return self._vmops.instance_exists(instance.name)
def estimate_instance_overhead(self, instance_info):
"""Get virtualization overhead required to build an instance of the
given flavor.
:param instance_info: Instance/flavor to calculate overhead for.
:returns: Overhead memory in MB.
"""
# XenServer memory overhead is proportional to the size of the
# VM. Larger flavor VMs become more efficient with respect to
# overhead.
# interpolated formula to predict overhead required per vm.
# based on data from:
# https://wiki.openstack.org/wiki/XenServer/Overhead
# Some padding is done to each value to fit all available VM data
memory_mb = instance_info['memory_mb']
vcpus = instance_info.get('vcpus', 1)
overhead = ((memory_mb * OVERHEAD_PER_MB) + (vcpus * OVERHEAD_PER_VCPU)
+ OVERHEAD_BASE)
overhead = math.ceil(overhead)
return {'memory_mb': overhead}
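    # Worked example for the formula above (illustrative numbers only): a
    # flavor with 2048 MB of RAM and 2 vCPUs gives
    # 2048 * 0.00781 + 2 * 1.5 + 3 ~= 22.0, so this method would return
    # {'memory_mb': 22}.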
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def list_instance_uuids(self):
"""Get the list of nova instance uuids for VMs found on the
hypervisor.
"""
return self._vmops.list_instance_uuids()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize."""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(context, instance,
block_device_info,
power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def post_interrupted_snapshot_cleanup(self, context, instance):
"""Cleans up any resources left after a failed snapshot."""
self._vmops.post_interrupted_snapshot_cleanup(context, instance)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, reboot_type,
bad_volumes_callback=bad_volumes_callback)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
self._vmops.inject_file(instance, b64_path, b64_contents)
def change_instance_metadata(self, context, instance, diff):
"""Apply a diff to the instance metadata."""
self._vmops.change_instance_metadata(instance, diff)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk
"""
# NOTE(vish): Xen currently does not use network info.
# TODO(PhilDay): Add support for timeout (clean shutdown)
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, flavor, block_device_info)
def suspend(self, context, instance):
"""suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def set_bootable(self, instance, is_bootable):
"""Set the ability to power on/off an instance."""
self._vmops.set_bootable(instance, is_bootable)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
# TODO(PhilDay): Add support for timeout (clean shutdown)
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
"""Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def reset_network(self, instance):
"""reset networking for specified instance."""
self._vmops.reset_network(instance)
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, nw_info)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance, network_info)
def get_info(self, instance):
"""Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_instance_diagnostics(instance)
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
"""
# we only care about VMs that correspond to a nova-managed
# instance:
imap = {inst['name']: inst['uuid'] for inst in instances}
bwcounters = []
        # get a dictionary of instance names. values are dictionaries
        # of mac addresses with values that are the bw counters:
        # e.g. {'instance-001': {'12:34:56:78:90:12': {'bw_in': 0, ...}}}
all_counters = self._vmops.get_all_bw_counters()
for instance_name, counters in six.iteritems(all_counters):
if instance_name in imap:
# yes these are stats for a nova-managed vm
# correlate the stats with the nova instance uuid:
for vif_counter in counters.values():
vif_counter['uuid'] = imap[instance_name]
bwcounters.append(vif_counter)
return bwcounters
def get_console_output(self, context, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.host_state.get_host_stats(refresh=True)
try:
self._initiator = stats['host_other-config']['iscsi_iqn']
self._hypervisor_hostname = stats['host_hostname']
except (TypeError, KeyError) as err:
LOG.warning(_LW('Could not determine key: %s'), err,
instance=instance)
self._initiator = None
return {
'ip': self._get_block_storage_ip(),
'initiator': self._initiator,
'host': self._hypervisor_hostname
}
def _get_block_storage_ip(self):
# If CONF.my_block_storage_ip is set, use it.
if CONF.my_block_storage_ip != CONF.my_ip:
return CONF.my_block_storage_ip
return self.get_host_ip_addr()
def get_host_ip_addr(self):
xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
return xs_url.netloc
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage from VM instance."""
self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
def get_console_pool_info(self, console_type):
xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
return {'address': xs_url.netloc,
'username': CONF.xenserver.connection_username,
'password': CONF.xenserver.connection_password}
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: ignored in this driver
:returns: dictionary describing resources
"""
host_stats = self.host_state.get_host_stats(refresh=True)
# Updating host information
total_ram_mb = host_stats['host_memory_total'] / units.Mi
# NOTE(belliott) memory-free-computed is a value provided by XenServer
# for gauging free memory more conservatively than memory-free.
free_ram_mb = host_stats['host_memory_free_computed'] / units.Mi
total_disk_gb = host_stats['disk_total'] / units.Gi
used_disk_gb = host_stats['disk_used'] / units.Gi
allocated_disk_gb = host_stats['disk_allocated'] / units.Gi
hyper_ver = versionutils.convert_version_to_int(
self._session.product_version)
dic = {'vcpus': host_stats['host_cpu_info']['cpu_count'],
'memory_mb': total_ram_mb,
'local_gb': total_disk_gb,
'vcpus_used': host_stats['vcpus_used'],
'memory_mb_used': total_ram_mb - free_ram_mb,
'local_gb_used': used_disk_gb,
'hypervisor_type': 'XenServer',
'hypervisor_version': hyper_ver,
'hypervisor_hostname': host_stats['host_hostname'],
'cpu_info': jsonutils.dumps(host_stats['cpu_model']),
'disk_available_least': total_disk_gb - allocated_disk_gb,
'supported_instances': host_stats['supported_instances'],
'pci_passthrough_devices': jsonutils.dumps(
host_stats['pci_passthrough_devices']),
'numa_topology': None}
return dic
def ensure_filtering_rules_for_instance(self, instance, network_info):
        # NOTE(salvatore-orlando): this enforces security groups on
        # host initialization and live migration.
        # In XenAPI we do not assume instances are running upon host
        # initialization.
return
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False, disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a XenapiLiveMigrateData object
"""
return self._vmops.check_can_live_migrate_destination(context,
instance,
block_migration,
disk_over_commit)
def cleanup_live_migration_destination_check(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
:param dest_check_data: result of check_can_live_migrate_destination
"""
pass
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
includes the block_migration flag
:param block_device_info: result of _get_instance_block_device_info
:returns: a XenapiLiveMigrateData object
"""
return self._vmops.check_can_live_migrate_source(context, instance,
dest_check_data)
def get_instance_disk_info(self, instance,
block_device_info=None):
"""Used by libvirt for live migration. We rely on xenapi
checks to do this for us.
"""
pass
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Performs the live migration of the specified instance.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, migrate VM disk.
:param migrate_data: a XenapiLiveMigrateData object
"""
self._vmops.live_migrate(context, instance, dest, post_method,
recover_method, block_migration, migrate_data)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Performs a live migration rollback.
:param context: security context
:param instance: instance object that was being migrated
:param network_info: instance network information
:param block_device_info: instance block device information
:param destroy_disks:
if true, destroy disks at destination during cleanup
:param migrate_data: A XenapiLiveMigrateData object
"""
# NOTE(johngarbutt) Destroying the VM is not appropriate here
# and in the cases where it might make sense,
# XenServer has already done it.
# NOTE(sulo): The only cleanup we do explicitly is to forget
# any volume that was attached to the destination during
# live migration. XAPI should take care of all other cleanup.
self._vmops.rollback_live_migration_at_destination(instance,
block_device_info)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data):
"""Preparation live migration.
:param block_device_info:
It must be the result of _get_instance_volume_bdms()
at compute manager.
:returns: a XenapiLiveMigrateData object
"""
return self._vmops.pre_live_migration(context, instance,
block_device_info, network_info, disk_info, migrate_data)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
"""Post operation of live migration at source host.
:param context: security context
        :param instance: instance object that was migrated
        :param block_device_info: instance block device information
:param migrate_data: a XenapiLiveMigrateData object
"""
self._vmops.post_live_migration(context, instance, migrate_data)
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
self._vmops.post_live_migration_at_destination(context, instance,
network_info, block_device_info, block_device_info)
def unfilter_instance(self, instance, network_info):
"""Removes security groups configured for an instance."""
return self._vmops.unfilter_instance(instance, network_info)
def refresh_security_group_rules(self, security_group_id):
"""Updates security group rules for all instances associated with a
given security group.
Invoked when security group rules are updated.
"""
return self._vmops.refresh_security_group_rules(security_group_id)
def refresh_instance_security_rules(self, instance):
"""Updates security group rules for specified instance.
Invoked when instances are added/removed to a security group
or when a rule is added/removed to a security group.
"""
return self._vmops.refresh_instance_security_rules(instance)
def get_available_nodes(self, refresh=False):
stats = self.host_state.get_host_stats(refresh=refresh)
return [stats["hypervisor_hostname"]]
def host_power_action(self, action):
"""The only valid values for 'action' on XenServer are 'reboot' or
'shutdown', even though the API also accepts 'startup'. As this is
not technically possible on XenServer, since the host is the same
physical machine as the hypervisor, if this is requested, we need to
raise an exception.
"""
if action in ("reboot", "shutdown"):
return self._host.host_power_action(action)
else:
msg = _("Host startup on XenServer is not supported.")
raise NotImplementedError(msg)
def set_host_enabled(self, enabled):
"""Sets the compute host's ability to accept new instances."""
return self._host.set_host_enabled(enabled)
def get_host_uptime(self):
"""Returns the result of calling "uptime" on the target host."""
return self._host.get_host_uptime()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self._host.host_maintenance_mode(host, mode)
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
return self._pool.remove_from_aggregate(context,
aggregate, host, **kwargs)
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo aggregate operation when pool error raised."""
return self._pool.undo_aggregate_operation(context, op,
aggregate, host, set_error)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
self._vmops.power_on(instance)
def get_per_instance_usage(self):
"""Get information about instance resource usage.
:returns: dict of nova uuid => dict of usage info
"""
return self._vmops.get_per_instance_usage()
|
|
# Copyright (c) 2013 Per Unneberg
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import re
import collections
from datetime import datetime
import time
import luigi
import glob
import itertools
import logging
from ratatosk.log import get_logger
from ratatosk.experiment import Sample
logger = get_logger()
# http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
# FIX ME: make override work
def update(d, u, override=True, expandvars=True):
"""Update values of a nested dictionary of varying depth"""
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
if expandvars and isinstance(v, str):
u[k] = os.path.expandvars(v)
d[k] = u[k]
return d
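# Usage sketch (hypothetical dicts):
#   update({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
#   # -> {"a": {"b": 1, "c": 2}, "d": 3}
# Nested mappings are merged rather than replaced, and string values are passed
# through os.path.expandvars() by default.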
# FIXME: implement replacement for cement.ConfigHandler check
# Would allow passing d["_sections"] or d
def config_to_dict(d):
"""Convert config handler or OrderedDict entries to dict for yaml
output.
:param d: config handler or ordered dict
"""
if d is None:
return {}
    if not isinstance(d, dict):
        raise TypeError("unsupported type <{}>".format(type(d)))
u = {}
for k, v in d.iteritems():
u[k] = {}
if isinstance(v, collections.Mapping):
for x, y in v.iteritems():
if isinstance(y, collections.Mapping):
u[k][x] = dict(y)
else:
u[k][x] = y
else:
u[k] = v
return u
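# Usage sketch (hypothetical config contents):
#   config_to_dict({"program": {"options": {"threads": 8}}})
#   # -> {"program": {"options": {"threads": 8}}}, with plain dicts suitable
#   # for yaml output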
# http://stackoverflow.com/questions/2556108/how-to-replace-the-last-occurence-of-an-expression-in-a-string
def rreplace(s, old, new, occurrence):
li = s.rsplit(old, occurrence)
return new.join(li)
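# Usage sketch: rreplace("a.b.c", ".", "_", 1) -> "a.b_c"
# (only the last occurrence of "." is replaced).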
# http://stackoverflow.com/questions/2020014/get-fully-qualified-class-name-of-an-object-in-python
def fullclassname(o):
return o.__module__ + "." + o.__name__
def utc_time():
"""Make an utc_time with appended 'Z'"""
return str(datetime.utcnow()) + 'Z'
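# Example output (illustrative): "2013-02-21 20:25:38.123456Z" -- note this is
# str(datetime.utcnow()) with a literal 'Z' appended, not strict ISO 8601.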
def make_fastq_links(targets, indir, outdir, fastq_suffix="001.fastq.gz", ssheet="SampleSheet.csv"):
"""Given a set of targets and an output directory, create links
from targets (source raw data) to an output directory.
:param targets: list of :class:`ratatosk.experiment.ISample` objects
:param outdir: (top) output directory
:param fastq_suffix: fastq suffix
:param ssheet: sample sheet name
:returns: new targets list with updated output directory
"""
newtargets = []
for tgt in targets:
fastq = glob.glob("{}*{}".format(tgt.prefix("sample_run"), fastq_suffix))
if len(fastq) == 0:
logger.warn("No fastq files for prefix {} in {}".format(tgt.prefix("sample_run"), "make_fastq_links"))
for f in fastq:
newpath = os.path.join(outdir, os.path.relpath(f, indir))
if not os.path.exists(os.path.dirname(newpath)):
logger.info("Making directories to {}".format(os.path.dirname(newpath)))
os.makedirs(os.path.dirname(newpath))
if not os.path.exists(os.path.join(os.path.dirname(newpath), ssheet)):
try:
os.symlink(os.path.abspath(os.path.join(os.path.dirname(f), ssheet)),
os.path.join(os.path.dirname(newpath), ssheet))
                except OSError:
                    logger.warn("No sample sheet found for {}".format(os.path.dirname(f)))
if not os.path.exists(newpath):
logger.info("Linking {} -> {}".format(newpath, os.path.abspath(f)))
os.symlink(os.path.abspath(f), newpath)
if not os.path.lexists(os.path.join(os.path.dirname(newpath), ssheet)) and os.path.exists(os.path.abspath(os.path.join(os.path.dirname(f), ssheet))):
os.symlink(os.path.abspath(os.path.join(os.path.dirname(f), ssheet)), os.path.join(os.path.dirname(newpath), ssheet))
newsample = Sample(project_id=tgt.project_id(), sample_id=tgt.sample_id(),
project_prefix=outdir, sample_prefix=os.path.join(outdir, os.path.relpath(tgt.prefix("sample"), indir)),
sample_run_prefix=os.path.join(outdir, os.path.relpath(tgt.prefix("sample_run"), indir)))
newtargets.append(newsample)
# newtargets.append((tgt.sample_id(),
# os.path.join(outdir, os.path.relpath(tgt.prefix("sample"), indir)),
# os.path.join(outdir, os.path.relpath(tgt.prefix("sample_run"), indir))))
return newtargets
# Shamelessly stolen from http://twistedmatrix.com/trac/browser/tags/releases/twisted-8.2.0/twisted/python/procutils.py
# See http://stackoverflow.com/questions/5226958/which-equivalent-function-in-python
def which(name, flags=os.X_OK):
"""Search PATH for executable files with the given name.
On newer versions of MS-Windows, the PATHEXT environment variable will be
set to the list of file extensions for files considered executable. This
    will normally include things like ".EXE". This function will also find files
with the given name ending with any of these extensions.
On MS-Windows the only flag that has any meaning is os.F_OK. Any other
flags will be ignored.
@type name: C{str}
@param name: The name for which to search.
@type flags: C{int}
@param flags: Arguments to L{os.access}.
    @rtype: C{list}
    @return: A list of the full paths to files found, in the
    order in which they were found.
"""
    result = []
    exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
    path = os.environ.get('PATH', None)
    if path is None:
        return []
    for p in path.split(os.pathsep):
        p = os.path.join(p, name)
        if os.access(p, flags):
            result.append(p)
        for e in exts:
            pext = p + e
            if os.access(pext, flags):
                result.append(pext)
    return result
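# Usage sketch (results depend entirely on the host's PATH):
#   which("ls")        # -> ['/bin/ls'] on a typical Linux system
#   which("no_such")   # -> []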
def opt_to_dict(opts):
"""Transform option list to a dictionary.
:param opts: option list
:returns: option dictionary
"""
    if isinstance(opts, dict):
        return opts
if isinstance(opts, str):
opts = opts.split(" ")
args = list(itertools.chain.from_iterable([x.split("=") for x in opts]))
opt_d = {k: True if v.startswith('-') else v
for k,v in zip(args, args[1:]+["--"]) if k.startswith('-')}
return opt_d
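# Usage sketch (hypothetical option string):
#   opt_to_dict("-p 4 --verbose -o out.txt")
#   # -> {'-p': '4', '--verbose': True, '-o': 'out.txt'}
# Flags followed by another flag (or by nothing) map to True; flags followed by
# a value map to that value.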
def dict_to_opt(opt_dict):
"""Transform option dict to an option list.
:param opt_dict: option dict
:returns: option list
"""
args = list(itertools.chain.from_iterable([(k,v) for k,v in opt_dict.iteritems()]))
ret_args = [x for x in args if not isinstance(x, bool)]
return ret_args
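# Usage sketch (roughly the inverse of opt_to_dict; dict ordering is not
# guaranteed in Python 2, so list order may vary):
#   dict_to_opt({'-p': '4', '--verbose': True})
#   # -> ['-p', '4', '--verbose']   (boolean values are dropped, leaving bare flags)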
def determine_read_type(fn, read1_suffix, read2_suffix=None, suffix="(.fastq.gz$|.fastq$|.fq.gz$|.fq$|.sai$)"):
"""Deduce if fn is first or second in read pair.
:param fn: file name
:param read1_suffix: read1 suffix
:param read2_suffix: read2 suffix
:param suffix: suffix to use for paired files
"""
parts = os.path.basename(fn).split(".")
if parts[0].endswith(read1_suffix):
return 1
if read2_suffix:
if parts[0].endswith(str(read2_suffix)):
return 2
    raise Exception("file name {} doesn't appear to have any read pair information in its name".format(fn))
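# Usage sketch (hypothetical file names):
#   determine_read_type("P001_101_1.fastq.gz", "_1", "_2")  # -> 1
#   determine_read_type("P001_101_2.fastq.gz", "_1", "_2")  # -> 2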
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'robot.ui'
#
# Created: Sat Feb 21 20:25:38 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
import rospy
import actionlib
from move_base_msgs.msg import *
import time
from PyQt4 import QtCore, QtGui
table_position = dict()
table_position[0] = (-0.465, 0.37, 0.010, 0, 0, 0.998, 0.069)
table_position[1] = (0.599, 1.03, 0.010, 0, 0, 1.00, -0.020)
table_position[2] = (4.415, 0.645, 0.010, 0, 0, -0.034, 0.999)
table_position[3] = (7.409, 0.812, 0.010, 0, 0, -0.119, 0.993)
table_position[4] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[5] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[6] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[7] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[8] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
table_position[9] = (1.757, 4.377, 0.010, 0, 0, -0.040, 0.999)
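# Each tuple above is assumed to be (x, y, z, qx, qy, qz, qw): a map-frame
# position plus a quaternion orientation, consumed by Ui_Form.Go() below.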
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(376, 338)
self.spinBox = QtGui.QSpinBox(Form)
self.spinBox.setGeometry(QtCore.QRect(20, 160, 161, 121))
font = QtGui.QFont()
font.setPointSize(35)
font.setBold(True)
font.setWeight(75)
self.spinBox.setFont(font)
self.spinBox.setMaximum(9)
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(20, 120, 111, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(220, 190, 131, 41))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(220, 240, 131, 41))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(220, 140, 131, 41))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.progressBar = QtGui.QProgressBar(Form)
self.progressBar.setGeometry(QtCore.QRect(20, 60, 118, 23))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(20, 20, 111, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(200, 20, 111, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_4 = QtGui.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(190, 60, 131, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setText(_fromUtf8(""))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.table_no = 0
        self.current_table_position = table_position[0]  # default to home until the spin box changes
self.client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
self.goal = MoveBaseGoal()
self.update_values()
self.retranslateUi(Form)
QtCore.QObject.connect(self.spinBox, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.set_table_number)
QtCore.QObject.connect(self.pushButton_3, QtCore.SIGNAL(_fromUtf8("clicked()")), self.Home)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.Go)
QtCore.QObject.connect(self.pushButton_2, QtCore.SIGNAL(_fromUtf8("clicked()")), self.Cancel)
QtCore.QMetaObject.connectSlotsByName(Form)
def set_table_number(self):
self.table_no = self.spinBox.value()
self.current_table_position = table_position[self.table_no]
print self.current_table_position
def Go(self):
print "Go"
print "Waiting for server"
# self.client.wait_for_server()
        self.goal.target_pose.pose.position.x = float(self.current_table_position[0])
        self.goal.target_pose.pose.position.y = float(self.current_table_position[1])
        self.goal.target_pose.pose.position.z = float(self.current_table_position[2])
        self.goal.target_pose.pose.orientation.x = float(self.current_table_position[3])
        self.goal.target_pose.pose.orientation.y = float(self.current_table_position[4])
        self.goal.target_pose.pose.orientation.z = float(self.current_table_position[5])
        self.goal.target_pose.pose.orientation.w = float(self.current_table_position[6])
        self.goal.target_pose.header.frame_id = 'map'
        self.goal.target_pose.header.stamp = rospy.Time.now()
# print temp_table_pose[0]
# print temp_table_pose[1]
print "Go"
self.client.send_goal(self.goal)
# self.client.wait_for_result()
# rospy.loginfo(self.client.get_result())
def Cancel(self):
print "Cancel"
self.client.cancel_all_goals()
def Home(self):
print "Home"
self.current_table_position = table_position[0]
self.Go()
def add(self,text):
battery_value = rospy.get_param("battery_value")
robot_status = rospy.get_param("robot_status")
self.progressBar.setProperty("value", battery_value)
self.label_4.setText(_fromUtf8(robot_status))
def update_values(self):
self.thread = WorkThread()
QtCore.QObject.connect( self.thread, QtCore.SIGNAL("update(QString)"), self.add )
self.thread.start()
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Robot", None))
self.label.setText(_translate("Form", "Table No(1-9)", None))
self.pushButton.setText(_translate("Form", "Go", None))
self.pushButton_2.setText(_translate("Form", "Cancel", None))
self.pushButton_3.setText(_translate("Form", "Home", None))
self.label_2.setText(_translate("Form", "Battery Level", None))
self.label_3.setText(_translate("Form", "Robot Status", None))
class WorkThread(QtCore.QThread):
def __init__(self):
QtCore.QThread.__init__(self)
def __del__(self):
self.wait()
def run(self):
while True:
time.sleep(0.3) # artificial time delay
self.emit( QtCore.SIGNAL('update(QString)'), " " )
# print "Hello"
return
if __name__ == "__main__":
import sys
rospy.init_node('robot_gui')
rospy.set_param('battery_value',0)
rospy.set_param('robot_status'," ")
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
|
import pytest
from datetime import datetime
from marshmallow import EXCLUDE, fields, INCLUDE, RAISE, Schema, validate
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec import exceptions, utils, APISpec
from .schemas import CustomList, CustomStringField
from .utils import get_schemas, build_ref
class TestMarshmallowFieldToOpenAPI:
def test_fields_with_load_default_load(self, openapi):
class MySchema(Schema):
field = fields.Str(dump_default="foo", load_default="bar")
res = openapi.schema2parameters(MySchema, location="query")
if openapi.openapi_version.major < 3:
assert res[0]["default"] == "bar"
else:
assert res[0]["schema"]["default"] == "bar"
# json/body is invalid for OpenAPI 3
@pytest.mark.parametrize("openapi", ("2.0",), indirect=True)
def test_fields_default_location_mapping_if_schema_many(self, openapi):
class ExampleSchema(Schema):
id = fields.Int()
schema = ExampleSchema(many=True)
res = openapi.schema2parameters(schema=schema, location="json")
assert res[0]["in"] == "body"
def test_fields_with_dump_only(self, openapi):
class UserSchema(Schema):
name = fields.Str(dump_only=True)
res = openapi.schema2parameters(schema=UserSchema(), location="query")
assert len(res) == 0
class UserSchema(Schema):
name = fields.Str()
class Meta:
dump_only = ("name",)
res = openapi.schema2parameters(schema=UserSchema(), location="query")
assert len(res) == 0
class TestMarshmallowSchemaToModelDefinition:
def test_schema2jsonschema_with_explicit_fields(self, openapi):
class UserSchema(Schema):
_id = fields.Int()
email = fields.Email(metadata={"description": "email address of the user"})
name = fields.Str()
class Meta:
title = "User"
res = openapi.schema2jsonschema(UserSchema)
assert res["title"] == "User"
assert res["type"] == "object"
props = res["properties"]
assert props["_id"]["type"] == "integer"
assert props["email"]["type"] == "string"
assert props["email"]["format"] == "email"
assert props["email"]["description"] == "email address of the user"
def test_schema2jsonschema_override_name(self, openapi):
class ExampleSchema(Schema):
_id = fields.Int(data_key="id")
_global = fields.Int(data_key="global")
class Meta:
exclude = ("_global",)
res = openapi.schema2jsonschema(ExampleSchema)
assert res["type"] == "object"
props = res["properties"]
# `_id` renamed to `id`
assert "_id" not in props and props["id"]["type"] == "integer"
# `_global` excluded correctly
assert "_global" not in props and "global" not in props
def test_required_fields(self, openapi):
class BandSchema(Schema):
drummer = fields.Str(required=True)
bassist = fields.Str()
res = openapi.schema2jsonschema(BandSchema)
assert res["required"] == ["drummer"]
def test_partial(self, openapi):
class BandSchema(Schema):
drummer = fields.Str(required=True)
bassist = fields.Str(required=True)
res = openapi.schema2jsonschema(BandSchema(partial=True))
assert "required" not in res
res = openapi.schema2jsonschema(BandSchema(partial=("drummer",)))
assert res["required"] == ["bassist"]
def test_no_required_fields(self, openapi):
class BandSchema(Schema):
drummer = fields.Str()
bassist = fields.Str()
res = openapi.schema2jsonschema(BandSchema)
assert "required" not in res
def test_title_and_description_may_be_added(self, openapi):
class UserSchema(Schema):
class Meta:
title = "User"
description = "A registered user"
res = openapi.schema2jsonschema(UserSchema)
assert res["description"] == "A registered user"
assert res["title"] == "User"
def test_excluded_fields(self, openapi):
class WhiteStripesSchema(Schema):
class Meta:
exclude = ("bassist",)
guitarist = fields.Str()
drummer = fields.Str()
bassist = fields.Str()
res = openapi.schema2jsonschema(WhiteStripesSchema)
assert set(res["properties"].keys()) == {"guitarist", "drummer"}
def test_unknown_values_disallow(self, openapi):
class UnknownRaiseSchema(Schema):
class Meta:
unknown = RAISE
first = fields.Str()
res = openapi.schema2jsonschema(UnknownRaiseSchema)
assert res["additionalProperties"] is False
def test_unknown_values_allow(self, openapi):
class UnknownIncludeSchema(Schema):
class Meta:
unknown = INCLUDE
first = fields.Str()
res = openapi.schema2jsonschema(UnknownIncludeSchema)
assert res["additionalProperties"] is True
def test_unknown_values_ignore(self, openapi):
class UnknownExcludeSchema(Schema):
class Meta:
unknown = EXCLUDE
first = fields.Str()
res = openapi.schema2jsonschema(UnknownExcludeSchema)
assert "additionalProperties" not in res
def test_only_explicitly_declared_fields_are_translated(self, openapi):
class UserSchema(Schema):
_id = fields.Int()
class Meta:
title = "User"
fields = ("_id", "email")
with pytest.warns(
UserWarning,
match="Only explicitly-declared fields will be included in the Schema Object.",
):
res = openapi.schema2jsonschema(UserSchema)
assert res["type"] == "object"
props = res["properties"]
assert "_id" in props
assert "email" not in props
def test_observed_field_name_for_required_field(self, openapi):
fields_dict = {"user_id": fields.Int(data_key="id", required=True)}
res = openapi.fields2jsonschema(fields_dict)
assert res["required"] == ["id"]
@pytest.mark.parametrize("many", (True, False))
def test_schema_instance_inspection(self, openapi, many):
class UserSchema(Schema):
_id = fields.Int()
res = openapi.schema2jsonschema(UserSchema(many=many))
assert res["type"] == "object"
props = res["properties"]
assert "_id" in props
def test_raises_error_if_no_declared_fields(self, openapi):
class NotASchema:
pass
expected_error = (
f"{NotASchema!r} is neither a Schema class nor a Schema instance."
)
with pytest.raises(ValueError, match=expected_error):
openapi.schema2jsonschema(NotASchema)
class TestMarshmallowSchemaToParameters:
@pytest.mark.parametrize("ListClass", [fields.List, CustomList])
def test_field_multiple(self, ListClass, openapi):
field = ListClass(fields.Str)
res = openapi._field2parameter(field, name="field", location="query")
assert res["in"] == "query"
if openapi.openapi_version.major < 3:
assert res["type"] == "array"
assert res["items"]["type"] == "string"
assert res["collectionFormat"] == "multi"
else:
assert res["schema"]["type"] == "array"
assert res["schema"]["items"]["type"] == "string"
assert res["style"] == "form"
assert res["explode"] is True
def test_field_required(self, openapi):
field = fields.Str(required=True)
res = openapi._field2parameter(field, name="field", location="query")
assert res["required"] is True
def test_schema_partial(self, openapi):
class UserSchema(Schema):
field = fields.Str(required=True)
res_nodump = openapi.schema2parameters(
UserSchema(partial=True), location="query"
)
param = res_nodump[0]
assert param["required"] is False
def test_schema_partial_list(self, openapi):
class UserSchema(Schema):
field = fields.Str(required=True)
partial_field = fields.Str(required=True)
res_nodump = openapi.schema2parameters(
UserSchema(partial=("partial_field",)), location="query"
)
param = next(p for p in res_nodump if p["name"] == "field")
assert param["required"] is True
param = next(p for p in res_nodump if p["name"] == "partial_field")
assert param["required"] is False
# json/body is invalid for OpenAPI 3
@pytest.mark.parametrize("openapi", ("2.0",), indirect=True)
def test_schema_body(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(UserSchema, location="body")
assert len(res) == 1
param = res[0]
assert param["in"] == "body"
assert param["schema"] == {"$ref": "#/definitions/User"}
# json/body is invalid for OpenAPI 3
@pytest.mark.parametrize("openapi", ("2.0",), indirect=True)
def test_schema_body_with_dump_only(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email(dump_only=True)
res_nodump = openapi.schema2parameters(UserSchema, location="body")
assert len(res_nodump) == 1
param = res_nodump[0]
assert param["in"] == "body"
assert param["schema"] == build_ref(openapi.spec, "schema", "User")
# json/body is invalid for OpenAPI 3
@pytest.mark.parametrize("openapi", ("2.0",), indirect=True)
def test_schema_body_many(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(UserSchema(many=True), location="body")
assert len(res) == 1
param = res[0]
assert param["in"] == "body"
assert param["schema"]["type"] == "array"
assert param["schema"]["items"] == {"$ref": "#/definitions/User"}
def test_schema_query(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(UserSchema, location="query")
assert len(res) == 2
res.sort(key=lambda param: param["name"])
assert res[0]["name"] == "email"
assert res[0]["in"] == "query"
assert res[1]["name"] == "name"
assert res[1]["in"] == "query"
def test_schema_query_instance(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(UserSchema(), location="query")
assert len(res) == 2
res.sort(key=lambda param: param["name"])
assert res[0]["name"] == "email"
assert res[0]["in"] == "query"
assert res[1]["name"] == "name"
assert res[1]["in"] == "query"
def test_schema_query_instance_many_should_raise_exception(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
with pytest.raises(AssertionError):
openapi.schema2parameters(UserSchema(many=True), location="query")
def test_fields_query(self, openapi):
class MySchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(MySchema, location="query")
assert len(res) == 2
res.sort(key=lambda param: param["name"])
assert res[0]["name"] == "email"
assert res[0]["in"] == "query"
assert res[1]["name"] == "name"
assert res[1]["in"] == "query"
def test_raises_error_if_not_a_schema(self, openapi):
class NotASchema:
pass
expected_error = (
f"{NotASchema!r} is neither a Schema class nor a Schema instance."
)
with pytest.raises(ValueError, match=expected_error):
openapi.schema2jsonschema(NotASchema)
class CategorySchema(Schema):
id = fields.Int()
name = fields.Str(required=True)
breed = fields.Str(dump_only=True)
class PageSchema(Schema):
offset = fields.Int()
limit = fields.Int()
class PetSchema(Schema):
category = fields.Nested(CategorySchema, many=True)
name = fields.Str()
class TestNesting:
def test_schema2jsonschema_with_nested_fields(self, spec_fixture):
res = spec_fixture.openapi.schema2jsonschema(PetSchema)
props = res["properties"]
assert props["category"]["items"] == build_ref(
spec_fixture.spec, "schema", "Category"
)
@pytest.mark.parametrize("modifier", ("only", "exclude"))
def test_schema2jsonschema_with_nested_fields_only_exclude(
self, spec_fixture, modifier
):
class Child(Schema):
i = fields.Int()
j = fields.Int()
class Parent(Schema):
child = fields.Nested(Child, **{modifier: ("i",)})
spec_fixture.openapi.schema2jsonschema(Parent)
props = get_schemas(spec_fixture.spec)["Child"]["properties"]
assert ("i" in props) == (modifier == "only")
assert ("j" not in props) == (modifier == "only")
def test_schema2jsonschema_with_plucked_field(self, spec_fixture):
class PetSchema(Schema):
breed = fields.Pluck(CategorySchema, "breed")
category_schema = spec_fixture.openapi.schema2jsonschema(CategorySchema)
pet_schema = spec_fixture.openapi.schema2jsonschema(PetSchema)
assert (
pet_schema["properties"]["breed"] == category_schema["properties"]["breed"]
)
def test_schema2jsonschema_with_nested_fields_with_adhoc_changes(
self, spec_fixture
):
category_schema = CategorySchema()
category_schema.fields["id"].required = True
class PetSchema(Schema):
category = fields.Nested(category_schema, many=True)
name = fields.Str()
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
props = get_schemas(spec_fixture.spec)
assert props["Category"] == spec_fixture.openapi.schema2jsonschema(
category_schema
)
assert set(props["Category"]["required"]) == {"id", "name"}
props["Category"]["required"] = ["name"]
assert props["Category"] == spec_fixture.openapi.schema2jsonschema(
CategorySchema
)
def test_schema2jsonschema_with_plucked_fields_with_adhoc_changes(
self, spec_fixture
):
category_schema = CategorySchema()
category_schema.fields["breed"].dump_only = True
class PetSchema(Schema):
breed = fields.Pluck(category_schema, "breed", many=True)
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
props = get_schemas(spec_fixture.spec)["Pet"]["properties"]
assert props["breed"]["items"]["readOnly"] is True
def test_schema2jsonschema_with_nested_excluded_fields(self, spec):
category_schema = CategorySchema(exclude=("breed",))
class PetSchema(Schema):
category = fields.Nested(category_schema)
spec.components.schema("Pet", schema=PetSchema)
category_props = get_schemas(spec)["Category"]["properties"]
assert "breed" not in category_props
def test_openapi_tools_validate_v2():
ma_plugin = MarshmallowPlugin()
spec = APISpec(
title="Pets", version="0.1", plugins=(ma_plugin,), openapi_version="2.0"
)
openapi = ma_plugin.converter
spec.components.schema("Category", schema=CategorySchema)
spec.components.schema("Pet", {"discriminator": "name"}, schema=PetSchema)
spec.path(
view=None,
path="/category/{category_id}",
operations={
"get": {
"parameters": [
{"name": "q", "in": "query", "type": "string"},
{
"name": "category_id",
"in": "path",
"required": True,
"type": "string",
},
openapi._field2parameter(
field=fields.List(
fields.Str(),
validate=validate.OneOf(["freddie", "roger"]),
),
location="query",
name="body",
),
]
+ openapi.schema2parameters(PageSchema, location="query"),
"responses": {200: {"schema": PetSchema, "description": "A pet"}},
},
"post": {
"parameters": (
[
{
"name": "category_id",
"in": "path",
"required": True,
"type": "string",
}
]
+ openapi.schema2parameters(CategorySchema, location="body")
),
"responses": {201: {"schema": PetSchema, "description": "A pet"}},
},
},
)
try:
utils.validate_spec(spec)
except exceptions.OpenAPIError as error:
pytest.fail(str(error))
def test_openapi_tools_validate_v3():
ma_plugin = MarshmallowPlugin()
spec = APISpec(
title="Pets", version="0.1", plugins=(ma_plugin,), openapi_version="3.0.0"
)
openapi = ma_plugin.converter
spec.components.schema("Category", schema=CategorySchema)
spec.components.schema("Pet", schema=PetSchema)
spec.path(
view=None,
path="/category/{category_id}",
operations={
"get": {
"parameters": [
{"name": "q", "in": "query", "schema": {"type": "string"}},
{
"name": "category_id",
"in": "path",
"required": True,
"schema": {"type": "string"},
},
openapi._field2parameter(
field=fields.List(
fields.Str(),
validate=validate.OneOf(["freddie", "roger"]),
),
location="query",
name="body",
),
]
+ openapi.schema2parameters(PageSchema, location="query"),
"responses": {
200: {
"description": "success",
"content": {"application/json": {"schema": PetSchema}},
}
},
},
"post": {
"parameters": (
[
{
"name": "category_id",
"in": "path",
"required": True,
"schema": {"type": "string"},
}
]
),
"requestBody": {
"content": {"application/json": {"schema": CategorySchema}}
},
"responses": {
201: {
"description": "created",
"content": {"application/json": {"schema": PetSchema}},
}
},
},
},
)
try:
utils.validate_spec(spec)
except exceptions.OpenAPIError as error:
pytest.fail(str(error))
class TestFieldValidation:
class ValidationSchema(Schema):
id = fields.Int(dump_only=True)
range = fields.Int(validate=validate.Range(min=1, max=10))
range_no_upper = fields.Float(validate=validate.Range(min=1))
multiple_ranges = fields.Int(
validate=[
validate.Range(min=1),
validate.Range(min=3),
validate.Range(max=10),
validate.Range(max=7),
]
)
list_length = fields.List(fields.Str, validate=validate.Length(min=1, max=10))
custom_list_length = CustomList(
fields.Str, validate=validate.Length(min=1, max=10)
)
string_length = fields.Str(validate=validate.Length(min=1, max=10))
custom_field_length = CustomStringField(validate=validate.Length(min=1, max=10))
multiple_lengths = fields.Str(
validate=[
validate.Length(min=1),
validate.Length(min=3),
validate.Length(max=10),
validate.Length(max=7),
]
)
equal_length = fields.Str(
validate=[validate.Length(equal=5), validate.Length(min=1, max=10)]
)
date_range = fields.DateTime(
validate=validate.Range(
min=datetime(1900, 1, 1),
)
)
@pytest.mark.parametrize(
("field", "properties"),
[
("range", {"minimum": 1, "maximum": 10}),
("range_no_upper", {"minimum": 1}),
("multiple_ranges", {"minimum": 3, "maximum": 7}),
("list_length", {"minItems": 1, "maxItems": 10}),
("custom_list_length", {"minItems": 1, "maxItems": 10}),
("string_length", {"minLength": 1, "maxLength": 10}),
("custom_field_length", {"minLength": 1, "maxLength": 10}),
("multiple_lengths", {"minLength": 3, "maxLength": 7}),
("equal_length", {"minLength": 5, "maxLength": 5}),
("date_range", {"x-minimum": datetime(1900, 1, 1)}),
],
)
def test_properties(self, field, properties, spec):
spec.components.schema("Validation", schema=self.ValidationSchema)
result = get_schemas(spec)["Validation"]["properties"][field]
for attr, expected_value in properties.items():
assert attr in result
assert result[attr] == expected_value
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `SnapshotDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import shutil
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SnapshotDatasetTest(reader_dataset_ops_test_base.TFRecordDatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(SnapshotDatasetTest, self).setUp()
tmpdir = self.get_temp_dir()
tmpdir = os.path.join(tmpdir, "snapshot")
os.mkdir(tmpdir)
self._snapshot_dir = tmpdir
def tearDown(self):
super(SnapshotDatasetTest, self).tearDown()
shutil.rmtree(self._snapshot_dir)
def createTFRecords(self, num_files=10, num_records=100):
self._num_files = num_files
self._num_records = num_records
self._test_filenames = self._createFiles()
def removeTFRecords(self):
for filename in self._test_filenames:
os.remove(filename)
self._test_filenames = []
self._num_files = None
self._num_records = None
def assertDatasetProducesSet(self, dataset, expected):
actual = []
next_fn = self.getNext(dataset)
for _ in range(len(expected)):
elem = self.evaluate(next_fn())
actual.append(elem)
self.assertCountEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_fn())
def assertSnapshotDirectoryContains(self, directory, num_fingerprints,
num_runs_per_fingerprint,
num_snapshot_shards_per_run):
dirlist_raw = os.listdir(directory)
dirlist = []
# Ignore the graphdef pbtxts we write for debugging purposes.
for i in range(len(dirlist_raw)):
if not dirlist_raw[i].endswith("-graph.pbtxt"):
dirlist.append(dirlist_raw[i])
self.assertLen(dirlist, num_fingerprints)
for i in range(num_fingerprints):
fingerprint_dir = os.path.join(directory, dirlist[i])
fingerprint_dir_list = sorted(os.listdir(fingerprint_dir))
self.assertLen(fingerprint_dir_list, num_runs_per_fingerprint + 1)
self.assertEqual(fingerprint_dir_list[num_runs_per_fingerprint],
"snapshot.metadata")
for j in range(num_runs_per_fingerprint):
run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
run_dirlist = sorted(os.listdir(run_dir))
self.assertLen(run_dirlist, num_snapshot_shards_per_run)
file_counter = 0
for filename in run_dirlist:
self.assertEqual(filename, "%08d.shard" % file_counter)
file_counter += 1
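  # Sketch of the on-disk layout the helper above verifies (names illustrative):
  #   <snapshot_dir>/<fingerprint>/snapshot.metadata
  #   <snapshot_dir>/<fingerprint>/<run_id>/00000000.shard, 00000001.shard, ...
  # plus optional "*-graph.pbtxt" debug files, which are ignored.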
@combinations.generate(test_base.default_test_combinations())
def testCreateSnapshotDataset(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3])
dataset.apply(snapshot.snapshot(self._snapshot_dir))
@combinations.generate(test_base.default_test_combinations())
def testReadSnapshotDatasetDefault(self):
self.createTFRecords()
filenames = self._test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 100)
]
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset, expected)
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testReadSnapshotDatasetAutoWriteSnappyRead(self):
self.createTFRecords()
filenames = self._test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 100)
]
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.snapshot(self._snapshot_dir, compression="AUTO"))
self.assertDatasetProduces(dataset, expected)
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.snapshot(self._snapshot_dir, compression="SNAPPY"))
self.assertDatasetProduces(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testReadSnapshotDatasetCustomShardFn(self):
self.createTFRecords()
filenames = self._test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 100)
]
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.snapshot(self._snapshot_dir, shard_func=lambda _: np.int64(0)))
self.assertDatasetProduces(dataset, expected)
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=1)
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.snapshot(self._snapshot_dir, shard_func=lambda _: 0))
self.assertDatasetProduces(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testReadSnapshotDatasetCustomReaderFn(self):
self.createTFRecords()
filenames = self._test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 100)
]
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.snapshot(
self._snapshot_dir,
reader_func=(
lambda ds: ds.interleave( # pylint:disable=g-long-lambda
lambda x: x,
cycle_length=4,
num_parallel_calls=4))))
self.assertDatasetProduces(dataset, expected)
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.snapshot(
self._snapshot_dir,
reader_func=(
lambda ds: ds.interleave( # pylint:disable=g-long-lambda
lambda x: x,
cycle_length=4,
num_parallel_calls=4))))
self.assertDatasetProducesSet(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testSnapshotDatasetInvalidShardFn(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaises(TypeError):
dataset = dataset.apply(
snapshot.snapshot(
self._snapshot_dir, shard_func=lambda _: "invalid_fn"))
next_fn = self.getNext(dataset)
self.evaluate(next_fn())
@combinations.generate(test_base.default_test_combinations())
def testSnapshotDatasetInvalidReaderFn(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaises(TypeError):
dataset = dataset.apply(
snapshot.snapshot(self._snapshot_dir, reader_func=lambda x: x + 1))
next_fn = self.getNext(dataset)
self.evaluate(next_fn())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetSimple(self):
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset, list(range(1000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetMultipleFingerprints(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset1, list(range(1000)))
dataset2 = dataset_ops.Dataset.range(2000)
dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset2, list(range(2000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=2,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetSameFingerprintMultipleCompleteRuns(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset1, list(range(1000)))
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
self.assertDatasetProduces(dataset2, list(range(1000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))
next1 = self.getNext(dataset1)
for i in range(500):
self.assertEqual(i, self.evaluate(next1()))
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))
next2 = self.getNext(dataset2)
for i in range(500):
self.assertEqual(i, self.evaluate(next2()))
for i in range(500, 1000):
self.assertEqual(i, self.evaluate(next1()))
self.assertEqual(i, self.evaluate(next2()))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=2,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotCustomShardFunction(self):
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.enumerate()
dataset = dataset.apply(
snapshot.snapshot(self._snapshot_dir, shard_func=lambda i, _: i % 2))
dataset = dataset.map(lambda _, elem: elem)
self.assertDatasetProduces(dataset, list(range(1000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=2)
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotDatasetWithTuples(self):
dataset1 = dataset_ops.Dataset.range(0, 1000)
dataset2 = dataset_ops.Dataset.range(1000, 2000)
dataset3 = dataset_ops.Dataset.range(2000, 3000)
dataset4 = dataset_ops.Dataset.range(3000, 4000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2, dataset3, dataset4))
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
next1 = self.getNext(dataset)
for i in range(0, 1000):
self.assertEqual((i, i + 1000, i + 2000, i + 3000),
self.evaluate(next1()))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotShuffleSameFingerprint(self):
def make_dataset():
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.shuffle(1000)
dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
return dataset
dataset1 = make_dataset()
self.assertDatasetProducesSet(dataset1, list(range(1000)))
dataset2 = make_dataset()
self.assertDatasetProducesSet(dataset2, list(range(1000)))
self.assertSnapshotDirectoryContains(
self._snapshot_dir,
num_fingerprints=1,
num_runs_per_fingerprint=1,
num_snapshot_shards_per_run=multiprocessing.cpu_count())
class LegacySnapshotDatasetTest(
reader_dataset_ops_test_base.TFRecordDatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(LegacySnapshotDatasetTest, self).setUp()
self.removeTFRecords()
tmpdir = self.get_temp_dir()
tmpdir = os.path.join(tmpdir, "snapshot")
os.mkdir(tmpdir)
self.snapshot_dir = tmpdir
def tearDown(self):
super(LegacySnapshotDatasetTest, self).tearDown()
shutil.rmtree(self.snapshot_dir)
def removeTFRecords(self):
for filename in self.test_filenames:
os.remove(filename)
self.test_filenames = []
def setUpTFRecord(self, num_files=10, num_records=10):
self._num_files = num_files
self._num_records = num_records
self.test_filenames = self._createFiles()
def makeSnapshotDirectory(self):
return self.snapshot_dir
def assertSnapshotDirectoryContains(self, directory, num_fingerprints,
num_runs_per_fp, num_snapshot_files):
dirlist_raw = os.listdir(directory)
dirlist = []
# Ignore the graphdef pbtxts we write for debugging purposes.
for i in range(len(dirlist_raw)):
if not dirlist_raw[i].endswith("-graph.pbtxt"):
dirlist.append(dirlist_raw[i])
self.assertLen(dirlist, num_fingerprints)
for i in range(num_fingerprints):
fingerprint_dir = os.path.join(directory, dirlist[i])
fingerprint_dir_list = sorted(os.listdir(fingerprint_dir))
self.assertLen(fingerprint_dir_list, num_runs_per_fp + 1)
self.assertEqual(fingerprint_dir_list[num_runs_per_fp],
"snapshot.metadata")
for j in range(num_runs_per_fp):
run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
run_dirlist = sorted(os.listdir(run_dir))
self.assertLen(run_dirlist, num_snapshot_files)
file_counter = 0
for filename in run_dirlist:
self.assertEqual(filename, "%08d.snapshot" % file_counter)
file_counter += 1
@combinations.generate(test_base.default_test_combinations())
def testWriteDifferentPipelinesInOneDirectory(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(1000)))
dataset = dataset_ops.Dataset.range(1001)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(1001)))
self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testWriteSnapshotMultipleSimultaneous(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))
next1 = self.getNext(dataset1)
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
next2 = self.getNext(dataset2)
for i in range(0, 1000):
self.assertEqual(i, self.evaluate(next1()))
self.assertEqual(i, self.evaluate(next2()))
# we check that only one copy of the metadata has been written, and the
# one that lost the race would be in passthrough mode.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testGetNextCreatesDir(self):
tmpdir = self.snapshot_dir
# We create two iterators but call getNext on only one.
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))
next1 = self.getNext(dataset1)
dataset2 = dataset_ops.Dataset.range(1001)
dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
_ = self.getNext(dataset2)
for _ in range(1000):
self.evaluate(next1())
# We check that only one directory is created.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotSimpleSuccessful(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset, list(range(1000)))
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotRepeatAfterwards(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testWriteSnapshotMixTypes(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
def map_fn(x):
return (x, string_ops.as_string(x), string_ops.as_string(2 * x), 2 * x)
dataset = dataset.map(map_fn)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
dataset = dataset.repeat(10)
expected = []
for i in range(10):
expected.append((i, str(i), str(2 * i), 2 * i))
self.assertDatasetProduces(dataset, expected * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testSpecifySnapshotNameWriteAndRead(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, snapshot_name="my_custom_snapshot"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
self.assertTrue(
os.path.exists(os.path.join(tmpdir, "custom-my_custom_snapshot")))
self.assertTrue(
os.path.exists(
os.path.join(tmpdir, "custom-my_custom_snapshot", "custom")))
@combinations.generate(test_base.default_test_combinations())
def testForcePassthroughMode(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, mode="passthrough"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 0, 0, 0)
@combinations.generate(test_base.default_test_combinations())
def testForceWriteMode(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode="write"))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
# We will end up writing 10 different runs.
self.assertSnapshotDirectoryContains(tmpdir, 1, 10, 1)
@combinations.generate(test_base.default_test_combinations())
def testForceReadMode(self):
tmpdir = self.snapshot_dir
# We write a copy of the snapshot first.
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="write", snapshot_name="my_custom_snapshot"))
self.assertDatasetProduces(dataset, list(range(10)))
# We move the run to a new name.
shutil.move(
os.path.join(tmpdir, "custom-my_custom_snapshot"),
os.path.join(tmpdir, "custom-my_custom_snapshot_2"))
# Even though snapshot.metadata still points to the old run, which no longer
# exists after the move, we force the dataset to read from the run we specify.
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="read", snapshot_name="my_custom_snapshot_2"))
self.assertDatasetProduces(dataset, list(range(10)))
# We should still have one snapshot and one run.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testForceReadNonexistentSnapshot(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(errors.NotFoundError):
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode="read"))
get_next = self.getNext(dataset)
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testForceReadNonexistentNamedSnapshot(self):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.range(10)
with self.assertRaises(errors.NotFoundError):
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, mode="read", snapshot_name="my_nonexistent_snapshot"))
get_next = self.getNext(dataset)
self.evaluate(get_next())
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testReadSnapshotBackAfterWrite(self, compression):
self.setUpTFRecord()
filenames = self.test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset2, expected)
@combinations.generate(test_base.default_test_combinations())
def testReadShuffledSnapshotAfterWrite(self):
self.setUpTFRecord(num_files=10, num_records=50)
filenames = self.test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 50)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=100))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(
tmpdir, shard_size_bytes=100, shuffle_on_read=True))
next2 = self.getNext(dataset2)
res1 = self.evaluate(next2())
res2 = self.evaluate(next2())
res3 = self.evaluate(next2())
res4 = self.evaluate(next2())
res5 = self.evaluate(next2())
# make sure that we don't read the file back in the same order.
self.assertNotEqual([res1, res2, res3, res4, res5], expected[0:5])
# make sure all the elements are still there
dataset3 = core_readers._TFRecordDataset(filenames)
dataset3 = dataset3.apply(
snapshot.legacy_snapshot(
tmpdir, shard_size_bytes=100, shuffle_on_read=True))
self.assertDatasetProduces(dataset3, expected, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testReadShuffledSnapshotWithSeedAfterWrite(self):
self.setUpTFRecord(num_files=10, num_records=50)
filenames = self.test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 50)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(
tmpdir,
shard_size_bytes=10,
shuffle_on_read=True,
shuffle_seed=123456))
next2 = self.getNext(dataset2)
dataset3 = core_readers._TFRecordDataset(filenames)
dataset3 = dataset3.apply(
snapshot.legacy_snapshot(
tmpdir,
shard_size_bytes=10,
shuffle_on_read=True,
shuffle_seed=123456))
next3 = self.getNext(dataset3)
# make sure that the items are read back in the same order for both datasets
for _ in range(500):
res2 = self.evaluate(next2())
res3 = self.evaluate(next3())
self.assertEqual(res2, res3)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testReadSnapshotParallelAfterWrite(self, compression):
self.setUpTFRecord(10, 4000)
filenames = self.test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 4000)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir,
shard_size_bytes=1024 * 1024,
num_reader_threads=2,
reader_buffer_size=10,
compression=compression))
self.assertDatasetProduces(dataset, expected, assert_items_equal=True)
# remove the original files and try to read the data back only from
# snapshot.
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(
tmpdir,
shard_size_bytes=1024 * 1024,
num_reader_threads=2,
reader_buffer_size=10,
compression=compression))
self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)
# Not testing Snappy here because Snappy reads currently require a lot of
# memory.
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.times(
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP
]),
combinations.combine(threads=2, size=[1, 2]) +
combinations.combine(threads=8, size=[1, 4, 8]))))
def testReadSnapshotBackAfterMultiThreadedWrite(self, compression, threads,
size):
self.setUpTFRecord()
filenames = self.test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir,
compression=compression,
num_writer_threads=threads,
writer_buffer_size=size))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from
# snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testSameFingerprintWithDifferentInitializationOrder(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(0, 100)
dataset2 = dataset_ops.Dataset.range(100, 200)
dataset3 = dataset_ops.Dataset.range(200, 300)
dataset = dataset1.concatenate(dataset2).concatenate(dataset3)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(300)))
dataset4 = dataset_ops.Dataset.range(200, 300)
dataset5 = dataset_ops.Dataset.range(100, 200)
dataset6 = dataset_ops.Dataset.range(0, 100)
dataset = dataset6.concatenate(dataset5).concatenate(dataset4)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(300)))
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@combinations.generate(test_base.default_test_combinations())
def testExpiredSnapshotRewrite(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(
snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
next1 = self.getNext(dataset1)
# Don't finish reading dataset1, so it is never finalized
for _ in range(500):
self.evaluate(next1())
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
time.sleep(2)
# We create dataset2 only after running through dataset1 because, in eager
# mode, the snapshot state is determined immediately upon dataset creation.
# We only want the snapshot state for dataset2 to be determined after the
# first snapshot has expired.
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
next2 = self.getNext(dataset2)
for _ in range(500):
self.evaluate(next2())
self.assertSnapshotDirectoryContains(tmpdir, 1, 2, 1)
@combinations.generate(test_base.default_test_combinations())
def testSnapshotArgsCreateNewSnapshot(self):
tmpdir = self.snapshot_dir
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10000))
next1 = self.getNext(dataset1)
for _ in range(1000):
self.evaluate(next1())
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
# Create second snapshot with a different shard_size_bytes
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(
snapshot.legacy_snapshot(tmpdir, shard_size_bytes=20000))
next2 = self.getNext(dataset2)
for _ in range(1000):
self.evaluate(next2())
self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[
snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
snapshot.COMPRESSION_SNAPPY
])))
def testSpecifyShardSize(self, compression):
tmpdir = self.snapshot_dir
dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))
dataset = dataset.repeat(10)
dataset = dataset.apply(
snapshot.legacy_snapshot(
tmpdir, shard_size_bytes=10 * 1024 * 1024, compression=compression))
next_fn = self.getNext(dataset)
for _ in range(10):
self.evaluate(next_fn())
num_files = 1
if compression == snapshot.COMPRESSION_NONE:
num_files = 3
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, num_files)
@combinations.generate(test_base.default_test_combinations())
def testAdditionalOperationsAfterReadBack(self):
self.setUpTFRecord()
filenames = self.test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
tmpdir = self.snapshot_dir
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset, expected)
# remove the original files and try to read the data back only from snapshot
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))
self.assertDatasetProduces(dataset2, expected)
expected_after = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
dataset3 = core_readers._TFRecordDataset(filenames)
dataset3 = dataset3.apply(snapshot.legacy_snapshot(tmpdir))
dataset3 = dataset3.map(lambda x: string_ops.substr_v2(x, 2, 1000))
self.assertDatasetProduces(dataset3, expected_after)
if __name__ == "__main__":
test.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
import tvm
from .. import defop, AllTypes, RealTypes
from .. import assign_by_req, reduce_axes
def compute_add(dtype, ndim):
A = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='A', dtype=dtype)
B = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='B', dtype=dtype)
C = tvm.compute([tvm.size_var() for _ in range(ndim)],
lambda *index: A[index] + B[index], name='C')
s = tvm.create_schedule(C.op)
return s, A, B, C
@defop(name="vadd", target="cpu", auto_broadcast=True,
dtype=AllTypes, ndim=[5])
def vadd(dtype, ndim):
s, A, B, C = compute_add(dtype, ndim)
axes = [axis for axis in C.op.axis]
fused = s[C].fuse(*axes)
s[C].parallel(fused)
return s, [A, B, C]
@defop(name="cuda_vadd", target="cuda", auto_broadcast=True,
dtype=["float32", "float64"], ndim=[5])
def vadd_gpu(dtype, ndim):
s, A, B, C = compute_add(dtype, ndim)
s = tvm.create_schedule(C.op)
axes = [axis for axis in C.op.axis]
fused = s[C].fuse(*axes)
bx, tx = s[C].split(fused, factor=64)
s[C].bind(bx, tvm.thread_axis("blockIdx.x"))
s[C].bind(tx, tvm.thread_axis("threadIdx.x"))
return s, [A, B, C]
def compute_backward_vadd(dtype, ndim, reduce1st, req):
# The backward of a broadcast op is basically a reduction over the broadcast axes.
# We label the reduce axes as 1 and the other axes as 0, and together they form a bit string.
# Each bit string corresponds to a kernel, so there can be as many as `2^n` kernels.
# To reduce that number, the bit string is compressed by combining consecutive 0s or 1s.
# In this way, the number of bit strings (and hence kernels) is reduced to `2 * n`.
# The compressed bit string is stored in `axes`, and `reduce1st` represents the first bit
# of the compressed bit string. Credit to @junrushao1994 and @yzhliu.
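# Illustrative example: with ndim = 5 and reduce1st = 1, the line below yields
# axes = [1, 0, 1, 0, 1] (reduce, keep, reduce, keep, reduce); with
# reduce1st = 0 it yields [0, 1, 0, 1, 0].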
axes = ([reduce1st, 1 - reduce1st] * ndim)[:ndim]
X = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='X', dtype=dtype)
reducer = tvm.comm_reducer(lambda x, y: x + y,
lambda t: tvm.const(0, dtype=t), name="sum")
ret = reduce_axes(X, axes, reducer)
in_grad_a, in_grad = assign_by_req(ret, req)
s = tvm.create_schedule(in_grad.op)
return s, X, in_grad_a, in_grad, [ret, in_grad]
@defop(name="backward_vadd", target="cpu", dtype=AllTypes,
ndim=[5], reduce1st=[0, 1],
req=["kWriteTo", "kAddTo"], attrs=["reduce1st", "req"])
def backward_vadd(dtype, ndim, reduce1st, req):
s, X, in_grad_a, in_grad, c_list = compute_backward_vadd(dtype, ndim, reduce1st, req)
for t in c_list:
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
s[t].parallel(fused)
return s, [X, in_grad_a, in_grad]
@defop(name="cuda_backward_vadd", target="gpu", dtype=["float32", "float64"],
ndim=[5], reduce1st=[0, 1],
req=["kWriteTo", "kAddTo"], attrs=["reduce1st", "req"])
def backward_vadd_gpu(dtype, ndim, reduce1st, req):
s, X, in_grad_a, in_grad, c_list = compute_backward_vadd(dtype, ndim, reduce1st, req)
num_thread = 64
for t in c_list:
block_x = tvm.thread_axis("blockIdx.x")
thread_x = tvm.thread_axis("threadIdx.x")
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
bx, tx = s[t].split(fused, factor=num_thread)
s[t].bind(bx, block_x)
s[t].bind(tx, thread_x)
return s, [X, in_grad_a, in_grad]
def compute_degandrad(dtype, ndim, n):
A = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='A', dtype=dtype)
import math
if n == 0:
B = tvm.compute([tvm.size_var() for _ in range(ndim)],
lambda *index: A[index] * tvm.const(math.pi, dtype) / tvm.const(180, dtype), name='B')
else:
B = tvm.compute([tvm.size_var() for _ in range(ndim)],
lambda *index: A[index] / tvm.const(math.pi, dtype) * tvm.const(180, dtype), name='B')
s = tvm.create_schedule(B.op)
return s, A, B
@defop(name="deg2rad", target="cpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)))
def deg2rad(dtype, ndim):
s, A, B = compute_degandrad(dtype, ndim, 0)
axes = [axis for axis in B.op.axis]
fused = s[B].fuse(*axes)
s[B].parallel(fused)
return s, [A, B]
@defop(name="rad2deg", target="cpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)))
def rad2deg(dtype, ndim):
s, A, B = compute_degandrad(dtype, ndim, 1)
axes = [axis for axis in B.op.axis]
fused = s[B].fuse(*axes)
s[B].parallel(fused)
return s, [A, B]
@defop(name="cuda_deg2rad", target="cuda", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)))
def deg2rad_gpu(dtype, ndim):
s, A, B = compute_degandrad(dtype, ndim, 0)
s = tvm.create_schedule(B.op)
axes = [axis for axis in B.op.axis]
fused = s[B].fuse(*axes)
bx, tx = s[B].split(fused, factor=64)
s[B].bind(bx, tvm.thread_axis("blockIdx.x"))
s[B].bind(tx, tvm.thread_axis("threadIdx.x"))
return s, [A, B]
@defop(name="cuda_rad2deg", target="cuda", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)))
def rad2deg_gpu(dtype, ndim):
s, A, B = compute_degandrad(dtype, ndim, 1)
s = tvm.create_schedule(B.op)
axes = [axis for axis in B.op.axis]
fused = s[B].fuse(*axes)
bx, tx = s[B].split(fused, factor=64)
s[B].bind(bx, tvm.thread_axis("blockIdx.x"))
s[B].bind(tx, tvm.thread_axis("threadIdx.x"))
return s, [A, B]
def compute_backward_degandrad(dtype, ndim, req, n):
ishape = [tvm.size_var() for _ in range(ndim)]
in_grad_tmp = tvm.placeholder(ishape, name='in_grad_tmp', dtype=dtype)
in_grad = tvm.placeholder(ishape, name='in_grad', dtype=dtype)
out_grad = tvm.placeholder(ishape, name='out_grad', dtype=dtype)
import math
if n == 0:
ret = tvm.compute(ishape, lambda *index: out_grad[index] * tvm.const(math.pi, dtype) / tvm.const(180, dtype))
else:
ret = tvm.compute(ishape, lambda *index: out_grad[index] / tvm.const(math.pi, dtype) * tvm.const(180, dtype))
if req == "kAddTo":
in_grad = tvm.compute(ishape, lambda *index: in_grad_tmp[index] + ret[index])
else:
in_grad = tvm.compute(ishape, lambda *index: ret[index])
s = tvm.create_schedule(in_grad.op)
return s, out_grad, in_grad_tmp, in_grad, [ret, in_grad]
@defop(name="backward_deg2rad", target="cpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)), req=["kWriteTo", "kAddTo"],
attrs=["req"])
def backward_deg2rad(dtype, ndim, req):
s, out_grad, in_grad_tmp, in_grad, c_list = compute_backward_degandrad(dtype, ndim, req, 0)
for t in c_list:
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
s[t].parallel(fused)
return s, [out_grad, in_grad, in_grad_tmp]
@defop(name="backward_rad2deg", target="cpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)), req=["kWriteTo", "kAddTo"],
attrs=["req"])
def backward_rad2deg(dtype, ndim, req):
s, out_grad, in_grad_tmp, in_grad, c_list = compute_backward_degandrad(dtype, ndim, req, 1)
for t in c_list:
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
s[t].parallel(fused)
return s, [out_grad, in_grad, in_grad_tmp]
@defop(name="cuda_backward_deg2rad", target="gpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)), req=["kWriteTo", "kAddTo"],
attrs=["req"])
def cuda_backward_deg2rad(dtype, ndim, req):
s, out_grad, in_grad_tmp, in_grad, c_list = compute_backward_degandrad(dtype, ndim, req, 0)
num_thread = 64
for t in c_list:
block_x = tvm.thread_axis("blockIdx.x")
thread_x = tvm.thread_axis("threadIdx.x")
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
bx, tx = s[t].split(fused, factor=num_thread)
s[t].bind(bx, block_x)
s[t].bind(tx, thread_x)
return s, [out_grad, in_grad, in_grad_tmp]
@defop(name="cuda_backward_rad2deg", target="gpu", auto_broadcast=False,
dtype=["float32", "float64"], ndim=list(range(0, 6)), req=["kWriteTo", "kAddTo"],
attrs=["req"])
def cuda_backward_rad2deg(dtype, ndim, req):
s, out_grad, in_grad_tmp, in_grad, c_list = compute_backward_degandrad(dtype, ndim, req, 1)
num_thread = 64
for t in c_list:
block_x = tvm.thread_axis("blockIdx.x")
thread_x = tvm.thread_axis("threadIdx.x")
axes = [axis for axis in t.op.axis]
fused = s[t].fuse(*axes)
bx, tx = s[t].split(fused, factor=num_thread)
s[t].bind(bx, block_x)
s[t].bind(tx, thread_x)
return s, [out_grad, in_grad, in_grad_tmp]
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes_bi, hex_str_to_bytes
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, e.g.:
>>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [
["-txindex"],
["-txindex"],
["-txindex"],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={}) # Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
)
for type in ["bech32", "p2sh-segwit", "legacy"]:
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
# Test `signrawtransactionwithwallet` invalid `prevtxs`
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type != "legacy":
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
#########################################
# sendrawtransaction with missing input #
#########################################
self.log.info('sendrawtransaction with missing input')
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # this input won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) # node2 has both keys of the 2-of-2 multisig address, so the tx should affect its balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# THIS IS AN INCOMPLETE FEATURE:
# NODE2 HAS TWO OF THE THREE KEYS, SO THE FUNDS SHOULD BE SPENDABLE AND COUNT TOWARDS THE BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) # for now, assume the funds of a 2-of-3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('2.20000000'))
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) # node1 only has one key, so it cannot completely sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) # node2 owns two of the three keys, so it can completely sign the tx
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = next(o for o in rawTx2['vout'] if o['value'] == Decimal('2.20000000'))
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) # node1 only has one key, so it cannot completely sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) # node2 only has one key, so it cannot completely sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txId = rawTx["txid"]
assert_equal(self.nodes[0].getrawtransaction(txId), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txId, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txId, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], -0x80000000)
# Test the maximum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
self.log.info('sendrawtransaction/testmempoolaccept with maxfeerate')
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
rawTx = self.nodes[0].getrawtransaction(txId, True)
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
self.sync_all()
inputs = [{ "txid" : txId, "vout" : vout['n'] }]
outputs = { self.nodes[0].getnewaddress() : Decimal("0.99999000") } # 1000 sat fee
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
# 1000 sat fee, ~100 b transaction, fee rate should land around 10 sat/b = 0.00010000 BTC/kB
# Thus, testmempoolaccept should reject
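# (1000 sat over ~100 bytes is ~10 sat/byte; at 1000 bytes per kB that is
# ~10000 sat/kB = 0.00010000 BTC/kB, well above the 0.00001000 BTC/kB
# maxfeerate passed below, hence the rejection.)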
testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']], 0.00001000)[0]
assert_equal(testres['allowed'], False)
assert_equal(testres['reject-reason'], '256: absurdly-high-fee')
# and sendrawtransaction should throw
assert_raises_rpc_error(-26, "absurdly-high-fee", self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 0.00001000)
# And below calls should both succeed
testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']], maxfeerate='0.00070000')[0]
assert_equal(testres['allowed'], True)
self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'], maxfeerate='0.00070000')
if __name__ == '__main__':
RawTransactionsTest().main()
|
|
from xml.etree import ElementTree
from collections import OrderedDict
import io
import re
from functools import reduce  # reduce is not a builtin on Python 3
tree = ElementTree.parse('vk.xml').getroot()
defined_defs = {'void', 'char', 'float', 'uint8_t', 'uint32_t', 'uint64_t', 'int32_t', 'size_t', 'DWORD', 'HINSTANCE', 'HWND', 'HANDLE'}
external_structs = {'Display', 'xcb_connection_t', 'wl_display', 'wl_surface', 'MirConnection', 'MirSurface', 'ANativeWindow', 'SECURITY_ATTRIBUTES'}
handle_defs = {'Window': 'uint32_t',
'VisualID': 'uint32_t',
'xcb_window_t': 'uint32_t',
'xcb_visualid_t': 'uint32_t',
}
platform_protects = {'linux': ('VK_USE_PLATFORM_XLIB_KHR', 'VK_USE_PLATFORM_XCB_KHR', 'VK_USE_PLATFORM_WAYLAND_KHR', 'VK_USE_PLATFORM_MIR_KHR'),
'win32': ('VK_USE_PLATFORM_WIN32_KHR', ),
'android': ('VK_USE_PLATFORM_ANDROID_KHR', )}  # trailing comma keeps this a one-element tuple like the other entries
platform_defs = {'linux': {'Display', 'Window', 'VisualID', 'xcb_connection_t', 'xcb_window_t', 'xcb_visualid_t', 'wl_display', 'wl_surface', 'MirConnection', 'MirSurface'},
'win32': {'SECURITY_ATTRIBUTES'},
'android': {'ANativeWindow'}}
extension_types = {}
platform_extensions = {k: set() for k in platform_protects}
general_extensions = set()
ext_base = 1000000000
ext_block_size = 1000
typedefs = {}
struct_unions = {}
macros = {}
enums = {}
funcpointers = {}
funcs = {}
ext_funcs = {}
structs_default_values = {}
structs_len_autos = {}
def innertext(tag):
return (tag.text or '') + ''.join(innertext(e) for e in tag) + (tag.tail or '')
for i in tree.findall('types/type'):
name = i.get('name')
requires = i.get('requires')
category = i.get('category')
if category in {'struct', 'union'}:
members = i.findall('member')
def _(elem):
tail = elem.find('name').tail
if tail:
enum = elem.find('enum')
if enum is None:
return int(tail[1:-1])
return enum.text
struct_unions[name] = (category, [((j.find('type').text + (j.find('type').tail or '')).strip(), j.find('name').text, _(j)) for j in members])
structs_default_values[name] = {j.find('name').text: j.get('values') for j in members if j.get('values')}
structs_len_autos[name] = {}
member_names = [j.find('name').text for j in members]
for j in members:
len_ = j.get('len')
name_ = j.find('name').text
if len_:
lens = [i for i in len_.split(',') if i != 'null-terminated']
if len(lens) == 1:
if lens[0] in member_names:
assert not (name_ in structs_default_values[name])
structs_default_values[name][name_] = []
if not lens[0] in structs_len_autos[name]:
structs_len_autos[name][lens[0]] = []
structs_len_autos[name][lens[0]].append("len(%s)" % name_)
else:
assert not lens
elif category == 'bitmask':
typedefs[i.find('name').text] = (i.find('type').text, i.find('name').text)
elif category == 'include':
pass
elif category == 'define':
name = i.find('name')
if name is None:
continue
# print>>linux_header, innertext(i).strip()
elif category == 'basetype':
typedefs[i.find('name').text] = (i.find('type').text, i.find('name').text)
elif category == 'handle':
type_ = i.find('type').text
name = i.find('name').text
if type_ == 'VK_DEFINE_HANDLE':
typedefs[name] = ('struct %s_T' % name, '*%s' % name)
elif type_ == 'VK_DEFINE_NON_DISPATCHABLE_HANDLE':
# FIXME
typedefs[name] = ('uint64_t', name)
else:
assert False
elif category == 'enum':
name = i.get('name')
enums[name] = {}
elif category == 'funcpointer':
funcpointers[i.find('name').text] = ' '.join(innertext(i).split()).replace('( ', '(').strip()
elif category is None:
requires = i.get('requires')
if requires is None:
continue
platform = None
if requires in {'X11/Xlib.h', 'mir_toolkit/client_types.h', 'wayland-client.h', 'xcb/xcb.h'}:
platform = 'linux'
elif requires == 'windows.h':
platform = 'win32'
elif requires == 'android/native_window.h':
platform = 'android'
else:
assert requires == 'vk_platform'
if not platform is None:
platform_extensions[platform].add(name)
if name in external_structs:
typedefs[name] = ("struct %s" % name, name)
elif name in handle_defs:
typedefs[name] = (handle_defs[name], name)
else:
assert name in defined_defs
else:
assert False
def evalEnum(enum, number=0):
if 'value' in enum.attrib:
value = enum.attrib['value']
special_cases = {'1000.0f': '1000.0', '(~0U)': -1, '(~0ULL)': -1}
if value in special_cases:
return special_cases[value]
return value
elif 'bitpos' in enum.attrib:
return 1 << int(enum.attrib['bitpos'])
elif 'extends' in enum.attrib:
sign = -1 if enum.get('dir') == '-' else 1
return sign * (ext_base + ext_block_size * (number - 1) + int(enum.attrib['offset']))
else:
assert False
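# Illustrative example of the 'extends' branch above: an enum introduced by
# extension number 2 with offset="3" evaluates to
# 1000000000 + 1000 * (2 - 1) + 3 = 1000001003 (negated when dir="-"),
# which is the Vulkan extension enum numbering scheme.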
enum_types = {}
for i in tree.findall('enums'):
type_ = i.get('type')
if type_ in ('enum', 'bitmask'):
name = i.attrib['name']
enum_types[name] = type_
for j in i.findall('enum'):
enums[name][j.attrib['name']] = evalEnum(j)
else:
for j in i.findall('enum'):
macros[j.attrib['name']] = evalEnum(j)
pattern = re.compile('(.*?)([A-Z]*)$')
enums_ranges = {}
for i in enums:
if i != 'VkCompositeAlphaFlagBitsKHR':
continue
if not enums[i]:
continue
name = pattern.match(i).group(1)
ext = pattern.match(i).group(2)
postfix = '_' + ext if ext else ''
def _(name):
upper_pos = [j for j, k in enumerate(name) if k.isupper()]
return '_'.join(name[begin:end].upper() for begin, end in zip(upper_pos, upper_pos[1:] + [len(name)])) + '_'
is_bitmask = enum_types[i] == 'bitmask'
if is_bitmask:
assert name.endswith('FlagBits')
prefix = _(name[:-8])
enums_ranges[i] = {prefix + 'FLAG_BITS_MAX_ENUM' + postfix: 0x7FFFFFFF}
else:
prefix = _(name)
values = [int(j) for _, j in enums[i].items()]
enums_ranges[i] = {prefix + 'BEGIN_RANGE' + postfix: min(values),
prefix + 'END_RANGE' + postfix: max(values),
prefix + 'RANGE_SIZE' + postfix: max(values) - min(values) + 1,
prefix + 'MAX_ENUM' + postfix: 0x7FFFFFFF}
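# Illustrative example of the naming scheme above: for 'VkCompositeAlphaFlagBitsKHR'
# the regex splits off ext = 'KHR', the bitmask branch strips the trailing 'FlagBits'
# and builds prefix = 'VK_COMPOSITE_ALPHA_', so the generated entry is
# 'VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR': 0x7FFFFFFF.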
for i in tree.findall('extensions/extension'):
#TODO:add extension macro
if i.attrib['supported'] == 'disabled':
continue
number = int(i.get('number'), 0)
require = i.find('require')
protect = i.get('protect')
type_ = i.attrib['type']
extension = None
if protect:
for j in platform_protects:
if protect in platform_protects[j]:
extension = platform_extensions[j]
break
else:
extension = general_extensions
assert not extension is None
macros[i.attrib['name']] = 1
extension.update(j.attrib['name'] for j in require.findall('enum'))
extension.update(j.attrib['name'] for j in require.findall('type'))
extension.update(j.attrib['name'] for j in require.findall('command'))
for j in require.findall('command'):
extension_types[j.attrib['name']] = type_
for j in require.findall('enum'):
if 'extends' in j.attrib:
assert j.attrib['extends'] in enums
enums[j.attrib['extends']][j.attrib['name']] = evalEnum(j, number)
else:
macros[j.attrib['name']] = evalEnum(j)
for i in enums_ranges:
enums[i].update(**enums_ranges[i])
all_extensions = reduce(lambda x, y: x.union(y), platform_extensions.values()).union(general_extensions)
def_orders = []
for i in struct_unions:
def _(name):
if name in def_orders:
return
__, members = struct_unions[name]
for j, __, ___ in members:
if j.endswith('*'):
j = j[:-1]
if j in struct_unions:
_(j)
def_orders.append(name)
_(i)
assert len(struct_unions) == len(def_orders)
struct_unions = OrderedDict((k, struct_unions[k]) for k in def_orders)
funcs_return_list = set()
funcs_return_list_len_specified = set()
funcs_return_single = set()
funcs_return_nothing = set()
funcs_return_procaddr = set()
all_successcodes = set()
all_errorcodes = set()
funcs_optional_params = {}
funcs_len_autos = {}
for i in tree.findall('commands/command'):
type_ = i.find('proto/type').text
name = i.find('proto/name').text
successcodes = i.get('successcodes')
if successcodes:
all_successcodes.update(successcodes.split(','))
errorcodes = i.get('errorcodes')
if errorcodes:
all_errorcodes.update(errorcodes.split(','))
params = i.findall('param')
param_names = [j.find('name').text for j in params]
value = (type_, name, [innertext(j).strip() for j in params], [(((j.text or '') + j.find('type').text + j.find('type').tail).strip(), j.find('name').text) for j in params])
funcs[name] = value
len_ = params[-1].get('len')
if len_:
lens = len_.split(',')
assert len(lens) == 1
if lens[0] == 'null-terminated':
assert name in {'vkGetDeviceProcAddr', 'vkGetInstanceProcAddr'}
params = params[:-1]
funcs_return_procaddr.add(name)
elif params[-1].text and params[-1].text.strip() == 'const':
funcs_return_nothing.add(name)
elif lens[0] in param_names:
if params[-1].get('optional') != 'true':
params = params[:-1]
funcs_return_list_len_specified.add(name)
else:
assert lens[0] == param_names[-2]
assert params[-1].find('type').tail.strip() == '*'
params = params[:-2]
funcs_return_list.add(name)
else:
assert name in ['vkAllocateDescriptorSets', 'vkAllocateCommandBuffers']
params = params[:-1]
funcs_return_list_len_specified.add(name)
elif (params[-1].text is None or params[-1].text.strip() != 'const'):
tail = params[-1].find('type').tail.strip()
if tail == '*':
if any(name.startswith(i) for i in {'vkGet', 'vkCreate', 'vkAllocate', 'vkAcquire'}):
params = params[:-1]
funcs_return_single.add(name)
else:
assert name in {'vkDebugMarkerSetObjectNameEXT', 'vkDebugMarkerSetObjectTagEXT', 'vkCmdDebugMarkerBeginEXT', 'vkCmdDebugMarkerInsertEXT'}
funcs_return_nothing.add(name)
elif tail == '**':
assert name in {'vkMapMemory'}
params = params[:-1]
funcs_return_single.add(name)
else:
funcs_return_nothing.add(name)
else:
funcs_return_nothing.add(name)
param_names = [j.find('name').text for j in params]
funcs_optional_params[name] = set()
funcs_len_autos[name] = {}
add_optional = True
for j in params[::-1]:
name_ = j.find('name').text
if add_optional:
optional = j.get('optional')
if optional is None or not 'true' in optional.split(','):
add_optional = False
else:
funcs_optional_params[name].add(name_)
len_ = j.get('len')
if len_:
lens = [i for i in len_.split(',') if i != 'null-terminated']
if len(lens) == 1:
if lens[0] in param_names:
if not lens[0] in funcs_len_autos[name]:
funcs_len_autos[name][lens[0]] = []
funcs_len_autos[name][lens[0]].append("len(%s)" % name_)
else:
assert not lens
# print lens[0]
for i in funcs_len_autos:
if funcs_len_autos[i]:
pass
assert not all_errorcodes.intersection(all_successcodes)
all_errorcodes = set(i for i in enums['VkResult'] if i not in all_successcodes)
all_successcodes.remove('VK_SUCCESS')
def _(name):
chunks = name.split('_')
if name in all_extensions:
return ''.join(i[0] + i[1:].lower() for i in chunks[:-1]) + chunks[-1]
else:
return ''.join(i[0] + i[1:].lower() for i in chunks)
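# Illustrative examples: _('VK_ERROR_OUT_OF_HOST_MEMORY') yields
# 'VkErrorOutOfHostMemory'; for a code that is listed in all_extensions
# (e.g. one registered by a KHR extension) the final chunk keeps its case,
# so 'VK_ERROR_SURFACE_LOST_KHR' would become 'VkErrorSurfaceLostKHR'.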
exceptions = {i: _(i) for i in all_successcodes}
errors = {i: _(i) for i in all_errorcodes}
exception_codes = '{%s}' % ', '.join('%s:%s' % (i, _(i)) for i in all_successcodes.union(all_errorcodes))
constructors = {}
len_auto_special_cases = ['VkWriteDescriptorSet']
for i in struct_unions:
_, fields = struct_unions[i]
wrapper_params = ', '.join([("%s=%s" % (k, structs_default_values[i][k] if k in structs_default_values[i] else None)) for _, k, _ in fields])
call_params = ', '.join("%s=%s" % (k, k) for _, k, _ in fields)
len_autos = structs_len_autos[i].items() if not i in len_auto_special_cases else []
constructors[i] = (wrapper_params, call_params, len_autos)
throwable_funcs = set(k for k, (v, _, _, _) in funcs.items() if v == 'VkResult')
func_wrappers = {}
for name, (type_, _, _, params) in funcs.items():
func_wrappers[name] = (type_, [i for i, _ in params], [i for _, i in params], funcs_optional_params[name])
platform_vkapi_ptr = {'linux': '', 'win32': '__stdcall ', 'android': ''}
platform_newline = {'linux': '\n', 'win32': '\n', 'android': '\n'}
from jinja2 import Environment, FileSystemLoader
import os
env = Environment(loader=FileSystemLoader(os.path.dirname(os.path.abspath(__file__))), trim_blocks=True)
instance_ext_funcs = [i for i in all_extensions if i in funcs and extension_types[i] == 'instance']
device_ext_funcs = [i for i in all_extensions if i in funcs and extension_types[i] == 'device']
genvulkan = env.get_template('vulkan.template.py')
with open('../pyVulkan/_vulkan.py', 'w') as f:
f.write(genvulkan.render(len=len, **globals()))
genheader = env.get_template('header.template.h')
for i in platform_extensions:
def _(x):
return {j: x[j] for j in x if not j in all_extensions or j in platform_extensions[i] or j in general_extensions}
with io.open('../pyVulkan/vulkan_%s_cffi.h' % i, 'w', newline=platform_newline[i]) as f:
f.write(genheader.render(extensions=all_extensions, macros=macros, typedefs=_(typedefs), enums=_(enums),
struct_unions=_(struct_unions), funcs=_(funcs), ext_funcs=_(ext_funcs), funcpointers=_(funcpointers),
vkapi_ptr=platform_vkapi_ptr[i], isinstance=isinstance, int=int))
|
|
# -*- coding: utf-8 -*-
# __ _ _______ ______ _______ __ _
# | | | | _ | _ | | _ | | | |
# | |_| | |_| | | || | |_| | |_| |
# | | | |_||_| | |
# | _ | | __ | | _ |
# | | | | _ | | | | _ | | | |
# |_| |__|__| |__|___| |_|__| |__|_| |__|
"""Indiegogo notifier (Slack & IFTTT).
Author: Taehyun Park (taehyun@thenaran.com)
"""
import logging
import requests
import json
from tinydb import TinyDB, where
import time
import iso8601
import getpass
import subprocess
BASE_URL = 'https://api.indiegogo.com/1.1'
DB = TinyDB('data.json')
try:
# Read configurations
with open('config.json', 'r') as f:
CONFIGS = json.loads(f.read())
except:
CONFIGS = {}
def get_campaign_info():
payload = {'api_token': CONFIGS['api_key'],
'access_token': CONFIGS['access_token'],
}
resp = requests.get(
'{base}/campaigns/{ident}.json'.format(base=BASE_URL,
ident=CONFIGS['campaign_id']),
params=payload)
result = json.loads(resp.text)
return result['response']
def get_perks_info():
payload = {'api_token': CONFIGS['api_key'],
'access_token': CONFIGS['access_token'],
}
resp = requests.get(
'{base}/campaigns/{ident}/perks.json'.format(base=BASE_URL,
ident=CONFIGS['campaign_id']),
params=payload)
result = json.loads(resp.text)
return result['response']
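# new_comments() and new_contribs() page through the API responses, newest
# first, and stop as soon as an item no newer than the given last_ts
# timestamp is reached.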
def new_comments(last_ts):
page = 1
go = True
while go:
payload = {'api_token': CONFIGS['api_key'],
'access_token': CONFIGS['access_token'],
'page': page}
        logging.info("Fetching comments on page %s.", page)
        page += 1
resp = requests.get(
'{base}/campaigns/{ident}/comments.json'.format(base=BASE_URL,
ident=CONFIGS['campaign_id']),
params=payload)
result = json.loads(resp.text)
if not result['response']:
break
for comment in result['response']:
created_ts = _convert_to_ts(comment['created_at'])
if last_ts >= created_ts:
go = False
break
yield comment
def new_contribs(last_ts):
page = 1
go = True
while go:
payload = {'api_token': CONFIGS['api_key'],
'access_token': CONFIGS['access_token'],
'page': page}
page += 1
resp = requests.get(
'{base}/campaigns/{ident}/contributions.json'.format(base=BASE_URL,
ident=CONFIGS['campaign_id']),
params=payload)
result = json.loads(resp.text)
if not result['response']:
break
for contrib in result['response']:
created_ts = _convert_to_ts(contrib['created_at'])
if last_ts >= created_ts:
go = False
break
yield contrib
def all_campaigns():
page = 1
while True:
payload = {'api_token': CONFIGS['api_key'],
'access_token': CONFIGS['access_token'],
'page': page}
logging.info("On page %s.", page)
page += 1
resp = requests.get('{base}/campaigns.json'.format(base=BASE_URL), params=payload)
result = json.loads(resp.text)
if not result['response']:
break
for campaign in result['response']:
yield campaign
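# search_campaigns() looks up campaigns by title; with only_mine=True only
# campaigns whose team includes the configured account are yielded, and at
# most max_page pages are fetched.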
def search_campaigns(terms, max_page=10, only_mine=True):
page = 1
while True:
payload = {'api_token': CONFIGS['api_key'],
'access_token': CONFIGS['access_token'],
'title': terms,
'sort': only_mine and 'new' or 'popular_all',
'page': page
}
logging.info("On page %s.", page)
page += 1
if page > max_page:
break
resp = requests.get('{base}/search/campaigns.json'.format(base=BASE_URL), params=payload)
result = json.loads(resp.text)
if not result['response']:
break
for campaign in result['response']:
for member in campaign['team_members']:
if not only_mine or member['account_id'] == CONFIGS['account_id']:
yield campaign
break
def get_current_account():
payload = {'api_token': CONFIGS['api_key'],
'access_token': CONFIGS['access_token']
}
resp = requests.get('{base}/me.json'.format(base=BASE_URL), params=payload)
result = json.loads(resp.text)
return result['response']
def get_account_info(ident):
payload = {'api_token': CONFIGS['api_key'],
'access_token': CONFIGS['access_token']
}
url = '{base}/accounts/{ident}.json'.format(base=BASE_URL, ident=ident)
resp = requests.get(url, params=payload)
result = json.loads(resp.text)
return result['response']
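# check_now() runs the four checkers below in sequence; each one is isolated
# so that a failure in one of them (other than KeyboardInterrupt) does not
# stop the others from running.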
def check_now():
try:
_check_comments()
except KeyboardInterrupt:
raise
except:
logging.exception("Failed to check comments.")
try:
_check_contribs()
except KeyboardInterrupt:
raise
except:
logging.exception("Failed to check contributions.")
try:
_check_campaign_status()
except KeyboardInterrupt:
raise
except:
logging.exception("Failed to check campaign status.")
try:
_check_perks_status()
except KeyboardInterrupt:
raise
except:
logging.exception("Failed to check perks status.")
def write_to_slack(pretext, text, color, fields=None):
"""Write the text to the Slack channel.
"""
if 'slack_url' not in CONFIGS or not CONFIGS['slack_url']:
logging.info("Slack URL not configured.")
return
payload = {
'pretext': pretext,
'text': text,
'color': color,
'fields': fields,
'parse': 'full'
}
try:
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
requests.post(CONFIGS['slack_url'], data=json.dumps(payload), headers=headers)
except:
logging.info("Failed to write to slack.", exc_info=True)
def notify_ifttt(event, text, link, image):
"""Make a HTTP request to the IFTTT Maker channel.
"""
if 'ifttt_maker_key' not in CONFIGS:
logging.info("IFTTT not configured.")
return
payload = {
'value1': text,
'value2': link,
'value3': image
}
url = 'https://maker.ifttt.com/trigger/{event}/with/key/{key}'.format(
event=event,
key=CONFIGS['ifttt_maker_key'])
try:
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
requests.post(url, data=json.dumps(payload), headers=headers)
except:
logging.info("Failed to notify IFTTT.", exc_info=True)
def start():
"""Start monitoring the Indiegogo campaign.
"""
# Retrieve the current campaign information
campaign = get_campaign_info()
CONFIGS['slug'] = campaign['slug']
CONFIGS['campaign_id'] = campaign['id']
CONFIGS['campaign_preview_url'] = campaign['preview_url']
CONFIGS['campaign_thumbnail_image_url'] = campaign['thumbnail_image_url']
# Initialize timestamps
last_comment_ts = DB.search(where('type') == 'comment')
if not last_comment_ts:
DB.insert({'ts': 0, 'type': 'comment'})
last_contrib_ts = DB.search(where('type') == 'contrib')
if not last_contrib_ts:
DB.insert({'ts': 0, 'type': 'contrib'})
# Insert markers for each campaign goal
goal = campaign['goal']
funds = campaign['collected_funds']
achieved = int(funds * 100 / goal)
for i in [30, 70, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]:
# notify at each achievement
p = 'p' + str(i)
marker = DB.search(where('type') == p)
if not marker and achieved >= i:
DB.insert({'ts': time.time(), 'type': p})
update_interval = CONFIGS['update_interval']
print "Start monitoring (CTRL-c to stop)..."
try:
while True:
check_now()
time.sleep(update_interval)
    except KeyboardInterrupt:
        pass
print "Monitoring stopped."
def ftl():
"""Initializer.
"""
print "Indiegogo Campaign Monitor v0.1 (by taehyun@thenaran.com)"
print
api_key = raw_input("Enter your Indiegogo API key [press enter to use the default]: ")
if not api_key:
api_key = 'ce450f4a26ed1b72136d58cd73fd38441e699f90aee8b7caacd0f144ad982a98'
slack_url = raw_input("Enter your Slack webhook URL (default: none): ")
    ifttt_maker_key = _prompt_required("Enter your IFTTT maker key (required): ", "Please enter a valid one: ")
update_interval = raw_input("Input update interval in seconds [default: 60]: ")
try:
update_interval = int(update_interval)
except:
logging.warn("Setting the update interval to 60 seconds.")
update_interval = 60
# Sync configurations
data = {
'api_key': api_key,
'slack_url': slack_url,
'ifttt_maker_key': ifttt_maker_key,
'update_interval': update_interval
}
CONFIGS.update(data)
print
authenticate()
print
print "Please enter a campaign ID. If you don't know what it is, type in keywords which would find the campaign."
    campaign_id = _prompt_required("Input campaign ID or keywords: ", "Please enter a valid one: ")
try:
campaign_id = int(campaign_id)
except:
only_mine = _prompt_yes_no("Is it your campaign", default_yes=False)
terms = campaign_id
found = False
while not found:
for campaign in search_campaigns(terms, max_page=10, only_mine=only_mine):
print
print u'[{title}]'.format(title=campaign['title'])
yes = _prompt_yes_no("Select this one", default_yes=False)
if yes:
campaign_id = campaign['id']
found = True
break
if not found:
print
                terms = _prompt_required("Please use different keywords: ", "Please enter a valid one: ")
only_mine = _prompt_yes_no("Is it your campaign", default_yes=False)
CONFIGS['campaign_id'] = campaign_id
data['campaign_id'] = campaign_id
s = json.dumps(data)
with open('config.json', 'w') as f:
f.write(s)
print
print "Do you want to sync all comments and contributions from the beginning? If no, it will ignore existing ones and only start keeping track of new ones from now on. Be warned if you choose to sync and there are already a lot of comments and contributions!"
yes = _prompt_yes_no("Do you want to sync existing comments and contributions", default_yes=False)
if not yes:
# Insert the current timestamp so that it would ignore the existing comments and contributions.
DB.insert({'ts': time.time(), 'type': 'comment'})
DB.insert({'ts': time.time(), 'type': 'contrib'})
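# authenticate() obtains an Indiegogo OAuth token (prompting for the account
# e-mail and password and calling the token endpoint via curl on first run)
# and caches the access/refresh tokens in the local TinyDB database.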
def authenticate():
access_token = DB.search(where('type') == 'access_token')
if not access_token:
ident = _prompt_required('Indiegogo ID (email): ', 'Please enter your Indiegogo ID (email): ')
password = getpass.getpass('Password: ')
output = subprocess.check_output('curl -ss -X POST -d grant_type=password -d credential_type=email -d email={email} -d password={password} https://auth.indiegogo.com/oauth/token'.format(email=ident, password=password), shell=True)
tokens = json.loads(output)
DB.insert({'value': tokens['access_token'], 'type': 'access_token'})
DB.insert({'value': tokens['refresh_token'], 'type': 'refresh_token'})
CONFIGS['access_token'] = tokens['access_token']
CONFIGS['refresh_token'] = tokens['refresh_token']
print "Authentication successful."
else:
CONFIGS['access_token'] = access_token[0]['value']
refresh_token = DB.search(where('type') == 'refresh_token')
CONFIGS['refresh_token'] = refresh_token[0]['value']
me = get_current_account()
CONFIGS['account_id'] = me['id']
def _check_comments():
last_comment_ts = DB.search(where('type') == 'comment')[0]['ts']
comments = [c for c in new_comments(last_comment_ts)]
if len(comments) > 0:
comment_ts = _convert_to_ts(comments[0]['created_at'])
if last_comment_ts != comment_ts:
DB.update({'ts': comment_ts}, where('type') == 'comment')
for comment in reversed(comments):
# notify in slack
write_to_slack('New comment',
comment['text'].replace('\n', '\\n').replace('\r', ''),
                   'warning')
# notify IFTTT:
# value1 : comment text
# value2 : direct link to the comment
# value3 : avatar url of the commenter
notify_ifttt('igg-comments',
comment['text'],
_build_comments_url(comment['id']),
comment['account']['avatar_url'])
else:
logging.info("No new comments.")
def _check_contribs():
last_contrib_ts = DB.search(where('type') == 'contrib')[0]['ts']
contribs = [c for c in new_contribs(last_contrib_ts)]
if len(contribs) > 0:
contrib_ts = _convert_to_ts(contribs[0]['created_at'])
if last_contrib_ts != contrib_ts:
DB.update({'ts': contrib_ts}, where('type') == 'contrib')
for contrib in reversed(contribs):
if not contrib['perk']:
# Contributed without selecting any perk
contrib['perk'] = {'label': 'No perk'}
contributor_name = \
                contrib['by'] if 'contributor_name' not in contrib else contrib['contributor_name']
# notify in slack
slack_fields = [
{
'title': 'Name',
'value': contributor_name,
'short': False
},
{
'title': 'Value',
'value': '$' + str(contrib['amount']),
'short': False
}
]
if 'referrer_id' in contrib:
referrer = get_account_info(contrib['referrer_id'])
slack_fields.append({
'title': 'Referrer',
'value': referrer['name'],
'short': False
})
else:
referrer = None
write_to_slack('New contribution!',
contrib['perk']['label'],
'good',
slack_fields
)
# notify IFTTT:
# value1 : perk text
# value2 : direct link to the contributor
# value3 : avatar url of the contributor
notify_ifttt('igg-contributions',
u'{contrib} claimed by {who} for ${amount}{referrer}'.format(
contrib=contrib['perk']['label'],
who=contributor_name,
amount=contrib['amount'],
referrer=u'' if not referrer else u' referred by {}'.format(referrer['name'])
),
_build_contrib_url(contrib['id']),
contrib['avatar_url'])
else:
logging.info("No new contributions yet.")
def _check_campaign_status():
campaign = get_campaign_info()
goal = campaign['goal']
funds = campaign['collected_funds']
achieved = int(funds * 100 / goal)
for i in [30, 70, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]:
# notify at each achievement
p = 'p' + str(i)
marker = DB.search(where('type') == p)
if not marker and achieved >= i:
DB.insert({'ts': time.time(), 'type': p})
msg = u'"{title}" reached {achieved}%: ${funds}'.format(
title=campaign['title'],
achieved=achieved,
funds=funds)
# notify in slack
write_to_slack('Campaign updates', msg, 'good')
# notify IFTTT:
# value1 : message
# value2 : direct link to the campaign
# value3 : thumbnail of the campaign
notify_ifttt('igg-status',
msg,
CONFIGS['campaign_preview_url'],
CONFIGS['campaign_thumbnail_image_url'])
return
def _check_perks_status():
perks = get_perks_info()
for perk in perks:
claimed = perk['number_claimed']
available = perk['number_available']
if available and claimed != available and claimed + 10 >= available:
            p = 'almost-' + str(perk['id'])
marker = DB.search(where('type') == p)
if not marker:
DB.insert({'ts': time.time(), 'type': p})
                # notify that this perk is almost sold out
write_to_slack('Sold-out warning.',
perk['label'] + ' is almost sold out. @channel',
'danger',
[
{
'title': 'Claimed',
'value': perk['number_claimed'],
'short': False
},
{
'title': 'Availability',
'value': perk['number_available'],
'short': False
}
])
# notify IFTTT:
# value1 : message
# value2 : direct link to the campaign
# value3 : thumbnail of the campaign
notify_ifttt('igg-perks-status',
u'{perk} is almost sold out. - {claimed}/{available}'.format(perk=perk['label'],
claimed=perk['number_claimed'],
available=perk['number_available']),
CONFIGS['campaign_preview_url'],
CONFIGS['campaign_thumbnail_image_url'])
elif available and claimed >= available:
            p = 'soldout-' + str(perk['id'])
marker = DB.search(where('type') == p)
if not marker:
DB.insert({'ts': time.time(), 'type': p})
                # notify that this perk is completely sold out
write_to_slack('Sold-out warning.',
perk['label'] + ' is sold out!! @channel',
'danger',
[
{
'title': 'Claimed',
'value': perk['number_claimed'],
'short': False
},
{
'title': 'Availability',
'value': perk['number_available'],
'short': False
}
])
# notify IFTTT:
# value1 : message
# value2 : direct link to the campaign
# value3 : thumbnail of the campaign
notify_ifttt('igg-perks-status',
u'{perk} is completely sold out. - {claimed}/{available}'.format(perk=perk['label'],
claimed=perk['number_claimed'],
available=perk['number_available']),
CONFIGS['campaign_preview_url'],
CONFIGS['campaign_thumbnail_image_url'])
def _convert_to_ts(s):
d = iso8601.parse_date(s)
return time.mktime(d.timetuple())
def _build_comments_url(ident):
return 'https://www.indiegogo.com/projects/{slug}/x/{account_id}#/comments?id={ident}'.format(
slug=CONFIGS['slug'],
account_id=CONFIGS['account_id'],
ident=ident)
def _build_contrib_url(ident):
return 'https://www.indiegogo.com/command_center/{slug}#/contributions/{ident}'.format(
slug=CONFIGS['slug'],
ident=ident)
def _prompt_required(msg, retry_msg):
ret = raw_input(msg)
while not ret:
ret = raw_input(retry_msg)
return ret
def _prompt_yes_no(question, default_yes=True):
yes = raw_input("{question} {yes_no}? ".format(question=question,
yes_no=default_yes and '(YES/no)' or '(yes/NO)'))
yes = yes.strip().lower()
if yes == 'yes' or yes == 'y':
return True
elif yes == 'no' or yes == 'n':
return False
else:
return default_yes
if __name__ == '__main__':
#logging.getLogger().setLevel(logging.INFO)
if len(CONFIGS) == 0:
ftl()
else:
authenticate()
start()
|
|
import domain
import pre_processing as pp
import transformation as trans
import math
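# The classifiers below score documents with SentiWordNet: POS-tagged unigrams
# of the form 'word/TAG' are mapped through NLTK_TAG_TO_SENTIWORDNET_TAG and
# looked up with transformation.word_polarity().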
class Classification(object):
"""This class holds several classification methods"""
NLTK_TAG_TO_SENTIWORDNET_TAG = {'JJ':'ADJECTIVE',
'RB':'ADVERB',
'VB':'VERB',
'VBZ':'VERB',
'VBP':'VERB'}
def __init__(self, list_of_documents):
self.list_of_documents = list_of_documents
def _sentiwordnet_scores(self, elements):
tuples = []
for e in elements:
word = e[0].split('/')[0]
tag = e[0].split('/')[1]
if len(word) > 0:
weight = trans.word_polarity(word, Classification.NLTK_TAG_TO_SENTIWORDNET_TAG[tag])
if weight:
tuples.append(weight)
return tuples
class OhanaBrendan(Classification):
"""'SentiWordNet scores were calculated as positive and negative terms were
found on each document, and used to determine sentiment orientation by
assigning the document to the class with the highest score.'
Based on 'Sentiment Classification of Reviews Using SentiWordNet'
by Bruno Ohana and Brendan Tierney
#ONLY ADJECTIVES
    positive precision = 66.26%
    negative recall = 73.20%
    accuracy = 69.56%
    #ADJECTIVES AND ADVERBS as unigrams
    positive precision = 71.37%
    negative recall = 69.30%
    accuracy = 70.32%
    #ADJECTIVES, ADVERBS AND VERBS as unigrams
    positive precision = 67.47%
negative recall = 71.99%
accuracy = 69.66%
#ADJECTIVES AND VERBS as unigrams
positive precision = 72.15%
negative recall = 68.39%
accuracy = 70.22%
BEST ACCURACY: ADJECTIVES AND ADVERBS
OVERALL ACCURACY: 69.69%
"""
def _extract_pos_tagged_element(self, doc, tag):
elements = []
for t in doc.unigrams:
unigram = t[0].split('/')
if len(unigram) > 1 and unigram[1] == tag:
elements.append(t)
return elements
# return [t for t in doc.unigrams if t[0].split('/')[1] == tag]
    def _select_documents(self, document, rule=None):
        """
        Pre-process a document, extracting the POS tags listed in the rule
        list, e.g.: rule = ['JJ', 'RB'].  Falls back to the instance-level
        `rule` attribute when no rule is given, and to adjectives only ('JJ')
        when neither is set.
        """
        if rule is None:
            rule = getattr(self, 'rule', None)
        elements = []
        if rule is None:
            elements = self._extract_pos_tagged_element(document, "JJ")
        else:
            for e in rule:
                elements = elements + self._extract_pos_tagged_element(document, e)
            elements = set(elements)
        return elements
def term_counting(self):
num_of_documents = 1
total_documents = len(self.list_of_documents)
for d in self.list_of_documents:
print str(num_of_documents) + '/' + str(total_documents) + '-' + d.name
elements = self._select_documents(d)
tuples = self._sentiwordnet_scores(elements)
            d.predicted_polarity = max(tuples, key=lambda x: abs(x[0]))[0] if tuples else None
num_of_documents = num_of_documents + 1
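# Usage sketch (illustrative only): the document loader below is a hypothetical
# helper; this module only assumes that each document exposes .name, .unigrams
# (plus .bigrams/.trigrams for Pimpalkar) and a writable .predicted_polarity.
#
#   documents = pp.load_documents('corpus/')  # hypothetical loader in pre_processing
#   classifier = OhanaBrendan(documents)
#   classifier.rule = ['JJ', 'RB']             # adjectives + adverbs (best accuracy above)
#   classifier.term_counting()
#   for doc in documents:
#       print doc.name, doc.predicted_polarity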
class Custom(Classification):
"""Custom classification methods to this research"""
    def __init__(self, list_of_documents):
        super(Custom, self).__init__(list_of_documents)
def custom_classification_1(self):
"""This classification method merges the term_counting concept from OhanaBrendan,
but introducing the study with bigrams, trigrams and the rule-based system from
A Sentimental Analysis of Movie Reviews Involving Fuzzy Rule- Based
"""
pass
class Pimpalkar(Classification):
"""Class for Pimpalkar opinion mining paper"""
ADVERBS = ['very','really','extremely','simply','always','never','not',
'absolutely','highly','overall','truly','too']
ADJECTIVE = 'JJ'
ADVERB = 'RB'
VERBS = ('VB','VBZ','VBP')
NEGATIONS = ('not','never')
def _sentiwordnet_scores(self, word_tag_pair_string):
word = word_tag_pair_string.split('/')[0]
tag = word_tag_pair_string.split('/')[1]
if len(word) > 0:
return trans.word_polarity(word, Classification.NLTK_TAG_TO_SENTIWORDNET_TAG[tag])
return None
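    # __fuzzy_intensity_finder() collects three bigram/trigram patterns:
    #   case 1: intensifying adverb + adjective        (RB + JJ)
    #   case 2: negation + adjective/verb              (not/never + JJ/VB*)
    #   case 3: negation + adverb + adjective          (not/never + RB + JJ)
    # Each match is turned into a fuzzy score from the SentiWordNet weight of
    # the opinion word: sqrt for strong weights, square for weak ones, and the
    # complement (1 - weight) for negated cases.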
def __fuzzy_intensity_finder(self, doc):
case_1 = [] #RB + JJ
case_2 = [] #(not/never) + RB/(VB/VBZ/VPB)
case_3 = [] #(not/never) + RB/JJ
#case_4 = [] ??? but/also/nor
for bigram in doc.bigrams:
bigram_1 = bigram[0]
bigram_2 = bigram[1]
word_tag_1 = bigram_1.split('/')
word_tag_2 = bigram_2.split('/')
if len(word_tag_1) > 1 and len(word_tag_2) > 1:
if (word_tag_1[1] == self.ADVERB and word_tag_1[0].lower() in self.ADVERBS) and word_tag_2[1] == self.ADJECTIVE:
case_1.append(bigram)
elif (word_tag_1[0].lower() in self.NEGATIONS) and (word_tag_2[1] == self.ADJECTIVE or word_tag_2[1] in self.VERBS):
case_2.append(bigram)
for trigram in doc.trigrams:
trigram_1 = trigram[0]
trigram_2 = trigram[1]
trigram_3 = trigram[2]
word_tag_1 = trigram_1.split('/')
word_tag_2 = trigram_2.split('/')
word_tag_3 = trigram_3.split('/')
if len(word_tag_1) > 1 and len(word_tag_2) > 1 and len(word_tag_3) > 1:
if word_tag_1[0] in self.NEGATIONS and word_tag_2[0] in self.ADVERBS and word_tag_3[1] == self.ADJECTIVE:
case_3.append(trigram)
scores = []
for bigram in case_1:
jj_weight = self._sentiwordnet_scores(bigram[1])
if jj_weight:
if jj_weight[0] >= 0.5:
scores.append(math.sqrt(jj_weight[0]))
elif jj_weight[0] < 0.5:
scores.append(math.pow(jj_weight[0],2))
for bigram in case_2:
rb_or_jj_weight = self._sentiwordnet_scores(bigram[1])
if rb_or_jj_weight:
scores.append(1 - rb_or_jj_weight[0])
for trigram in case_3:
jj_weight = self._sentiwordnet_scores(trigram[2])
if jj_weight:
A = 0
if jj_weight[0] >= 0.5:
A = math.sqrt(jj_weight[0])
elif jj_weight[0] < 0.5:
A = math.pow(jj_weight[0],2)
B = 1 - jj_weight[0]
scores.append(math.sqrt(A * B))
doc.scores = scores
def __final_sentiment_score(self, doc):
"""It should be a summation of max polarity divided by count annotations.
But a could'nt find out what it is that annotations"""
doc.predicted_polarity = max(doc.scores) if len(doc.scores) > 0 else None
def opinion_analyzer(self):
num_of_documents = 1
total_documents = len(self.list_of_documents)
for doc in self.list_of_documents:
print str(num_of_documents) + '/' + str(total_documents) + '-' + doc.name
self.__fuzzy_intensity_finder(doc)
self.__final_sentiment_score(doc)
num_of_documents = num_of_documents + 1
|
|
import datetime
import re
from typing import Any, Dict, List, Mapping, Union
from unittest import mock
import orjson
from django.conf import settings
from django.utils.timezone import now as timezone_now
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
do_add_deactivated_redirect,
do_change_realm_org_type,
do_change_realm_plan_type,
do_change_realm_subdomain,
do_create_realm,
do_deactivate_realm,
do_deactivate_stream,
do_scrub_realm,
do_send_realm_reactivation_email,
do_set_realm_property,
do_set_realm_user_default_setting,
)
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.send_email import send_future_email
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import (
Attachment,
CustomProfileField,
Message,
Realm,
RealmAuditLog,
RealmUserDefault,
ScheduledEmail,
Stream,
UserMessage,
UserProfile,
get_realm,
get_stream,
get_user_profile_by_id,
)
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(
self, user_profile: UserProfile, new_realm_name: str
) -> None:
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_realm_creation_ensures_internal_realms(self) -> None:
with mock.patch("zerver.lib.actions.server_initialized", return_value=False):
with mock.patch(
"zerver.lib.actions.create_internal_realm"
) as mock_create_internal, self.assertLogs(level="INFO") as info_logs:
do_create_realm("testrealm", "Test Realm")
mock_create_internal.assert_called_once()
self.assertEqual(
info_logs.output,
["INFO:root:Server not yet initialized. Creating the internal realm first."],
)
def test_realm_creation_on_social_auth_subdomain_disallowed(self) -> None:
with self.settings(SOCIAL_AUTH_SUBDOMAIN="zulipauth"):
with self.assertRaises(AssertionError):
do_create_realm("zulipauth", "Test Realm")
def test_permission_for_education_non_profit_organization(self) -> None:
realm = do_create_realm(
"test_education_non_profit",
"education_org_name",
org_type=Realm.ORG_TYPES["education_nonprofit"]["id"],
)
self.assertEqual(realm.create_public_stream_policy, Realm.POLICY_ADMINS_ONLY)
self.assertEqual(realm.create_private_stream_policy, Realm.POLICY_MEMBERS_ONLY)
self.assertEqual(realm.invite_to_realm_policy, Realm.POLICY_ADMINS_ONLY)
self.assertEqual(realm.move_messages_between_streams_policy, Realm.POLICY_MODERATORS_ONLY)
self.assertEqual(realm.user_group_edit_policy, Realm.POLICY_MODERATORS_ONLY)
self.assertEqual(realm.invite_to_stream_policy, Realm.POLICY_MODERATORS_ONLY)
def test_permission_for_education_for_profit_organization(self) -> None:
realm = do_create_realm(
"test_education_for_profit",
"education_org_name",
org_type=Realm.ORG_TYPES["education"]["id"],
)
self.assertEqual(realm.create_public_stream_policy, Realm.POLICY_ADMINS_ONLY)
self.assertEqual(realm.create_private_stream_policy, Realm.POLICY_MEMBERS_ONLY)
self.assertEqual(realm.invite_to_realm_policy, Realm.POLICY_ADMINS_ONLY)
self.assertEqual(realm.move_messages_between_streams_policy, Realm.POLICY_MODERATORS_ONLY)
self.assertEqual(realm.user_group_edit_policy, Realm.POLICY_MODERATORS_ONLY)
self.assertEqual(realm.invite_to_stream_policy, Realm.POLICY_MODERATORS_ONLY)
def test_realm_enable_spectator_access(self) -> None:
realm = do_create_realm("test_web_public_true", "Foo", enable_spectator_access=True)
self.assertEqual(realm.enable_spectator_access, True)
realm = do_create_realm("test_web_public_false", "Boo", enable_spectator_access=False)
self.assertEqual(realm.enable_spectator_access, False)
def test_do_set_realm_name_caching(self) -> None:
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
realm = get_realm("zulip")
new_name = "Zed You Elle Eye Pea"
do_set_realm_property(realm, "name", new_name, acting_user=None)
self.assertEqual(get_realm(realm.string_id).name, new_name)
self.assert_user_profile_cache_gets_new_name(self.example_user("hamlet"), new_name)
def test_update_realm_name_events(self) -> None:
realm = get_realm("zulip")
new_name = "Puliz"
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
do_set_realm_property(realm, "name", new_name, acting_user=None)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="name",
value=new_name,
),
)
def test_update_realm_description_events(self) -> None:
realm = get_realm("zulip")
new_description = "zulip dev group"
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
do_set_realm_property(realm, "description", new_description, acting_user=None)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="description",
value=new_description,
),
)
def test_update_realm_description(self) -> None:
self.login("iago")
new_description = "zulip dev group"
data = dict(description=new_description)
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
result = self.client_patch("/json/realm", data)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.description, new_description)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="description",
value=new_description,
),
)
def test_realm_description_length(self) -> None:
new_description = "A" * 1001
data = dict(description=new_description)
        # We need an admin user.
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "description is too long (limit: 1000 characters)")
realm = get_realm("zulip")
self.assertNotEqual(realm.description, new_description)
def test_realm_convert_demo_realm(self) -> None:
data = dict(string_id="coolrealm")
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "Must be an organization owner")
self.login("desdemona")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "Must be a demo organization.")
data = dict(string_id="lear")
self.login("desdemona")
realm = get_realm("zulip")
realm.demo_organization_scheduled_deletion_date = timezone_now() + datetime.timedelta(
days=30
)
realm.save()
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "Subdomain unavailable. Please choose a different one.")
# Now try to change the string_id to something available.
data = dict(string_id="coolrealm")
result = self.client_patch("/json/realm", data)
self.assert_json_success(result)
json = orjson.loads(result.content)
self.assertEqual(json["realm_uri"], "http://coolrealm.testserver")
realm = get_realm("coolrealm")
self.assertIsNone(realm.demo_organization_scheduled_deletion_date)
self.assertEqual(realm.string_id, data["string_id"])
def test_realm_name_length(self) -> None:
new_name = "A" * (Realm.MAX_REALM_NAME_LENGTH + 1)
data = dict(name=new_name)
        # We need an admin user.
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "name is too long (limit: 40 characters)")
realm = get_realm("zulip")
self.assertNotEqual(realm.name, new_name)
def test_admin_restrictions_for_changing_realm_name(self) -> None:
new_name = "Mice will play while the cat is away"
self.login("othello")
req = dict(name=new_name)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization administrator")
def test_unauthorized_name_change(self) -> None:
data = {"full_name": "Sir Hamlet"}
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
do_set_realm_property(user_profile.realm, "name_changes_disabled", True, acting_user=None)
url = "/json/settings"
result = self.client_patch(url, data)
self.assertEqual(result.status_code, 200)
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
        # Realm admins can change their name even when the setting is disabled.
data = {"full_name": "New Iago"}
self.login("iago")
url = "/json/settings"
result = self.client_patch(url, data)
self.assert_json_success(result)
def test_do_deactivate_realm_clears_user_realm_cache(self) -> None:
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
hamlet_id = self.example_user("hamlet").id
get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
user = get_user_profile_by_id(hamlet_id)
self.assertTrue(user.realm.deactivated)
def test_do_change_realm_delete_clears_user_realm_cache(self) -> None:
hamlet_id = self.example_user("hamlet").id
get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
realm.delete()
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_id(hamlet_id)
def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None:
"""The main complicated thing about changing realm subdomains is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
hamlet_id = self.example_user("hamlet").id
user = get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
iago = self.example_user("iago")
do_change_realm_subdomain(realm, "newzulip", acting_user=iago)
user = get_user_profile_by_id(hamlet_id)
self.assertEqual(user.realm.string_id, "newzulip")
placeholder_realm = get_realm("zulip")
self.assertTrue(placeholder_realm.deactivated)
self.assertEqual(placeholder_realm.deactivated_redirect, user.realm.uri)
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_SUBDOMAIN_CHANGED, acting_user=iago
).last()
assert realm_audit_log is not None
expected_extra_data = {"old_subdomain": "zulip", "new_subdomain": "newzulip"}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None:
user = self.example_user("hamlet")
send_future_email(
"zerver/emails/followup_day1",
user.realm,
to_user_ids=[user.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
do_deactivate_realm(user.realm, acting_user=None)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_do_change_realm_description_clears_cached_descriptions(self) -> None:
realm = get_realm("zulip")
rendered_description = get_realm_rendered_description(realm)
text_description = get_realm_text_description(realm)
realm.description = "New description"
realm.save(update_fields=["description"])
new_rendered_description = get_realm_rendered_description(realm)
self.assertNotEqual(rendered_description, new_rendered_description)
self.assertIn(realm.description, new_rendered_description)
new_text_description = get_realm_text_description(realm)
self.assertNotEqual(text_description, new_text_description)
self.assertEqual(realm.description, new_text_description)
def test_do_deactivate_realm_on_deactivated_realm(self) -> None:
"""Ensure early exit is working in realm deactivation"""
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
def test_do_set_deactivated_redirect_on_deactivated_realm(self) -> None:
"""Ensure that the redirect url is working when deactivating realm"""
realm = get_realm("zulip")
redirect_url = "new_server.zulip.com"
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
do_add_deactivated_redirect(realm, redirect_url)
self.assertEqual(realm.deactivated_redirect, redirect_url)
new_redirect_url = "test.zulip.com"
do_add_deactivated_redirect(realm, new_redirect_url)
self.assertEqual(realm.deactivated_redirect, new_redirect_url)
self.assertNotEqual(realm.deactivated_redirect, redirect_url)
def test_realm_reactivation_link(self) -> None:
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
response = self.client_get(confirmation_url)
self.assert_in_success_response(
["Your organization has been successfully reactivated"], response
)
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
def test_realm_reactivation_confirmation_object(self) -> None:
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
confirmation = Confirmation.objects.last()
assert confirmation is not None
self.assertEqual(confirmation.content_object, realm)
self.assertEqual(confirmation.realm, realm)
def test_do_send_realm_reactivation_email(self) -> None:
realm = get_realm("zulip")
iago = self.example_user("iago")
do_send_realm_reactivation_email(realm, acting_user=iago)
from django.core.mail import outbox
self.assert_length(outbox, 1)
self.assertEqual(self.email_envelope_from(outbox[0]), settings.NOREPLY_EMAIL_ADDRESS)
self.assertRegex(
self.email_display_from(outbox[0]),
fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
)
self.assertIn("Reactivate your Zulip organization", outbox[0].subject)
self.assertIn("Dear former administrators", outbox[0].body)
admins = realm.get_human_admin_users()
confirmation_url = self.get_confirmation_url_from_outbox(admins[0].delivery_email)
response = self.client_get(confirmation_url)
self.assert_in_success_response(
["Your organization has been successfully reactivated"], response
)
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
self.assertEqual(
RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_REACTIVATION_EMAIL_SENT, acting_user=iago
).count(),
1,
)
def test_realm_reactivation_with_random_link(self) -> None:
random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b"
response = self.client_get(random_link)
self.assert_in_success_response(
["The organization reactivation link has expired or is not valid."], response
)
def test_change_notifications_stream(self) -> None:
# We need an admin user.
self.login("iago")
disabled_notif_stream_id = -1
req = dict(notifications_stream_id=orjson.dumps(disabled_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.notifications_stream, None)
new_notif_stream_id = Stream.objects.get(name="Denmark").id
req = dict(notifications_stream_id=orjson.dumps(new_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
assert realm.notifications_stream is not None
self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
invalid_notif_stream_id = 1234
req = dict(notifications_stream_id=orjson.dumps(invalid_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid stream id")
realm = get_realm("zulip")
assert realm.notifications_stream is not None
self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)
def test_get_default_notifications_stream(self) -> None:
realm = get_realm("zulip")
verona = get_stream("verona", realm)
notifications_stream = realm.get_notifications_stream()
assert notifications_stream is not None
self.assertEqual(notifications_stream.id, verona.id)
do_deactivate_stream(notifications_stream, acting_user=None)
self.assertIsNone(realm.get_notifications_stream())
def test_change_signup_notifications_stream(self) -> None:
# We need an admin user.
self.login("iago")
disabled_signup_notifications_stream_id = -1
req = dict(
signup_notifications_stream_id=orjson.dumps(
disabled_signup_notifications_stream_id
).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.signup_notifications_stream, None)
new_signup_notifications_stream_id = Stream.objects.get(name="Denmark").id
req = dict(
signup_notifications_stream_id=orjson.dumps(new_signup_notifications_stream_id).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
assert realm.signup_notifications_stream is not None
self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id)
invalid_signup_notifications_stream_id = 1234
req = dict(
signup_notifications_stream_id=orjson.dumps(
invalid_signup_notifications_stream_id
).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid stream id")
realm = get_realm("zulip")
assert realm.signup_notifications_stream is not None
self.assertNotEqual(
realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id
)
def test_get_default_signup_notifications_stream(self) -> None:
realm = get_realm("zulip")
verona = get_stream("verona", realm)
realm.signup_notifications_stream = verona
realm.save(update_fields=["signup_notifications_stream"])
signup_notifications_stream = realm.get_signup_notifications_stream()
assert signup_notifications_stream is not None
self.assertEqual(signup_notifications_stream, verona)
do_deactivate_stream(signup_notifications_stream, acting_user=None)
self.assertIsNone(realm.get_signup_notifications_stream())
def test_change_realm_default_language(self) -> None:
# we need an admin user.
self.login("iago")
        # Test to make sure that when an invalid language is passed
        # as the default realm language, the correct validation error is
        # raised and the invalid language is not saved in the database.
invalid_lang = "invalid_lang"
req = dict(default_language=invalid_lang)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, f"Invalid language '{invalid_lang}'")
realm = get_realm("zulip")
self.assertNotEqual(realm.default_language, invalid_lang)
def test_deactivate_realm_by_owner(self) -> None:
self.login("desdemona")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
result = self.client_post("/json/realm/deactivate")
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertTrue(realm.deactivated)
def test_deactivate_realm_by_non_owner(self) -> None:
self.login("iago")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
result = self.client_post("/json/realm/deactivate")
self.assert_json_error(result, "Must be an organization owner")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
def test_invalid_integer_attribute_values(self) -> None:
integer_values = [key for key, value in Realm.property_types.items() if value is int]
invalid_values = dict(
bot_creation_policy=10,
create_public_stream_policy=10,
create_private_stream_policy=10,
create_web_public_stream_policy=10,
invite_to_stream_policy=10,
email_address_visibility=10,
message_retention_days=10,
video_chat_provider=10,
giphy_rating=10,
waiting_period_threshold=-10,
digest_weekday=10,
user_group_edit_policy=10,
private_message_policy=10,
message_content_delete_limit_seconds=-10,
wildcard_mention_policy=10,
invite_to_realm_policy=10,
move_messages_between_streams_policy=10,
add_custom_emoji_policy=10,
delete_own_message_policy=10,
)
# We need an admin user.
self.login("iago")
for name in integer_values:
invalid_value = invalid_values.get(name)
if invalid_value is None:
raise AssertionError(f"No test created for {name}")
self.do_test_invalid_integer_attribute_value(name, invalid_value)
def do_test_invalid_integer_attribute_value(self, val_name: str, invalid_val: int) -> None:
possible_messages = {
f"Invalid {val_name}",
f"Bad value for '{val_name}'",
f"Bad value for '{val_name}': {invalid_val}",
f"Invalid {val_name} {invalid_val}",
}
req = {val_name: invalid_val}
result = self.client_patch("/json/realm", req)
msg = self.get_json_error(result)
self.assertTrue(msg in possible_messages)
def test_change_video_chat_provider(self) -> None:
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
self.login("iago")
invalid_video_chat_provider_value = 10
req = {"video_chat_provider": orjson.dumps(invalid_video_chat_provider_value).decode()}
result = self.client_patch("/json/realm", req)
self.assert_json_error(
result, ("Invalid video_chat_provider {}").format(invalid_video_chat_provider_value)
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["disabled"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["disabled"]["id"]
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["big_blue_button"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider,
Realm.VIDEO_CHAT_PROVIDERS["big_blue_button"]["id"],
)
req = {
"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS["zoom"]["id"]).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
def test_initial_plan_type(self) -> None:
with self.settings(BILLING_ENABLED=True):
self.assertEqual(do_create_realm("hosted", "hosted").plan_type, Realm.PLAN_TYPE_LIMITED)
self.assertEqual(
get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX
)
self.assertEqual(
get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED
)
self.assertEqual(get_realm("hosted").upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
with self.settings(BILLING_ENABLED=False):
self.assertEqual(
do_create_realm("onpremise", "onpremise").plan_type, Realm.PLAN_TYPE_SELF_HOSTED
)
self.assertEqual(
get_realm("onpremise").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX
)
self.assertEqual(get_realm("onpremise").message_visibility_limit, None)
self.assertEqual(get_realm("onpremise").upload_quota_gb, None)
def test_change_org_type(self) -> None:
realm = get_realm("zulip")
iago = self.example_user("iago")
self.assertEqual(realm.org_type, Realm.ORG_TYPES["business"]["id"])
do_change_realm_org_type(realm, Realm.ORG_TYPES["government"]["id"], acting_user=iago)
realm = get_realm("zulip")
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_ORG_TYPE_CHANGED
).last()
assert realm_audit_log is not None
expected_extra_data = {
"old_value": Realm.ORG_TYPES["business"]["id"],
"new_value": Realm.ORG_TYPES["government"]["id"],
}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
self.assertEqual(realm.org_type, Realm.ORG_TYPES["government"]["id"])
def test_change_realm_plan_type(self) -> None:
realm = get_realm("zulip")
iago = self.example_user("iago")
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_SELF_HOSTED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, None)
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_STANDARD, acting_user=iago)
realm = get_realm("zulip")
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED
).last()
assert realm_audit_log is not None
expected_extra_data = {
"old_value": Realm.PLAN_TYPE_SELF_HOSTED,
"new_value": Realm.PLAN_TYPE_STANDARD,
}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=iago)
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_LIMITED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_STANDARD_FREE, acting_user=iago)
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD_FREE)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=iago)
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_PLUS, acting_user=iago)
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_PLUS)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_SELF_HOSTED, acting_user=iago)
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_SELF_HOSTED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, None)
def test_message_retention_days(self) -> None:
self.login("iago")
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_SELF_HOSTED)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization owner")
self.login("desdemona")
req = dict(message_retention_days=orjson.dumps(0).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': 0")
req = dict(message_retention_days=orjson.dumps(-10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': -10")
req = dict(message_retention_days=orjson.dumps("invalid").decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': invalid")
req = dict(message_retention_days=orjson.dumps(-1).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': -1")
req = dict(message_retention_days=orjson.dumps("unlimited").decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Available on Zulip Standard. Upgrade to access.")
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_STANDARD, acting_user=None)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
def test_do_create_realm(self) -> None:
realm = do_create_realm("realm_string_id", "realm name")
self.assertEqual(realm.string_id, "realm_string_id")
self.assertEqual(realm.name, "realm name")
self.assertFalse(realm.emails_restricted_to_domains)
self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
self.assertEqual(realm.description, "")
self.assertTrue(realm.invite_required)
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_LIMITED)
self.assertEqual(realm.org_type, Realm.ORG_TYPES["unspecified"]["id"])
self.assertEqual(type(realm.date_created), datetime.datetime)
self.assertTrue(
RealmAuditLog.objects.filter(
realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created
).exists()
)
assert realm.notifications_stream is not None
self.assertEqual(realm.notifications_stream.name, "general")
self.assertEqual(realm.notifications_stream.realm, realm)
assert realm.signup_notifications_stream is not None
self.assertEqual(realm.signup_notifications_stream.name, "core team")
self.assertEqual(realm.signup_notifications_stream.realm, realm)
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_LIMITED)
def test_do_create_realm_with_keyword_arguments(self) -> None:
date_created = timezone_now() - datetime.timedelta(days=100)
realm = do_create_realm(
"realm_string_id",
"realm name",
emails_restricted_to_domains=True,
date_created=date_created,
email_address_visibility=Realm.EMAIL_ADDRESS_VISIBILITY_MEMBERS,
description="realm description",
invite_required=False,
plan_type=Realm.PLAN_TYPE_STANDARD_FREE,
org_type=Realm.ORG_TYPES["community"]["id"],
)
self.assertEqual(realm.string_id, "realm_string_id")
self.assertEqual(realm.name, "realm name")
self.assertTrue(realm.emails_restricted_to_domains)
self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_MEMBERS)
self.assertEqual(realm.description, "realm description")
self.assertFalse(realm.invite_required)
self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD_FREE)
self.assertEqual(realm.org_type, Realm.ORG_TYPES["community"]["id"])
self.assertEqual(realm.date_created, date_created)
self.assertTrue(
RealmAuditLog.objects.filter(
realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created
).exists()
)
assert realm.notifications_stream is not None
self.assertEqual(realm.notifications_stream.name, "general")
self.assertEqual(realm.notifications_stream.realm, realm)
assert realm.signup_notifications_stream is not None
self.assertEqual(realm.signup_notifications_stream.name, "core team")
self.assertEqual(realm.signup_notifications_stream.realm, realm)
def test_realm_is_web_public(self) -> None:
realm = get_realm("zulip")
# By default "Rome" is web_public in zulip realm
rome = Stream.objects.get(name="Rome")
self.assertEqual(rome.is_web_public, True)
self.assertEqual(realm.has_web_public_streams(), True)
self.assertEqual(realm.web_public_streams_enabled(), True)
with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
self.assertEqual(realm.has_web_public_streams(), False)
self.assertEqual(realm.web_public_streams_enabled(), False)
realm.enable_spectator_access = False
realm.save()
self.assertEqual(realm.has_web_public_streams(), False)
self.assertEqual(realm.web_public_streams_enabled(), False)
realm.enable_spectator_access = True
realm.save()
        # Convert Rome to an ordinary public (non-web-public) stream
rome.is_web_public = False
rome.save()
self.assertEqual(Stream.objects.filter(realm=realm, is_web_public=True).count(), 0)
self.assertEqual(realm.web_public_streams_enabled(), True)
self.assertEqual(realm.has_web_public_streams(), False)
with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
self.assertEqual(realm.web_public_streams_enabled(), False)
self.assertEqual(realm.has_web_public_streams(), False)
# Restore state
rome.is_web_public = True
rome.save()
self.assertEqual(Stream.objects.filter(realm=realm, is_web_public=True).count(), 1)
self.assertEqual(realm.has_web_public_streams(), True)
self.assertEqual(realm.web_public_streams_enabled(), True)
with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
self.assertEqual(realm.web_public_streams_enabled(), False)
self.assertEqual(realm.has_web_public_streams(), False)
realm.plan_type = Realm.PLAN_TYPE_LIMITED
realm.save()
self.assertEqual(Stream.objects.filter(realm=realm, is_web_public=True).count(), 1)
self.assertEqual(realm.web_public_streams_enabled(), False)
self.assertEqual(realm.has_web_public_streams(), False)
with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
self.assertEqual(realm.web_public_streams_enabled(), False)
self.assertEqual(realm.has_web_public_streams(), False)
class RealmAPITest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.login("desdemona")
def set_up_db(self, attr: str, value: Any) -> None:
realm = get_realm("zulip")
setattr(realm, attr, value)
realm.save(update_fields=[attr])
def update_with_api(self, name: str, value: Union[int, str]) -> Realm:
if not isinstance(value, str):
value = orjson.dumps(value).decode()
result = self.client_patch("/json/realm", {name: value})
self.assert_json_success(result)
return get_realm("zulip") # refresh data
def update_with_api_multiple_value(self, data_dict: Dict[str, Any]) -> Realm:
result = self.client_patch("/json/realm", data_dict)
self.assert_json_success(result)
return get_realm("zulip")
def do_test_realm_update_api(self, name: str) -> None:
"""Test updating realm properties.
If new realm properties have been added to the Realm model but the
test_values dict below has not been updated, this will raise an
assertion error.
"""
bool_tests: List[bool] = [False, True]
test_values: Dict[str, Any] = dict(
default_language=["de", "en"],
default_code_block_language=["javascript", ""],
description=["Realm description", "New description"],
digest_weekday=[0, 1, 2],
message_retention_days=[10, 20],
name=["Zulip", "New Name"],
waiting_period_threshold=[10, 20],
create_private_stream_policy=Realm.COMMON_POLICY_TYPES,
create_public_stream_policy=Realm.COMMON_POLICY_TYPES,
create_web_public_stream_policy=Realm.CREATE_WEB_PUBLIC_STREAM_POLICY_TYPES,
user_group_edit_policy=Realm.COMMON_POLICY_TYPES,
private_message_policy=Realm.PRIVATE_MESSAGE_POLICY_TYPES,
invite_to_stream_policy=Realm.COMMON_POLICY_TYPES,
wildcard_mention_policy=Realm.WILDCARD_MENTION_POLICY_TYPES,
bot_creation_policy=Realm.BOT_CREATION_POLICY_TYPES,
email_address_visibility=Realm.EMAIL_ADDRESS_VISIBILITY_TYPES,
video_chat_provider=[
dict(
video_chat_provider=orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
).decode(),
),
],
giphy_rating=[
Realm.GIPHY_RATING_OPTIONS["y"]["id"],
Realm.GIPHY_RATING_OPTIONS["r"]["id"],
],
message_content_delete_limit_seconds=[1000, 1100, 1200],
invite_to_realm_policy=Realm.INVITE_TO_REALM_POLICY_TYPES,
move_messages_between_streams_policy=Realm.COMMON_POLICY_TYPES,
add_custom_emoji_policy=Realm.COMMON_POLICY_TYPES,
delete_own_message_policy=Realm.COMMON_MESSAGE_POLICY_TYPES,
)
vals = test_values.get(name)
if Realm.property_types[name] is bool:
vals = bool_tests
if vals is None:
raise AssertionError(f"No test created for {name}")
if name == "video_chat_provider":
self.set_up_db(name, vals[0][name])
realm = self.update_with_api_multiple_value(vals[0])
self.assertEqual(getattr(realm, name), orjson.loads(vals[0][name]))
return
self.set_up_db(name, vals[0])
for val in vals[1:]:
realm = self.update_with_api(name, val)
self.assertEqual(getattr(realm, name), val)
realm = self.update_with_api(name, vals[0])
self.assertEqual(getattr(realm, name), vals[0])
def test_update_realm_properties(self) -> None:
for prop in Realm.property_types:
with self.subTest(property=prop):
self.do_test_realm_update_api(prop)
def update_with_realm_default_api(self, name: str, val: Any) -> None:
if not isinstance(val, str):
val = orjson.dumps(val).decode()
result = self.client_patch("/json/realm/user_settings_defaults", {name: val})
self.assert_json_success(result)
def do_test_realm_default_setting_update_api(self, name: str) -> None:
bool_tests: List[bool] = [False, True]
test_values: Dict[str, Any] = dict(
color_scheme=UserProfile.COLOR_SCHEME_CHOICES,
default_view=["recent_topics", "all_messages"],
emojiset=[emojiset["key"] for emojiset in RealmUserDefault.emojiset_choices()],
demote_inactive_streams=UserProfile.DEMOTE_STREAMS_CHOICES,
desktop_icon_count_display=[1, 2, 3],
notification_sound=["zulip", "ding"],
email_notifications_batching_period_seconds=[120, 300],
)
vals = test_values.get(name)
property_type = RealmUserDefault.property_types[name]
if property_type is bool:
vals = bool_tests
if vals is None:
raise AssertionError(f"No test created for {name}")
realm = get_realm("zulip")
realm_user_default = RealmUserDefault.objects.get(realm=realm)
do_set_realm_user_default_setting(realm_user_default, name, vals[0], acting_user=None)
for val in vals[1:]:
self.update_with_realm_default_api(name, val)
realm_user_default = RealmUserDefault.objects.get(realm=realm)
self.assertEqual(getattr(realm_user_default, name), val)
self.update_with_realm_default_api(name, vals[0])
realm_user_default = RealmUserDefault.objects.get(realm=realm)
self.assertEqual(getattr(realm_user_default, name), vals[0])
def test_update_default_realm_settings(self) -> None:
for prop in RealmUserDefault.property_types:
            # The enable_marketing_emails setting is not actually used and thus cannot
            # be updated using this endpoint. It is included in
            # notification_setting_types only to avoid duplicate code.
            # default_language is also currently present in the Realm table and is
            # therefore updated via the '/realm' endpoint, but this will be removed in
            # the future, after which the setting in the RealmUserDefault table will be
            # used.
if prop in ["default_language", "enable_login_emails", "enable_marketing_emails"]:
continue
self.do_test_realm_default_setting_update_api(prop)
def test_invalid_default_notification_sound_value(self) -> None:
result = self.client_patch(
"/json/realm/user_settings_defaults", {"notification_sound": "invalid"}
)
self.assert_json_error(result, "Invalid notification sound 'invalid'")
result = self.client_patch(
"/json/realm/user_settings_defaults", {"notification_sound": "zulip"}
)
self.assert_json_success(result)
realm = get_realm("zulip")
realm_user_default = RealmUserDefault.objects.get(realm=realm)
self.assertEqual(realm_user_default.notification_sound, "zulip")
def test_invalid_email_notifications_batching_period_setting(self) -> None:
result = self.client_patch(
"/json/realm/user_settings_defaults",
{"email_notifications_batching_period_seconds": -1},
)
self.assert_json_error(result, "Invalid email batching period: -1 seconds")
result = self.client_patch(
"/json/realm/user_settings_defaults",
{"email_notifications_batching_period_seconds": 7 * 24 * 60 * 60 + 10},
)
self.assert_json_error(result, "Invalid email batching period: 604810 seconds")
def test_ignored_parameters_in_realm_default_endpoint(self) -> None:
params = {"starred_message_counts": orjson.dumps(False).decode(), "emoji_set": "twitter"}
json_result = self.client_patch("/json/realm/user_settings_defaults", params)
self.assert_json_success(json_result)
realm = get_realm("zulip")
realm_user_default = RealmUserDefault.objects.get(realm=realm)
self.assertEqual(realm_user_default.starred_message_counts, False)
result = orjson.loads(json_result.content)
self.assertIn("ignored_parameters_unsupported", result)
self.assertEqual(result["ignored_parameters_unsupported"], ["emoji_set"])
def test_update_realm_allow_message_editing(self) -> None:
"""Tests updating the realm property 'allow_message_editing'."""
self.set_up_db("allow_message_editing", False)
self.set_up_db("message_content_edit_limit_seconds", 0)
self.set_up_db("edit_topic_policy", Realm.POLICY_ADMINS_ONLY)
realm = self.update_with_api("allow_message_editing", True)
realm = self.update_with_api("message_content_edit_limit_seconds", 100)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_EVERYONE)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE)
realm = self.update_with_api("allow_message_editing", False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE)
realm = self.update_with_api("message_content_edit_limit_seconds", 200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_ADMINS_ONLY)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_ADMINS_ONLY)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_MODERATORS_ONLY)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_MODERATORS_ONLY)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_FULL_MEMBERS_ONLY)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_FULL_MEMBERS_ONLY)
realm = self.update_with_api("edit_topic_policy", Realm.POLICY_MEMBERS_ONLY)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.edit_topic_policy, Realm.POLICY_MEMBERS_ONLY)
# Test an invalid value for edit_topic_policy
invalid_edit_topic_policy_value = 10
req = {"edit_topic_policy": orjson.dumps(invalid_edit_topic_policy_value).decode()}
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid edit_topic_policy")
def test_update_realm_delete_own_message_policy(self) -> None:
"""Tests updating the realm property 'delete_own_message_policy'."""
self.set_up_db("delete_own_message_policy", Realm.POLICY_EVERYONE)
realm = self.update_with_api("delete_own_message_policy", Realm.POLICY_ADMINS_ONLY)
self.assertEqual(realm.delete_own_message_policy, Realm.POLICY_ADMINS_ONLY)
self.assertEqual(realm.message_content_delete_limit_seconds, 600)
realm = self.update_with_api("delete_own_message_policy", Realm.POLICY_EVERYONE)
realm = self.update_with_api("message_content_delete_limit_seconds", 100)
self.assertEqual(realm.delete_own_message_policy, Realm.POLICY_EVERYONE)
self.assertEqual(realm.message_content_delete_limit_seconds, 100)
realm = self.update_with_api(
"message_content_delete_limit_seconds", orjson.dumps("unlimited").decode()
)
self.assertEqual(realm.message_content_delete_limit_seconds, None)
realm = self.update_with_api("message_content_delete_limit_seconds", 600)
self.assertEqual(realm.delete_own_message_policy, Realm.POLICY_EVERYONE)
self.assertEqual(realm.message_content_delete_limit_seconds, 600)
realm = self.update_with_api("delete_own_message_policy", Realm.POLICY_MODERATORS_ONLY)
self.assertEqual(realm.delete_own_message_policy, Realm.POLICY_MODERATORS_ONLY)
realm = self.update_with_api("delete_own_message_policy", Realm.POLICY_FULL_MEMBERS_ONLY)
self.assertEqual(realm.delete_own_message_policy, Realm.POLICY_FULL_MEMBERS_ONLY)
realm = self.update_with_api("delete_own_message_policy", Realm.POLICY_MEMBERS_ONLY)
self.assertEqual(realm.delete_own_message_policy, Realm.POLICY_MEMBERS_ONLY)
        # Test that 0 is an invalid value.
req = dict(message_content_delete_limit_seconds=orjson.dumps(0).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_content_delete_limit_seconds': 0")
        # Test that only the string "unlimited" is valid; other strings are invalid.
req = dict(message_content_delete_limit_seconds=orjson.dumps("invalid").decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(
result, "Bad value for 'message_content_delete_limit_seconds': invalid"
)
def test_change_invite_to_realm_policy_by_owners_only(self) -> None:
self.login("iago")
req = {"invite_to_realm_policy": Realm.POLICY_ADMINS_ONLY}
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization owner")
self.login("desdemona")
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.invite_to_realm_policy, Realm.POLICY_ADMINS_ONLY)
class ScrubRealmTest(ZulipTestCase):
def test_scrub_realm(self) -> None:
zulip = get_realm("zulip")
lear = get_realm("lear")
iago = self.example_user("iago")
othello = self.example_user("othello")
cordelia = self.lear_user("cordelia")
king = self.lear_user("king")
create_stream_if_needed(lear, "Shakespeare")
self.subscribe(cordelia, "Shakespeare")
self.subscribe(king, "Shakespeare")
Message.objects.all().delete()
UserMessage.objects.all().delete()
for i in range(5):
self.send_stream_message(iago, "Scotland")
self.send_stream_message(othello, "Scotland")
self.send_stream_message(cordelia, "Shakespeare")
self.send_stream_message(king, "Shakespeare")
Attachment.objects.filter(realm=zulip).delete()
Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt", size=512)
Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt", size=512)
Attachment.objects.filter(realm=lear).delete()
Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt", size=512)
Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt", size=512)
CustomProfileField.objects.create(realm=lear)
self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
with self.assertLogs(level="WARNING"):
do_scrub_realm(zulip, acting_user=None)
self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)
self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)
zulip_users = UserProfile.objects.filter(realm=zulip)
for user in zulip_users:
self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
lear_users = UserProfile.objects.filter(realm=lear)
for user in lear_users:
self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
|
|
'''
Configuration object
====================
The :class:`Config` object is an instance of a modified Python ConfigParser.
See the `ConfigParser documentation
<http://docs.python.org/library/configparser.html>`_ for more information.
Kivy has a configuration file which determines the default settings. In
order to change these settings, you can alter this file manually or use
the Config object. Please see the :ref:`Configure Kivy` section for more
information.
Usage of the Config object
--------------------------
To read a configuration token from a particular section::
>>> from kivy.config import Config
>>> Config.getint('kivy', 'show_fps')
0
Change the configuration and save it::
>>> Config.set('kivy', 'retain_time', '50')
>>> Config.write()
.. versionchanged:: 1.7.1
The ConfigParser should work correctly with utf-8 now. The values are
converted from ascii to unicode only when needed. The method get() returns
utf-8 strings.
Available configuration tokens
------------------------------
.. |log_levels| replace:: 'debug', 'info', 'warning', 'error' or 'critical'
:kivy:
`desktop`: int, 0 or 1
        This option controls desktop OS specific features, such as enabling
        a draggable scroll-bar in scroll views, disabling bubbles in
        TextInput etc. 0 is disabled, 1 is enabled.
`exit_on_escape`: int, 0 or 1
Enables exiting kivy when escape is pressed.
0 is disabled, 1 is enabled.
`log_level`: string, one of |log_levels|
Set the minimum log level to use.
`log_dir`: string
Path of log directory.
`log_name`: string
Format string to use for the filename of log file.
`log_enable`: int, 0 or 1
Activate file logging. 0 is disabled, 1 is enabled.
`keyboard_mode`: string
        Specifies the keyboard mode to use. It can be one of the following:
* '' - Let Kivy choose the best option for your current platform.
* 'system' - real keyboard.
* 'dock' - one virtual keyboard docked to a screen side.
* 'multi' - one virtual keyboard for every widget request.
* 'systemanddock' - virtual docked keyboard plus input from real
keyboard.
* 'systemandmulti' - analogous.
`keyboard_layout`: string
Identifier of the layout to use.
`window_icon`: string
Path of the window icon. Use this if you want to replace the default
pygame icon.
:postproc:
`double_tap_time`: int
Time allowed for the detection of double tap, in milliseconds.
`double_tap_distance`: float
Maximum distance allowed for a double tap, normalized inside the range
0 - 1000.
`triple_tap_time`: int
Time allowed for the detection of triple tap, in milliseconds.
`triple_tap_distance`: float
Maximum distance allowed for a triple tap, normalized inside the range
0 - 1000.
`retain_time`: int
Time allowed for a retain touch, in milliseconds.
`retain_distance`: int
If the touch moves more than is indicated by retain_distance, it will
not be retained. Argument should be an int between 0 and 1000.
`jitter_distance`: int
Maximum distance for jitter detection, normalized inside the range 0
- 1000.
`jitter_ignore_devices`: string, separated with commas
List of devices to ignore from jitter detection.
`ignore`: list of tuples
List of regions where new touches are ignored.
This configuration token can be used to resolve hotspot problems
with DIY hardware. The format of the list must be::
ignore = [(xmin, ymin, xmax, ymax), ...]
All the values must be inside the range 0 - 1.
:graphics:
`maxfps`: int, defaults to 60
Maximum FPS allowed.
`fullscreen`: int or string, one of 0, 1, 'fake' or 'auto'
Activate fullscreen. If set to `1`, a resolution of `width`
times `height` pixels will be used.
If set to `auto`, your current display's resolution will be
used instead. This is most likely what you want.
If you want to place the window in another display,
use `fake` and adjust `width`, `height`, `top` and `left`.
`width`: int
Width of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`height`: int
Height of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`fbo`: string, one of 'hardware', 'software' or 'force-hardware'
Selects the FBO backend to use.
`show_cursor`: int, one of 0 or 1
Show the cursor on the screen.
`position`: string, one of 'auto' or 'custom'
Position of the window on your display. If `auto` is used, you have no
control of the initial position: `top` and `left` are ignored.
`top`: int
Top position of the :class:`~kivy.core.window.Window`.
`left`: int
Left position of the :class:`~kivy.core.window.Window`.
`rotation`: int, one of 0, 90, 180 or 270
Rotation of the :class:`~kivy.core.window.Window`.
`resizable`: int, one of 0 or 1
If 0, the window will have a fixed size. If 1, the window will be
resizable.
:input:
You can create new input devices using this syntax::
# example of input provider instance
yourid = providerid,parameters
# example for tuio provider
default = tuio,127.0.0.1:3333
mytable = tuio,192.168.0.1:3334
.. seealso::
Check the providers in kivy.input.providers for the syntax to use
inside the configuration file.
:widgets:
`scroll_distance`: int
Default value of the
:data:`~kivy.uix.scrollview.ScrollView.scroll_distance`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_friction`: float
Default value of the
:data:`~kivy.uix.scrollview.ScrollView.scroll_friction`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_timeout`: int
Default value of the
:data:`~kivy.uix.scrollview.ScrollView.scroll_timeout`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_stoptime`: int
Default value of the
:data:`~kivy.uix.scrollview.ScrollView.scroll_stoptime`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
`scroll_moves`: int
Default value of the
:data:`~kivy.uix.scrollview.ScrollView.scroll_moves`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
:modules:
You can activate modules with this syntax::
modulename =
Anything after the = will be passed to the module as arguments.
Check the specific module's documentation for a list of accepted
arguments.
.. versionchanged:: 1.8.0
    `systemanddock` and `systemandmulti` have been added as possible values for
`keyboard_mode` in the kivy section. `exit_on_escape` has been added
to the kivy section.
.. versionchanged:: 1.2.0
`resizable` has been added to graphics section.
.. versionchanged:: 1.1.0
tuio is not listening by default anymore. Window icons are not copied to
user directory anymore. You can still set a new window icon by using the
``window_icon`` config setting.
.. versionchanged:: 1.0.8
`scroll_timeout`, `scroll_distance` and `scroll_friction` have been added.
`list_friction`, `list_trigger_distance` and `list_friction_bound`
have been removed. `keyboard_type` and `keyboard_layout` have been
removed from the widget. `keyboard_mode` and `keyboard_layout` have
been added to the kivy section.
'''
__all__ = ('Config', 'ConfigParser')
try:
from ConfigParser import ConfigParser as PythonConfigParser
except ImportError:
from configparser import RawConfigParser as PythonConfigParser
from os import environ
from os.path import exists
from kivy import kivy_config_fn
from kivy.logger import Logger, logger_config_update
from collections import OrderedDict
from kivy.utils import platform
from kivy.compat import PY2, string_types
_is_rpi = exists('/opt/vc/include/bcm_host.h')
# Version number of current configuration format
KIVY_CONFIG_VERSION = 10
#: Kivy configuration object
Config = None
class ConfigParser(PythonConfigParser):
'''Enhanced ConfigParser class that supports the addition of default
sections and default values.
.. versionadded:: 1.0.7
'''
def __init__(self):
PythonConfigParser.__init__(self)
self._sections = OrderedDict()
self.filename = None
self._callbacks = []
def add_callback(self, callback, section=None, key=None):
        '''Add a callback to be called when a specific section or key changes.
        If you don't specify a section or a key, the callback will be called
        for all section/key changes.
        Callbacks will receive 3 arguments: the section, key and value.
.. versionadded:: 1.4.1
'''
if section is None and key is not None:
raise Exception('You cannot specify a key without a section')
self._callbacks.append((callback, section, key))
def _do_callbacks(self, section, key, value):
for callback, csection, ckey in self._callbacks:
if csection is not None and csection != section:
continue
elif ckey is not None and ckey != key:
continue
callback(section, key, value)
def read(self, filename):
'''Read only one filename. In contrast to the original ConfigParser of
Python, this one is able to read only one file at a time. The last
read file will be used for the :meth:`write` method.
'''
if not isinstance(filename, string_types):
raise Exception('Only one filename is accepted ({})'.format(
string_types.__name__))
self.filename = filename
        # If we open the configuration file directly in utf-8,
        # we correctly get unicode values by default.
        # But when we try to save it again, all the values we didn't change
        # are still unicode, and the PythonConfigParser internals then do
        # a str() conversion -> fail.
        # Instead we currently do the conversion to utf-8 when values are
        # "get()", but we internally store them in ascii.
#with codecs.open(filename, 'r', encoding='utf-8') as f:
# self.readfp(f)
PythonConfigParser.read(self, filename)
def set(self, section, option, value):
'''Functions similarly to PythonConfigParser's set method, except that
the value is implicitly converted to a string.
'''
        e_value = value
        if not isinstance(value, string_types):
            # might be boolean, int, etc.
            e_value = str(value)
        elif PY2 and isinstance(value, unicode):
            # `unicode` only exists on Python 2; encode it for the underlying
            # parser, which stores byte strings there.
            e_value = value.encode('utf-8')
ret = PythonConfigParser.set(self, section, option, e_value)
self._do_callbacks(section, option, value)
return ret
def get(self, section, option, **kwargs):
value = PythonConfigParser.get(self, section, option, **kwargs)
if PY2:
if type(value) is str:
return value.decode('utf-8')
return value
def setdefaults(self, section, keyvalues):
'''Set a lot of keys/values in one section at the same time.
'''
self.adddefaultsection(section)
for key, value in keyvalues.items():
self.setdefault(section, key, value)
def setdefault(self, section, option, value):
'''Set the default value of a particular option.
'''
if self.has_option(section, option):
return
self.set(section, option, value)
def getdefault(self, section, option, defaultvalue):
'''Get an option. If not found, it will return the default value.
'''
if not self.has_section(section):
return defaultvalue
if not self.has_option(section, option):
return defaultvalue
return self.get(section, option)
def getdefaultint(self, section, option, defaultvalue):
'''Get an option. If not found, it will return the default value.
        The return value will always be converted to an integer.
.. versionadded:: 1.6.0
'''
return int(self.getdefault(section, option, defaultvalue))
def adddefaultsection(self, section):
'''Add a section if the section is missing.
'''
if self.has_section(section):
return
self.add_section(section)
def write(self):
'''Write the configuration to the last file opened using the
:meth:`read` method.
Return True if the write finished successfully.
'''
if self.filename is None:
return False
try:
with open(self.filename, 'w') as fd:
PythonConfigParser.write(self, fd)
except IOError:
Logger.exception('Unable to write the config <%s>' % self.filename)
return False
return True
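# A minimal usage sketch of the ConfigParser defined above (illustrative only,
# not part of Kivy): it combines the default helpers and the callback hook
# described in the docstrings. The section name, keys and filename used here
# are assumptions made for the example.
def _example_config_usage(filename='example.ini'):
    def on_change(section, key, value):
        # add_callback() delivers the section, key and new value.
        Logger.info('Config: %s:%s changed to %r' % (section, key, value))

    cp = ConfigParser()
    cp.adddefaultsection('myapp')
    cp.setdefaults('myapp', {'fps': '60', 'title': 'demo'})
    cp.add_callback(on_change, 'myapp', 'fps')
    cp.set('myapp', 'fps', '30')  # triggers on_change
    cp.filename = filename  # write() persists to the last read/assigned file
    cp.write()
    return cp.getdefaultint('myapp', 'fps', 60)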
if not environ.get('KIVY_DOC_INCLUDE'):
#
# Read, analyse configuration file
# Support upgrade of older config file versions
#
# Create default configuration
Config = ConfigParser()
Config.add_callback(logger_config_update, 'kivy', 'log_level')
    # Read the config file if it exists
if (exists(kivy_config_fn) and
'KIVY_USE_DEFAULTCONFIG' not in environ and
'KIVY_NO_CONFIG' not in environ):
try:
Config.read(kivy_config_fn)
except Exception as e:
            Logger.exception('Core: error while reading local'
                             ' configuration')
version = Config.getdefaultint('kivy', 'config_version', 0)
# Add defaults section
Config.adddefaultsection('kivy')
Config.adddefaultsection('graphics')
Config.adddefaultsection('input')
Config.adddefaultsection('postproc')
Config.adddefaultsection('widgets')
Config.adddefaultsection('modules')
# Upgrade default configuration until we have the current version
need_save = False
if version != KIVY_CONFIG_VERSION and 'KIVY_NO_CONFIG' not in environ:
Logger.warning('Config: Older configuration version detected'
' ({0} instead of {1})'.format(
version, KIVY_CONFIG_VERSION))
Logger.warning('Config: Upgrading configuration in progress.')
need_save = True
while version < KIVY_CONFIG_VERSION:
Logger.debug('Config: Upgrading from %d to %d' %
(version, version + 1))
if version == 0:
# log level
Config.setdefault('kivy', 'keyboard_repeat_delay', '300')
Config.setdefault('kivy', 'keyboard_repeat_rate', '30')
Config.setdefault('kivy', 'log_dir', 'logs')
Config.setdefault('kivy', 'log_enable', '1')
Config.setdefault('kivy', 'log_level', 'info')
Config.setdefault('kivy', 'log_name', 'kivy_%y-%m-%d_%_.txt')
Config.setdefault('kivy', 'window_icon', '')
# default graphics parameters
Config.setdefault('graphics', 'display', '-1')
Config.setdefault('graphics', 'fullscreen', 'no')
Config.setdefault('graphics', 'height', '600')
Config.setdefault('graphics', 'left', '0')
Config.setdefault('graphics', 'maxfps', '0')
Config.setdefault('graphics', 'multisamples', '2')
Config.setdefault('graphics', 'position', 'auto')
Config.setdefault('graphics', 'rotation', '0')
Config.setdefault('graphics', 'show_cursor', '1')
Config.setdefault('graphics', 'top', '0')
Config.setdefault('graphics', 'vsync', '1')
Config.setdefault('graphics', 'width', '800')
# input configuration
Config.setdefault('input', 'mouse', 'mouse')
# activate native input provider in configuration
                # from 1.0.9, don't activate mactouch by default, or apps are
                # unusable.
if platform == 'win':
Config.setdefault('input', 'wm_touch', 'wm_touch')
Config.setdefault('input', 'wm_pen', 'wm_pen')
elif platform == 'linux':
probesysfs = 'probesysfs'
if _is_rpi:
probesysfs += ',provider=hidinput'
Config.setdefault('input', '%(name)s', probesysfs)
# input postprocessing configuration
Config.setdefault('postproc', 'double_tap_distance', '20')
Config.setdefault('postproc', 'double_tap_time', '250')
Config.setdefault('postproc', 'ignore', '[]')
Config.setdefault('postproc', 'jitter_distance', '0')
Config.setdefault('postproc', 'jitter_ignore_devices',
'mouse,mactouch,')
Config.setdefault('postproc', 'retain_distance', '50')
Config.setdefault('postproc', 'retain_time', '0')
                # default configuration for the keyboard and list widgets
Config.setdefault('widgets', 'keyboard_layout', 'qwerty')
Config.setdefault('widgets', 'keyboard_type', '')
Config.setdefault('widgets', 'list_friction', '10')
Config.setdefault('widgets', 'list_friction_bound', '20')
Config.setdefault('widgets', 'list_trigger_distance', '5')
elif version == 1:
Config.remove_option('graphics', 'vsync')
Config.set('graphics', 'maxfps', '60')
elif version == 2:
# was a version to automatically copy windows icon in the user
# directory, but it's now not used anymore. User can still change
# the window icon by touching the config.
pass
elif version == 3:
# add token for scrollview
Config.setdefault('widgets', 'scroll_timeout', '55')
Config.setdefault('widgets', 'scroll_distance', '20')
Config.setdefault('widgets', 'scroll_friction', '1.')
# remove old list_* token
Config.remove_option('widgets', 'list_friction')
Config.remove_option('widgets', 'list_friction_bound')
Config.remove_option('widgets', 'list_trigger_distance')
elif version == 4:
Config.remove_option('widgets', 'keyboard_type')
Config.remove_option('widgets', 'keyboard_layout')
# add keyboard token
Config.setdefault('kivy', 'keyboard_mode', '')
Config.setdefault('kivy', 'keyboard_layout', 'qwerty')
elif version == 5:
Config.setdefault('graphics', 'resizable', '1')
elif version == 6:
# if the timeout is still the default value, change it
Config.setdefault('widgets', 'scroll_stoptime', '300')
Config.setdefault('widgets', 'scroll_moves', '5')
elif version == 7:
# desktop bool indicating whether to use desktop specific features
is_desktop = int(platform in ('win', 'macosx', 'linux'))
Config.setdefault('kivy', 'desktop', is_desktop)
Config.setdefault('postproc', 'triple_tap_distance', '20')
Config.setdefault('postproc', 'triple_tap_time', '375')
elif version == 8:
if Config.getint('widgets', 'scroll_timeout') == 55:
Config.set('widgets', 'scroll_timeout', '250')
elif version == 9:
Config.setdefault('kivy', 'exit_on_escape', '1')
#elif version == 1:
# # add here the command for upgrading from configuration 0 to 1
#
else:
# for future.
break
# Pass to the next version
version += 1
        # Indicate to the Config that we've upgraded to the latest version.
Config.set('kivy', 'config_version', KIVY_CONFIG_VERSION)
# Now, activate log file
Logger.logfile_activated = bool(Config.getint('kivy', 'log_enable'))
    # If no configuration exists, write the default one.
if ((not exists(kivy_config_fn) or need_save) and
'KIVY_NO_CONFIG' not in environ):
try:
Config.filename = kivy_config_fn
Config.write()
except Exception as e:
Logger.exception('Core: Error while saving default config file')
|
|
"""Tests for Broadlink devices."""
import broadlink.exceptions as blke
from homeassistant.components.broadlink.const import DOMAIN
from homeassistant.components.broadlink.device import get_domains
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.helpers.entity_registry import async_entries_for_device
from . import get_device
from tests.async_mock import patch
from tests.common import mock_device_registry, mock_registry
async def test_device_setup(hass):
"""Test a successful setup."""
device = get_device("Office")
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass)
assert mock_entry.state == ENTRY_STATE_LOADED
assert mock_api.auth.call_count == 1
assert mock_api.get_fwversion.call_count == 1
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
assert mock_init.call_count == 0
async def test_device_setup_authentication_error(hass):
"""Test we handle an authentication error."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_ERROR
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 1
assert mock_init.mock_calls[0][2]["context"]["source"] == "reauth"
assert mock_init.mock_calls[0][2]["data"] == {
"name": device.name,
**device.get_entry_data(),
}
async def test_device_setup_device_offline(hass):
"""Test we handle a device offline."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.DeviceOfflineError()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_os_error(hass):
"""Test we handle an OS error."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = OSError()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_broadlink_exception(hass):
"""Test we handle a Broadlink exception."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.BroadlinkException()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_ERROR
assert mock_api.auth.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_update_device_offline(hass):
"""Test we handle a device offline in the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.DeviceOfflineError()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_update_authorization_error(hass):
"""Test we handle an authorization error in the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = (blke.AuthorizationError(), None)
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_LOADED
assert mock_api.auth.call_count == 2
assert mock_api.check_sensors.call_count == 2
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
assert mock_init.call_count == 0
async def test_device_setup_update_authentication_error(hass):
"""Test we handle an authentication error in the update step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.AuthorizationError()
mock_api.auth.side_effect = (None, blke.AuthenticationError())
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 2
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 1
assert mock_init.mock_calls[0][2]["context"]["source"] == "reauth"
assert mock_init.mock_calls[0][2]["data"] == {
"name": device.name,
**device.get_entry_data(),
}
async def test_device_setup_update_broadlink_exception(hass):
"""Test we handle a Broadlink exception in the update step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.BroadlinkException()
with patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward, patch.object(
hass.config_entries.flow, "async_init"
) as mock_init:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_SETUP_RETRY
assert mock_api.auth.call_count == 1
assert mock_api.check_sensors.call_count == 1
assert mock_forward.call_count == 0
assert mock_init.call_count == 0
async def test_device_setup_get_fwversion_broadlink_exception(hass):
"""Test we load the device even if we cannot read the firmware version."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.get_fwversion.side_effect = blke.BroadlinkException()
with patch.object(hass.config_entries, "async_forward_entry_setup") as mock_forward:
mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_setup_get_fwversion_os_error(hass):
"""Test we load the device even if we cannot read the firmware version."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.get_fwversion.side_effect = OSError()
with patch.object(hass.config_entries, "async_forward_entry_setup") as mock_forward:
_, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
assert mock_entry.state == ENTRY_STATE_LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_setup_registry(hass):
"""Test we register the device and the entries correctly."""
device = get_device("Office")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
_, mock_entry = await device.setup_entry(hass)
await hass.async_block_till_done()
assert len(device_registry.devices) == 1
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
assert device_entry.identifiers == {(DOMAIN, device.mac)}
assert device_entry.name == device.name
assert device_entry.model == device.model
assert device_entry.manufacturer == device.manufacturer
assert device_entry.sw_version == device.fwversion
for entry in async_entries_for_device(entity_registry, device_entry.id):
assert entry.original_name.startswith(device.name)
async def test_device_unload_works(hass):
"""Test we unload the device."""
device = get_device("Office")
with patch.object(hass.config_entries, "async_forward_entry_setup"):
mock_api, mock_entry = await device.setup_entry(hass)
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await hass.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state == ENTRY_STATE_NOT_LOADED
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
domains = get_domains(mock_api.type)
assert mock_forward.call_count == len(domains)
assert forward_entries == domains
async def test_device_unload_authentication_error(hass):
"""Test we unload a device that failed the authentication step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
with patch.object(hass.config_entries, "async_forward_entry_setup"), patch.object(
hass.config_entries.flow, "async_init"
):
_, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await hass.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state == ENTRY_STATE_NOT_LOADED
assert mock_forward.call_count == 0
async def test_device_unload_update_failed(hass):
"""Test we unload a device that failed the update step."""
device = get_device("Office")
mock_api = device.get_mock_api()
mock_api.check_sensors.side_effect = blke.DeviceOfflineError()
with patch.object(hass.config_entries, "async_forward_entry_setup"):
_, mock_entry = await device.setup_entry(hass, mock_api=mock_api)
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
await hass.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state == ENTRY_STATE_NOT_LOADED
assert mock_forward.call_count == 0
async def test_device_update_listener(hass):
"""Test we update device and entity registry when the entry is renamed."""
device = get_device("Office")
device_registry = mock_device_registry(hass)
entity_registry = mock_registry(hass)
mock_api, mock_entry = await device.setup_entry(hass)
await hass.async_block_till_done()
with patch(
"homeassistant.components.broadlink.device.blk.gendevice", return_value=mock_api
):
hass.config_entries.async_update_entry(mock_entry, title="New Name")
await hass.async_block_till_done()
device_entry = device_registry.async_get_device(
{(DOMAIN, mock_entry.unique_id)}, set()
)
assert device_entry.name == "New Name"
for entry in async_entries_for_device(entity_registry, device_entry.id):
assert entry.original_name.startswith("New Name")
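# Hypothetical helper (a sketch, not part of these tests): most setup tests
# above repeat the same pair of patch.object() context managers. A small
# contextmanager like the one below could factor that boilerplate out; the
# helper name and the (mock_forward, mock_init) return shape are assumptions,
# and the import is placed here only to keep the sketch self-contained.
from contextlib import contextmanager


@contextmanager
def _mock_setup_and_flow(hass):
    """Yield (mock_forward, mock_init) with entry setup and flow init patched."""
    with patch.object(
        hass.config_entries, "async_forward_entry_setup"
    ) as mock_forward, patch.object(
        hass.config_entries.flow, "async_init"
    ) as mock_init:
        yield mock_forward, mock_init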
|
|
from __future__ import annotations
import codecs
import copy
import io
import os
from collections import OrderedDict
import dials.pychef
import libtbx.phil
from cctbx.array_family import flex
from dials.algorithms.merging.merge import make_dano_plots
from dials.pychef import dose_phil_str
from dials.report.analysis import batch_dependent_properties
from dials.report.plots import (
IntensityStatisticsPlots,
ResolutionPlotsAndStats,
i_over_sig_i_vs_batch_plot,
make_image_range_table,
scale_rmerge_vs_batch_plot,
)
from dials.util.batch_handling import batch_manager
from iotbx import merging_statistics
from iotbx.reflection_file_reader import any_reflection_file
from mmtbx.scaling import printed_output
from mmtbx.scaling.xtriage import master_params as xtriage_master_params
from mmtbx.scaling.xtriage import xtriage_analyses
import xia2.Handlers.Environment
import xia2.Handlers.Files
from xia2.cli.plot_multiplicity import master_phil, plot_multiplicity
from xia2.Modules.Analysis import batch_phil_scope, phil_scope, separate_unmerged
class _xtriage_output(printed_output):
def __init__(self, out):
super().__init__(out)
self.gui_output = True
self._out_orig = self.out
self.out = io.StringIO()
self._sub_header_to_out = {}
def show_big_header(self, text):
pass
def show_header(self, text):
self._out_orig.write(self.out.getvalue())
self.out = io.StringIO()
super().show_header(text)
def show_sub_header(self, title):
self._out_orig.write(self.out.getvalue())
self.out = io.StringIO()
self._current_sub_header = title
assert title not in self._sub_header_to_out
self._sub_header_to_out[title] = self.out
def flush(self):
self._out_orig.write(self.out.getvalue())
self.out.flush()
self._out_orig.flush()
class Report:
def __init__(
self,
intensities,
params,
batches=None,
scales=None,
dose=None,
report_dir=None,
experiments=None,
):
self.params = params
if params.d_min or params.d_max:
intensities = intensities.resolution_filter(
d_min=params.d_min, d_max=params.d_max
).customized_copy(info=intensities.info())
if batches:
batches = batches.resolution_filter(
d_min=params.d_min, d_max=params.d_max
)
if scales:
scales = scales.resolution_filter(
d_min=params.d_min, d_max=params.d_max
)
self.intensities = intensities
self.experiments = experiments
self.batches = batches
self.scales = scales
self.dose = dose
self.report_dir = report_dir
self._xanalysis = None
assert self.intensities is not None
# assert self.batches is not None
if self.batches is not None and len(self.params.batch) == 0:
separate = separate_unmerged(self.intensities, self.batches)
scope = libtbx.phil.parse(batch_phil_scope)
for i, batches in separate.batches.items():
batch_params = scope.extract().batch[0]
batch_params.id = i
batch_params.range = (
flex.min(batches.data()),
flex.max(batches.data()),
)
self.params.batch.append(batch_params)
if self.params.anomalous:
self.intensities = self.intensities.as_anomalous_array().customized_copy(
info=self.intensities.info()
)
if self.batches is not None:
self.batches = self.batches.as_anomalous_array()
self.intensities.setup_binner(n_bins=self.params.resolution_bins)
self.merged_intensities = self.intensities.merge_equivalents().array()
def multiplicity_plots(self, dest_path=None):
settings = master_phil.extract()
settings.size_inches = (5, 5)
settings.show_missing = True
settings.slice_index = 0
mult_json_files = {}
mult_img_files = {}
rd = dest_path or self.report_dir or "."
for settings.slice_axis in ("h", "k", "l"):
settings.plot.filename = os.path.join(
rd,
"multiplicities_%s_%i.png"
% (settings.slice_axis, settings.slice_index),
)
settings.json.filename = os.path.join(
rd,
"multiplicities_%s_%i.json"
% (settings.slice_axis, settings.slice_index),
)
# settings.slice_axis = axis
plot_multiplicity(self.intensities, settings)
mult_json_files[settings.slice_axis] = settings.json.filename
with open(settings.plot.filename, "rb") as fh:
data = codecs.encode(fh.read(), encoding="base64").decode("ascii")
mult_img_files[settings.slice_axis] = data.replace("\n", "")
return OrderedDict(
("multiplicity_%s" % axis, mult_img_files[axis]) for axis in ("h", "k", "l")
)
def symmetry_table_html(self):
symmetry_table_html = """
<p>
<b>Unit cell:</b> %s
<br>
<b>Space group:</b> %s
</p>
""" % (
            str(self.intensities.unit_cell()),
            self.intensities.space_group_info().symbol_and_number(),
)
return symmetry_table_html
def xtriage_report(self):
xtriage_success = []
xtriage_warnings = []
xtriage_danger = []
s = io.StringIO()
pout = printed_output(out=s)
xtriage_params = xtriage_master_params.fetch(sources=[]).extract()
xtriage_params.scaling.input.xray_data.skip_sanity_checks = True
xanalysis = xtriage_analyses(
miller_obs=self.merged_intensities,
unmerged_obs=self.intensities,
text_out=pout,
params=xtriage_params,
)
if self.report_dir is not None:
with open(os.path.join(self.report_dir, "xtriage.log"), "w") as f:
f.write(s.getvalue())
xia2.Handlers.Files.FileHandler.record_log_file(
"Xtriage", os.path.join(self.report_dir, "xtriage.log")
)
xs = io.StringIO()
xout = _xtriage_output(xs)
xanalysis.show(out=xout)
xout.flush()
sub_header_to_out = xout._sub_header_to_out
issues = xanalysis.summarize_issues()
# issues.show()
for level, text, sub_header in issues._issues:
summary = sub_header_to_out.get(sub_header, io.StringIO()).getvalue()
d = {"level": level, "text": text, "summary": summary, "header": sub_header}
if level == 0:
xtriage_success.append(d)
elif level == 1:
xtriage_warnings.append(d)
elif level == 2:
xtriage_danger.append(d)
self._xanalysis = xanalysis
return xtriage_success, xtriage_warnings, xtriage_danger
def batch_dependent_plots(self):
binned_batches, rmerge, isigi, scalesvsbatch = batch_dependent_properties(
self.batches, self.intensities, self.scales
)
batches = [{"id": b.id, "range": b.range} for b in self.params.batch]
bm = batch_manager(binned_batches, batches)
d = {}
d.update(i_over_sig_i_vs_batch_plot(bm, isigi))
d.update(scale_rmerge_vs_batch_plot(bm, rmerge, scalesvsbatch))
if self.experiments is not None:
d["image_range_table"] = make_image_range_table(self.experiments, bm)
return d
def resolution_plots_and_stats(self):
self.merging_stats = merging_statistics.dataset_statistics(
self.intensities,
n_bins=self.params.resolution_bins,
cc_one_half_significance_level=self.params.cc_half_significance_level,
eliminate_sys_absent=self.params.eliminate_sys_absent,
use_internal_variance=self.params.use_internal_variance,
assert_is_not_unique_set_under_symmetry=False,
)
intensities_anom = self.intensities.as_anomalous_array()
intensities_anom = intensities_anom.map_to_asu().customized_copy(
info=self.intensities.info()
)
self.merging_stats_anom = merging_statistics.dataset_statistics(
intensities_anom,
n_bins=self.params.resolution_bins,
anomalous=True,
cc_one_half_significance_level=self.params.cc_half_significance_level,
eliminate_sys_absent=self.params.eliminate_sys_absent,
use_internal_variance=self.params.use_internal_variance,
assert_is_not_unique_set_under_symmetry=False,
)
is_centric = self.intensities.space_group().is_centric()
plotter = ResolutionPlotsAndStats(
self.merging_stats, self.merging_stats_anom, is_centric
)
d = OrderedDict()
d.update(plotter.make_all_plots(cc_one_half_method=self.params.cc_half_method))
overall_stats = plotter.overall_statistics_table(self.params.cc_half_method)
merging_stats = plotter.merging_statistics_table(self.params.cc_half_method)
return overall_stats, merging_stats, d
def intensity_stats_plots(self, run_xtriage=True):
plotter = IntensityStatisticsPlots(
self.intensities,
anomalous=self.params.anomalous,
n_resolution_bins=self.params.resolution_bins,
xtriage_analyses=self._xanalysis,
run_xtriage_analysis=run_xtriage,
)
d = {}
d.update(plotter.generate_resolution_dependent_plots())
d.update(plotter.generate_miscellanous_plots())
return d
def pychef_plots(self, n_bins=8):
intensities = self.intensities
batches = self.batches
dose = self.dose
if self.params.chef_min_completeness:
d_min = dials.pychef.resolution_limit(
self.intensities,
min_completeness=self.params.chef_min_completeness,
n_bins=n_bins,
)
print("Estimated d_min for CHEF analysis: %.2f" % d_min)
sel = flex.bool(intensities.size(), True)
d_spacings = intensities.d_spacings().data()
sel &= d_spacings >= d_min
intensities = intensities.select(sel)
batches = batches.select(sel)
if dose is not None:
dose = dose.select(sel)
if dose is None:
dose = dials.pychef.batches_to_dose(batches.data(), self.params.dose)
else:
dose = dose.data()
pychef_stats = dials.pychef.Statistics(intensities, dose, n_bins=n_bins)
return pychef_stats.to_dict()
def dano_plots(self):
anom_data = {self.intensities.info().wavelength: self.merged_intensities}
data = make_dano_plots(anom_data)
return {"dano": data["dF"]["dano"]}
@classmethod
def from_unmerged_mtz(cls, unmerged_mtz, params, report_dir):
reader = any_reflection_file(unmerged_mtz)
assert reader.file_type() == "ccp4_mtz"
arrays = reader.as_miller_arrays(merge_equivalents=False)
for ma in arrays:
if ma.info().labels == ["BATCH"]:
batches = ma
elif ma.info().labels == ["I", "SIGI"]:
intensities = ma
elif ma.info().labels == ["I(+)", "SIGI(+)", "I(-)", "SIGI(-)"]:
intensities = ma
elif ma.info().labels == ["SCALEUSED"]:
scales = ma
assert intensities is not None
assert batches is not None
mtz_object = reader.file_content()
indices = mtz_object.extract_original_index_miller_indices()
intensities = intensities.customized_copy(
indices=indices, info=intensities.info()
)
batches = batches.customized_copy(indices=indices, info=batches.info())
report = cls(
intensities, params, batches=batches, scales=scales, report_dir=report_dir
)
        report.mtz_object = mtz_object  # nasty but xia2.report relies on this attribute
return report
@classmethod
def from_data_manager(cls, data_manager, params=None):
if params is None:
params = phil_scope.extract()
params.dose.batch = []
intensities, batches, scales = data_manager.reflections_as_miller_arrays(
combined=True
)
params.batch = []
scope = libtbx.phil.parse(batch_phil_scope)
dose_phil = libtbx.phil.parse(dose_phil_str).extract()
for expt in data_manager.experiments:
batch_params = scope.extract().batch[0]
batch_params.id = data_manager.identifiers_to_ids_map[expt.identifier]
batch_params.range = expt.scan.get_batch_range()
params.batch.append(batch_params)
dose_batch = copy.deepcopy(dose_phil.dose.batch[0])
dose_batch.range = expt.scan.get_batch_range()
dose_batch.dose_start = 1
dose_batch.dose_step = 1
params.dose.batch.append(dose_batch)
intensities.set_observation_type_xray_intensity()
return cls(
intensities,
params,
batches=batches,
scales=scales,
experiments=data_manager.experiments,
)
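# Illustrative sketch only (not part of xia2): driving the Report class above
# end to end from an unmerged MTZ file. The filename and output directory are
# assumptions; phil_scope is the xia2.Modules.Analysis scope imported at the
# top of this module.
def _example_report(unmerged_mtz="scaled_unmerged.mtz", report_dir="."):
    params = phil_scope.extract()
    report = Report.from_unmerged_mtz(unmerged_mtz, params, report_dir)
    # Resolution-binned merging statistics plus the accompanying plot data.
    overall_stats, merging_stats, plots = report.resolution_plots_and_stats()
    # Batch-dependent plots use the BATCH column read by from_unmerged_mtz.
    plots.update(report.batch_dependent_plots())
    return overall_stats, merging_stats, plots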
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An API for reversible (bijective) transformations of random variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
class _Bijector(object):
"""An interface for transforming random variable(s).
A bijector is characterized by three operations:
1) Forward Evaluation
Useful for turning one random outcome into another random outcome from a
different distribution.
2) Inverse Evaluation
Useful for "reversing" a transformation to compute one probability in terms
of another.
3) (log o det o Jacobian o inverse)(x)
"The log of the determinant of the matrix of all first-order partial
derivatives of the inverse function."
Useful for inverting a transformation to compute one probability in terms
of another. Geometrically, the det(Jacobian) is the volume of the
transformation and is used to scale the probability.
By convention, transformations of random variables are named in terms of the
  forward transformation. The forward transformation creates samples; the
  inverse is useful for computing probabilities.
Example transformations:
"Exponential"
```
Y = g(X) = exp(X)
X ~ Normal(0, 1) # Univariate.
```
Implies:
```
g^{-1}(Y) = log(Y)
|Jacobian(g^{-1})(y)| = 1 / y
Y ~ LogNormal(0, 1), i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= (1 / y) Normal(log(y); 0, 1)
```
"ShiftAndScale"
```
Y = g(X) = sqrtSigma * X + mu
X ~ MultivariateNormal(0, I_d)
```
Implies:
```
g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
|Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= det(sqrtSigma)^(-d) *
MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
```
Example use:
Basic properties:
```python
x = ... # A tensor.
# Evaluate forward transformation.
fwd_x = my_bijector.forward(x)
x != my_bijector.forward(fwd_x) # Not equal because g(x) != g(g(x)).
x == my_bijector.inverse(fwd_x)
```
Computing a log-likelihood:
```python
def transformed_log_pdf(bijector, log_pdf, x):
return (bijector.inverse_log_det_jacobian(x) +
log_pdf(bijector.inverse(x)))
```
Transforming a random outcome:
```python
def transformed_sample(bijector, x):
return bijector.forward(x)
```
"""
# TODO(b/30476956): Try to remove constructor dependence on shape util.
def __init__(self, shaper=None, name=None):
"""Constructs Bijector.
A bijector transforms random variables into new random variables. Managing
shape is typically an important piece of this so a Bijector is usually
composed of ShapeUtil. The ShapeUtil object handles input shape checks as
well as reshaping/transposing for easier linear algebra operations.
Example:
```python
# Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
identity = Identity(ShapeUtil(batch_ndims=4, event_ndims=1))
# Create the Y = g(X) = exp(X) transform which operates on matrices.
exp = Exp(ShapeUtil(batch_ndims=0, event_ndims=2))
```
See Bijector subclass doc for more details and examples.
Args:
shaper: object used for managing and manipulating shape, typically an
instance of ShapeUtil.
name: The name to give Ops created by the initializer.
"""
self._shaper = shaper
self._name = name or type(self).__name__
@property
def shaper(self):
"""Returns shape object used to manage shape constraints."""
return self._shaper
@property
def name(self):
"""Returns the string name of this bijector."""
return self._name
def forward(self, x, name='forward'):
"""Returns the forward bijector evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
"""
with ops.name_scope(self.name):
with ops.op_scope([x], name):
x = ops.convert_to_tensor(x)
return self._forward(x)
def inverse(self, x, name='inverse'):
"""Returns the inverse bijector evaluation, i.e., X = g^{-1}(Y).
Args:
x: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
"""
with ops.name_scope(self.name):
with ops.op_scope([x], name):
x = ops.convert_to_tensor(x)
try:
return self._inverse(x)
except NotImplementedError:
return self._inverse_and_inverse_log_det_jacobian(x)[0]
def inverse_log_det_jacobian(self, x, name='inverse_log_det_jacobian'):
"""Returns the (log o det o Jacobian o inverse)(x).
    Mathematically, returns: log|det(Jacobian(g^{-1})(Y))|.
Args:
x: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
Returns:
`Tensor`.
"""
with ops.name_scope(self.name):
with ops.op_scope([x], name):
x = ops.convert_to_tensor(x)
try:
return self._inverse_log_det_jacobian(x)
except NotImplementedError:
return self._inverse_and_inverse_log_det_jacobian(x)[1]
def inverse_and_inverse_log_det_jacobian(
self, x, name='inverse_and_inverse_log_det_jacobian'):
"""Returns both the inverse evaluation and inverse_log_det_jacobian.
Enables possibly more efficient calculation when both inverse and
corresponding Jacobian are needed.
See `inverse()`, `inverse_log_det_jacobian()` for more details.
Args:
x: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
Returns:
      List of two `Tensor` items, inverse and inverse_log_det_jacobian.
"""
with ops.name_scope(self.name):
with ops.op_scope([x], name):
x = ops.convert_to_tensor(x)
try:
return self._inverse_and_inverse_log_det_jacobian(x)
except NotImplementedError:
return self._inverse(x), self._inverse_log_det_jacobian(x)
# Subclass interface.
def _forward(self, x):
"""Subclass implementation of forward().
Args:
x: `Tensor`. The input to the "forward" evaluation.
Raises:
`NotImplementedError`: if subclass implementation not provided
Returns:
`Tensor`.
"""
raise NotImplementedError('_forward not implemented')
def _inverse(self, x):
"""Subclass implementation of inverse().
Args:
x: `Tensor`. The input to the "inverse" evaluation.
Raises:
`NotImplementedError`: if subclass implementation not provided
Returns:
`Tensor`.
"""
raise NotImplementedError('_inverse not implemented')
def _inverse_log_det_jacobian(self, x):
"""Subclass implementation of inverse_log_det_jacobian().
Args:
x: `Tensor`. The input to the "inverse" Jacobian evaluation.
Raises:
`NotImplementedError`: if subclass implementation not provided
Returns:
`Tensor`.
"""
raise NotImplementedError('_inverse_log_det_jacobian not implemented')
def _inverse_and_inverse_log_det_jacobian(self, x):
"""Subclass implementation of inverse_and_inverse_log_det_jacobian().
Args:
x: `Tensor`. The input to the "inverse" evaluation.
Returns:
List of two `Tensor` items, inverse and inverse_log_det_jacobian.
"""
raise NotImplementedError(
'_inverse_and_inverse_log_det_jacobian not implemented')
class _Identity(_Bijector):
"""Bijector which computes Y = g(X) = X.
Example Use:
```python
# Create the Y=g(X)=X transform which works only on Tensors with 1 batch
# ndims and 1 event ndim (i.e., vector of vectors).
identity = Identity(ShapeUtil(batch_ndims=1, event_ndims=1))
x = [[1., 2],
[3, 4]]
x == identity.forward(x) == identity.inverse(x)
```
"""
# TODO(b/30476956): Try to remove constructor dependence on shape util.
def __init__(self, shaper=None, name='Identity'):
super(_Identity, self).__init__(shaper, name)
def _forward(self, x):
return x
def _inverse(self, x):
return x
def _inverse_log_det_jacobian(self, x):
result_shape = self.shaper.get_shape(
x, sample=True, batch=True, event=False)
return array_ops.zeros(result_shape, dtype=x.dtype)
class _Exp(_Bijector):
"""Bijector which computes Y = g(X) = exp(X).
Example Use:
```python
# Create the Y=g(X)=exp(X) transform which works only on Tensors with 1
# batch ndims and 2 event ndim (i.e., vector of matrices).
exp = Exp(ShapeUtil(batch_ndims=1, event_ndims=2))
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
exp(x) == exp.forward(x)
log(x) == exp.inverse(x)
```
"""
# TODO(b/30476956): Try to remove constructor dependence on shape util.
def __init__(self, shaper=None, name='Exp'):
super(_Exp, self).__init__(shaper, name)
def _forward(self, x):
return math_ops.exp(x)
def _inverse(self, x):
return math_ops.log(x)
def _inverse_log_det_jacobian(self, x):
d = self.shaper.get_event_dims(x)
return -math_ops.reduce_sum(math_ops.log(x), d)
def _inverse_and_inverse_log_det_jacobian(self, x):
y = math_ops.log(x)
d = self.shaper.get_event_dims(x)
return y, -math_ops.reduce_sum(y, d)
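# A minimal usage sketch for the classes above (hypothetical; it assumes a
# ShapeUtil instance and TensorFlow imported as `tf`, neither of which is shown
# in this excerpt):
#
#   exp = _Exp(ShapeUtil(batch_ndims=0, event_ndims=1))
#   y = tf.constant([1., 2., 3.])
#   x, ildj = exp.inverse_and_inverse_log_det_jacobian(y)
#   # x is log(y); ildj is -sum(log(y)) over the event dimension, matching
#   # _Exp._inverse_and_inverse_log_det_jacobian above.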
|
|
"""Formula unit tests."""
import pytest
from vivid.classes.point import Point
from vivid.classes.interval import Interval
from vivid.classes.attribute import Attribute
from vivid.classes.relation import Relation
from vivid.classes.attribute_structure import AttributeStructure
from vivid.classes.attribute_system import AttributeSystem
from vivid.classes.named_state import NamedState
from vivid.classes.attribute_interpretation import AttributeInterpretation
from vivid.classes.relation_symbol import RelationSymbol
from vivid.classes.vocabulary import Vocabulary
from vivid.classes.constant_assignment import ConstantAssignment
from vivid.classes.variable_assignment import VariableAssignment
from vivid.classes.formula import Formula
from vivid.classes.assumption_base import AssumptionBase
def test___init__():
"""Test Formula constructor."""
def test_TypeError(vocabulary, name, *terms):
"""Test TypeError catching in Formula constructor."""
with pytest.raises(TypeError) as excinfo:
Formula(vocabulary, name, *terms)
def test_ValueError(vocabulary, name, *terms):
"""Test ValueError catching in Formula constructor."""
with pytest.raises(ValueError) as excinfo:
Formula(vocabulary, name, *terms)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
test_TypeError(None, 'Ahead', 'C1', 'V1')
test_TypeError(object, 'Ahead', 'C1', 'V1')
test_TypeError(vocabulary, None, 'C1', 'V1')
test_TypeError(vocabulary, object, 'C1', 'V1')
test_ValueError(vocabulary, 'Ahead')
test_ValueError(vocabulary, 'Ahead', 'nope')
F = Formula(vocabulary, 'Ahead', 'C1', 'C1', 'C1')
assert F._terms == ['C1', 'C1', 'C1']
def test___eq__():
"""Test == operator for Formula object."""
def test_TypeError(f1, f2):
"""Test TypeError catching in == operator of Formula."""
with pytest.raises(TypeError) as excinfo:
f1 == f2
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
vocabulary2 = Vocabulary(['C1', 'C2', 'C3'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
f = Formula(vocabulary, 'Ahead', 'C1', 'V1')
f1 = Formula(vocabulary, 'Ahead', 'C1')
f2 = Formula(vocabulary, 'Ahead', 'V1', 'C1')
f3 = Formula(vocabulary, 'Ahead', 'V1', 'C1', 'V1', 'C1')
f4 = Formula(vocabulary, 'Behind', 'C1', 'V1')
f5 = Formula(vocabulary2, 'Ahead', 'C1', 'V1')
test_TypeError(f, None)
test_TypeError(f, object)
assert f == f
assert not f == f1
assert f == f2
assert f == f3
assert not f == f4
assert not f == f5
def test___ne__():
"""Test != operator for Formula object."""
def test_TypeError(f1, f2):
"""Test TypeError catching in != operator of Formula."""
with pytest.raises(TypeError) as excinfo:
f1 != f2
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
vocabulary2 = Vocabulary(['C1', 'C2', 'C3'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
f = Formula(vocabulary, 'Ahead', 'C1', 'V1')
f1 = Formula(vocabulary, 'Ahead', 'C1')
f2 = Formula(vocabulary, 'Ahead', 'V1', 'C1')
f3 = Formula(vocabulary, 'Ahead', 'V1', 'C1', 'V1', 'C1')
f4 = Formula(vocabulary, 'Behind', 'C1', 'V1')
f5 = Formula(vocabulary2, 'Ahead', 'C1', 'V1')
test_TypeError(f, None)
test_TypeError(f, object)
assert not f != f
assert f != f1
assert not f != f2
assert not f != f3
assert f != f4
assert f != f5
def test___add__():
"""Test + operator for Formula object."""
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
am_rs = RelationSymbol('AM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, am_rs, pm_rs], ['V1', 'V2'])
f1 = Formula(vocabulary, 'Ahead', 'C1', 'V1')
f2 = Formula(vocabulary, 'Behind', 'C1', 'V1')
f3 = Formula(vocabulary, 'PM', 'C1')
f4 = Formula(vocabulary, 'AM', 'C1')
a1 = AssumptionBase(f1, f2)
a = f1 + f2
assert a._vocabulary is f1._vocabulary is f2._vocabulary
a = f2 + f1
assert a._vocabulary is f1._vocabulary is f2._vocabulary
assert hasattr(a, "_is_AssumptionBase")
a = f3 + a1
assert a._vocabulary is a1._vocabulary is f3._vocabulary
assert hasattr(a, "_is_AssumptionBase")
a = f1 + f2 + f3 + f4
assert a._vocabulary is f1._vocabulary is f2._vocabulary is f3._vocabulary \
is f4._vocabulary
assert hasattr(a, "_is_AssumptionBase")
assert len(a) == 4
def test___str__():
"""Test str(Formula)."""
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
f = Formula(vocabulary, 'Ahead', 'C1', 'V1')
assert str(f) == "Ahead(C1, V1)"
def test___repr__():
"""Test repr(Formula)."""
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
f = Formula(vocabulary, 'Ahead', 'C1', 'V1')
assert repr(f) == "Ahead(C1, V1)"
def test__key():
"""Test key for hash function."""
C, R, V = ['C'], [RelationSymbol('R', 1)], ['V']
vocabulary = Vocabulary(C, R, V)
formula = Formula(vocabulary, 'R', 'C', 'V')
assert (hash(vocabulary), 'R', ('C', 'V')) == formula._key()
def test___hash__():
"""Test hash(Vocabulary)."""
C, R, V = ['C'], [RelationSymbol('R', 1)], ['V']
vocabulary = Vocabulary(C, R, V)
formula1 = Formula(vocabulary, 'R', 'C', 'V')
formula2 = Formula(vocabulary, 'R', 'V', 'C')
assert hash(formula1) == hash(formula2)
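    # formula1 and formula2 differ only in the order of their terms, so the
    # equal hashes suggest that Formula._key() sorts its terms before hashing.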
def test___deepcopy__():
"""Test Test copy.deepcopy for Formula object."""
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
f = Formula(vocabulary, 'Ahead', 'C1', 'V1')
from copy import deepcopy
f_copy = deepcopy(f)
assert f == f_copy
assert f is not f_copy
assert f._vocabulary is f_copy._vocabulary
assert f._terms is not f_copy._terms
f._name = "F"
assert f._name != f_copy._name
def test_assign_truth_value():
"""Test assign_truth_value() function of Formula object."""
def test_TypeError(formula, attribute_interpretation, named_state, X):
"""Test TypeError catching in assign_truth_value()."""
with pytest.raises(TypeError) as excinfo:
formula.assign_truth_value(
attribute_interpretation, named_state, X)
def test_ValueError(formula, attribute_interpretation, named_state, X):
"""Test ValueError catching in assign_truth_value()."""
with pytest.raises(ValueError) as excinfo:
formula.assign_truth_value(
attribute_interpretation, named_state, X)
a = Attribute('hour', [Interval(0, 23)])
a2 = Attribute('minute', [Interval(0, 59)])
r_pm = Relation('R1(h1) <=> h1 > 11', ['hour'], 1)
r_am = Relation('R2(h1) <=> h1 <= 11', ['hour'], 2)
r_ahead = Relation(
'R3(h1,m1,hhh2,mm2) <=> h1 > hhh2 or (h1 = hhh2 and m1 > mm2)',
['hour', 'minute', 'hour', 'minute'], 3)
r_behind = Relation(
'R4(h1,m1,h2,m2) <=> h1 <= h2 or (h1 = h2 and m1 < m2)',
['hour', 'minute', 'hour', 'minute'], 4)
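    # The four relations above give concrete interpretations for the PM, AM,
    # Ahead and Behind symbols over 'hour'/'minute' attributes; e.g. R3 ("Ahead")
    # holds when the first clock reads a strictly later time than the second.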
attribute_structure = AttributeStructure(
a, a2, r_ahead, r_behind, r_pm, r_am)
pm_rs = RelationSymbol('PM', 1)
am_rs = RelationSymbol('AM', 1)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
vocabulary = Vocabulary(
['C1', 'C2'], [pm_rs, am_rs, ahead_rs, behind_rs], ['V1', 'V2'])
profiles = [
[pm_rs, ('hour', 1)],
[am_rs, ('hour', 1)],
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)]]
bad_profiles = [
[pm_rs, ('hour', 1)],
[am_rs, ('hour', 1)],
[ahead_rs, ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)]]
mapping = {pm_rs: 1, am_rs: 2, ahead_rs: 3, behind_rs: 4}
attribute_interpretation = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
objects = ['s1', 's2']
attribute_system = AttributeSystem(attribute_structure, objects)
p = ConstantAssignment(
vocabulary, attribute_system, {'C1': 's1', 'C2': 's2'})
named_state = NamedState(attribute_system, p, {
('hour', 's1'): [9, 13],
('minute', 's1'): [12],
('hour', 's2'): [8],
('minute', 's2'): [27]})
f = Formula(vocabulary, 'Ahead', 'C1', 'C2')
VA = VariableAssignment(vocabulary, attribute_system, {}, dummy=True)
bad_vocabulary = Vocabulary(
['C1', 'C2', 'C3'], [pm_rs, am_rs, ahead_rs, behind_rs], ['V1', 'V2'])
bad_p = ConstantAssignment(
bad_vocabulary, attribute_system, {'C1': 's1', 'C2': 's2'})
bad_f = Formula(bad_vocabulary, 'Ahead', 'C1', 'C2')
bad_t_f = Formula(bad_vocabulary, 'Ahead', 'C1')
bad_v_attribute_interpretation = AttributeInterpretation(
bad_vocabulary, attribute_structure, mapping, profiles)
bad_p_attribute_interpretation = AttributeInterpretation(
vocabulary, attribute_structure, mapping, bad_profiles)
bad_named_state = NamedState(attribute_system, bad_p, {})
bad_VA = VariableAssignment(
bad_vocabulary, attribute_system, {}, dummy=True)
# Test invalid types
test_TypeError(f, None, named_state, VA)
test_TypeError(f, object, named_state, VA)
test_TypeError(f, attribute_interpretation, None, VA)
test_TypeError(f, attribute_interpretation, object, VA)
test_TypeError(f, attribute_interpretation, named_state, None)
test_TypeError(f, attribute_interpretation, named_state, object)
# Test mismatched vocabularies
test_ValueError(bad_f, attribute_interpretation, named_state, VA)
test_ValueError(f, bad_v_attribute_interpretation, named_state, VA)
test_ValueError(bad_f, attribute_interpretation, bad_named_state, VA)
test_ValueError(f, attribute_interpretation, named_state, bad_VA)
# Test profile length, DR length mismatch
test_ValueError(f, bad_p_attribute_interpretation, named_state, VA)
# Test profile j_x against length of terms
test_ValueError(bad_t_f, attribute_interpretation, named_state, VA)
assert f.assign_truth_value(attribute_interpretation, named_state, VA)
from vivid.classes.point import Point
point = Attribute('point', [Point('x', 'x', 'x', 'x')])
r_is_on = Relation('R1(h1, h2, h3) <=> is_on(h1, h2, h3)',
['point', 'point', 'point'], 1)
r_not_same_point = Relation('R2(h1, h2) <=> not_same_point(h1, h2)',
['point', 'point'], 2)
r_clocks_unequal = Relation('R3(h1, h2) <=> clocks_unequal(h1, h2)',
['point', 'point'], 3)
r_can_observe = Relation(
'R4(p, sp_loc, wls, wle) <=> can_observe(p, sp_loc, wls, wle)',
['point', 'point', 'point', 'point'], 4)
r_meets = Relation(
'R5(p, wl1s, wl1e, wl2s, wl2e) <=> meets(p, wl1s, wl1e, wl2s, wl2e)',
['point', 'point', 'point', 'point', 'point'], 5)
attribute_structure = AttributeStructure(
point, r_is_on, r_not_same_point, r_clocks_unequal, r_can_observe,
r_meets)
rs_is_on = RelationSymbol('IS_ON', 3)
rs_not_same_point = RelationSymbol('NOT_SAME_POINT', 2)
rs_clocks_unequal = RelationSymbol('CLOCKS_UNEQUAL', 2)
rs_can_observe = RelationSymbol('CAN_OBSERVE', 4)
rs_meets = RelationSymbol('MEETS', 5)
vocabulary = Vocabulary(['P1', 'P2', 'P3', 'P4', 'P5'],
[rs_is_on, rs_not_same_point,
rs_clocks_unequal, rs_can_observe, rs_meets],
[])
profiles = [
[rs_is_on, ('point', 1), ('point', 2), ('point', 3)],
[rs_not_same_point, ('point', 1), ('point', 2)],
[rs_clocks_unequal, ('point', 1), ('point', 2)],
[rs_can_observe,
('point', 1), ('point', 2), ('point', 3), ('point', 4)],
[rs_meets,
('point', 1), ('point', 2), ('point', 3), ('point', 4), ('point', 5)]]
mapping = {rs_is_on: 1, rs_not_same_point: 2, rs_clocks_unequal: 3,
rs_can_observe: 4, rs_meets: 5}
attribute_interpretation = AttributeInterpretation(vocabulary,
attribute_structure,
mapping,
profiles)
objects = ['p1', 'p2', 'p3', 'p4', 'p5']
attribute_system = AttributeSystem(attribute_structure, objects)
p = ConstantAssignment(vocabulary, attribute_system,
{'P1': 'p1', 'P2': 'p2', 'P3': 'p3', 'P4': 'p4',
'P5': 'p5'})
named_state = NamedState(attribute_system, p, {
('point', 'p1'): [Point(1.5, 1.5, 1.5, 1.5)],
('point', 'p2'): [Point(2.0, 2.0, 2.0, 2.0)],
('point', 'p3'): [Point(1.0, 1.0, 1.0, 1.0)],
('point', 'p4'): [Point(3.0, 3.0, 3.0, 3.0)],
('point', 'p5'): [Point(2.0, 2.0, 2.0, 2.0)]})
f1 = Formula(vocabulary, 'IS_ON', 'P1', 'P3', 'P4')
f2 = Formula(vocabulary, 'NOT_SAME_POINT', 'P1', 'P2')
f3 = Formula(vocabulary, 'CLOCKS_UNEQUAL', 'P1', 'P2')
f4 = Formula(vocabulary, 'CAN_OBSERVE', 'P1', 'P2', 'P3', 'P4')
f5 = Formula(vocabulary, 'MEETS', 'P1', 'P2', 'P3', 'P4', 'P5')
VA = VariableAssignment(vocabulary, attribute_system, {}, dummy=True)
assumption_base = AssumptionBase(f1, f2, f3, f4)
for f in assumption_base:
assert f.assign_truth_value(attribute_interpretation, named_state, VA)
named_state.set_ascription(('point', 'p4'), [Point(1.0, 1.0, 1.0, 1.0)])
assert f5.assign_truth_value(attribute_interpretation, named_state, VA)
def test_get_basis():
"""Test get_basis function for Formula."""
point = Attribute('point', [Point('x', 'x', 'x', 'x')])
fake = Attribute('fake', [])
r_is_on = Relation('R1(h1, h2, h3) <=> is_on(h1, h2, h3)',
['point', 'point', 'point'], 1)
r_not_same_point = Relation('R2(h1, h2) <=> not_same_point(h1, h2)',
['point', 'point'], 2)
r_clocks_unequal = Relation('R3(h1, h2) <=> clocks_unequal(h1, h2)',
['point', 'point'], 3)
r_can_observe = Relation(
'R4(p, sp_loc, wls, wle) <=> can_observe(p, sp_loc, wls, wle)',
['point', 'point', 'point', 'point'], 4)
r_fake = Relation(
'R5(p, wl1s, wl1e, wl2s, wl2e) <=> meets(p, wl1s, wl1e, wl2s, wl2e)',
['fake', 'point', 'fake', 'fake', 'point'], 5)
attribute_structure = AttributeStructure(
point, fake, r_is_on, r_not_same_point, r_clocks_unequal,
r_can_observe, r_fake)
rs_is_on = RelationSymbol('IS_ON', 3)
rs_not_same_point = RelationSymbol('NOT_SAME_POINT', 2)
rs_clocks_unequal = RelationSymbol('CLOCKS_UNEQUAL', 2)
rs_can_observe = RelationSymbol('CAN_OBSERVE', 4)
rs_fake = RelationSymbol('FAKE', 5)
vocabulary = Vocabulary(['P1', 'P2', 'P3', 'P4', 'P5'],
[rs_is_on, rs_not_same_point,
rs_clocks_unequal, rs_can_observe, rs_fake],
[])
profiles = [
[rs_is_on, ('point', 1), ('point', 2), ('point', 3)],
[rs_not_same_point, ('point', 1), ('point', 2)],
[rs_clocks_unequal, ('point', 1), ('point', 2)],
[rs_can_observe,
('point', 1), ('point', 2), ('point', 3), ('point', 4)],
[rs_fake,
('fake', 1), ('point', 2), ('fake', 3), ('fake', 4), ('point', 5)]]
mapping = {rs_is_on: 1, rs_not_same_point: 2, rs_clocks_unequal: 3,
rs_can_observe: 4, rs_fake: 5}
attribute_interpretation = AttributeInterpretation(vocabulary,
attribute_structure,
mapping,
profiles)
objects = ['p1', 'p2', 'p3', 'p4', 'p5']
attribute_system = AttributeSystem(attribute_structure, objects)
p = ConstantAssignment(vocabulary, attribute_system,
{'P1': 'p1', 'P2': 'p2', 'P3': 'p3', 'P4': 'p4',
'P5': 'p5'})
named_state = NamedState(attribute_system, p, {
('point', 'p1'): [Point(1.5, 1.5, 1.5, 1.5)],
('point', 'p2'): [Point(2.0, 2.0, 2.0, 2.0)],
('point', 'p3'): [Point(1.0, 1.0, 1.0, 1.0)],
('point', 'p4'): [Point(3.0, 3.0, 3.0, 3.0)],
('point', 'p5'): [Point(2.0, 2.0, 2.0, 2.0)]})
f1 = Formula(vocabulary, 'IS_ON', 'P1', 'P3', 'P4')
f2 = Formula(vocabulary, 'NOT_SAME_POINT', 'P1', 'P2')
f3 = Formula(vocabulary, 'CLOCKS_UNEQUAL', 'P1', 'P2')
f4 = Formula(vocabulary, 'CAN_OBSERVE', 'P1', 'P2', 'P3', 'P4')
f5 = Formula(vocabulary, 'FAKE', 'P1', 'P2', 'P3', 'P4', 'P5')
VA = VariableAssignment(vocabulary, attribute_system, {}, dummy=True)
f1_basis = set([('point', 'p1'), ('point', 'p3'), ('point', 'p4')])
f2_basis = set([('point', 'p1'), ('point', 'p2')])
f1_f2_f3_basis = set(
[('point', 'p1'), ('point', 'p2'), ('point', 'p3'), ('point', 'p4')])
f5_basis = set(
[('fake', 'p1'), ('fake', 'p4'), ('point', 'p2'), ('point', 'p5'),
('fake', 'p3')])
f4_f5_basis = set(
[('fake', 'p1'), ('fake', 'p4'), ('point', 'p2'), ('point', 'p5'),
('fake', 'p3'), ('point', 'p1'), ('point', 'p3'), ('point', 'p4')])
assert f1_basis == set(Formula.get_basis(
named_state._p, VA, attribute_interpretation, f1))
assert f2_basis == set(Formula.get_basis(
named_state._p, VA, attribute_interpretation, f2))
assert f1_f2_f3_basis == set(Formula.get_basis(
named_state._p, VA, attribute_interpretation, f1, f2, f3))
assert f5_basis == set(Formula.get_basis(
named_state._p, VA, attribute_interpretation, f5))
assert f4_f5_basis == set(Formula.get_basis(
named_state._p, VA, attribute_interpretation, f4, f5))
|
|
#! /usr/bin/env python
'''
Program to send and test the trajectories
'''
import roslib; roslib.load_manifest('robotnik_torso_control')
import rospy
import actionlib
import time
from actionlib_msgs.msg import *
from trajectory_msgs.msg import *
from control_msgs.msg import *
if __name__ == '__main__':
rospy.init_node('follow_trajectory_real_test')
client = actionlib.SimpleActionClient('rt_traj_exe/follow_joint_trajectroy', FollowJointTrajectoryAction)
client.wait_for_server()
option = 9
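    # 'option' selects which of the test branches below is executed (1..9), e.g.
    # 1 sends three trajectories in sequence, 2 overwrites a goal, 3 cancels one,
    # 4/5 move the left/right arm, 6 both arms, 7/8 the head (8 also moves the
    # arms afterwards), 9 the waist joint.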
goal = FollowJointTrajectoryGoal()
goal.trajectory.header.stamp = rospy.Time()
goal.trajectory.joint_names = ['left_arm_4_joint', 'left_arm_2_joint', 'left_arm_1_joint', 'left_arm_3_joint']
tpoint1 = JointTrajectoryPoint()
tpoint1.positions = [0, 0.0, 0.0, 0.0]
#tpoint1.velocities = [0.1, 0.1, 0.1, 0.1]
tpoint1.velocities = [0.05, 0.05, 0.05, 0.05]
tpoint1.accelerations = [0.1, 0.1, 0.1, 0.1]
tpoint1.time_from_start = rospy.Duration.from_sec(5.0)
tpoint2 = JointTrajectoryPoint()
tpoint2.positions = [0.3, 0.31, 0.32, 0.93]
tpoint2.velocities = [0.05, 0.05, 0.05, 0.05]
tpoint2.accelerations = [0.1, 0.11, 0.12, 0.13]
tpoint2.time_from_start = rospy.Duration.from_sec(5.0)
goal.trajectory.points = [tpoint1, tpoint2]
# Sends 3 trajs
if option == 1:
rospy.loginfo('OPTION 1: sending trajectory 1')
# Fill in the goal here
client.send_goal(goal)
rospy.loginfo('waiting for result')
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
print 'Result is %s'%client.get_result()
time.sleep(0.1)
tpoint1.positions = [0.5, 1.0, 0.5, 0.5]
tpoint2.positions = [0.6, 1.1, 0.6, 0.6]
rospy.loginfo('sending trajectory 2')
# Fill in the goal here
client.send_goal(goal)
rospy.loginfo('waiting for result')
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
print 'Result is %s'%client.get_result()
time.sleep(0.1)
tpoint1.positions = [-0.5, -1.0, -0.5, -0.5]
tpoint2.positions = [-0.6, -1.1, -0.5, -0.5]
rospy.loginfo('sending trajectory 3')
# Fill in the goal here
client.send_goal(goal)
rospy.loginfo('waiting for result')
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
print 'Result is %s'%client.get_result()
    # Sends 1 traj and overwrites it
elif option == 2:
tpoint1.positions = [0.1, 0.2, 0, 0.0]
tpoint2.positions = [0.2, 0.3, 0.1, 0.1]
        rospy.loginfo('OPTION 2: Overwriting trajectories')
# Fill in the goal here
client.send_goal(goal)
time.sleep(2)
tpoint1.positions = [-0.1, -0.2, 0, 0.0]
tpoint2.positions = [-0.2, -0.3, -0.1, -0.1]
rospy.loginfo('overwrite')
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
    # Sends 1 traj and cancels it
elif option == 3:
tpoint1.positions = [0.1, 0.2, 0, 0.0]
tpoint2.positions = [0.2, 0.3, 0.5, 0.6]
rospy.loginfo('OPTION 3: Cancelling trajectories')
# Fill in the goal here
client.send_goal(goal)
time.sleep(2)
rospy.loginfo('cancel')
client.cancel_goal()
#
elif option == 4:
goal = FollowJointTrajectoryGoal()
goal.trajectory.header.stamp = rospy.Time()
goal.trajectory.joint_names = ['left_arm_1_joint', 'left_arm_2_joint', 'left_arm_3_joint', 'left_arm_4_joint', 'left_arm_5_joint', 'left_arm_6_joint']
tpoint0 = JointTrajectoryPoint()
tpoint0.positions = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
tpoint0.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint0.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint0]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint1 = JointTrajectoryPoint()
#tpoint1.positions = [0.4, 0.5, 0.6, 0.7]
#tpoint1.positions = [0.33, 0.44, 0.55, 0.66, 0.77, 0.88]
tpoint1.positions = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
#tpoint1.velocities = [0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
tpoint1.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint1.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint2 = JointTrajectoryPoint()
tpoint2.positions = [0.2, 0.2, 0.2, 0.2, 0.2, 0.2]
tpoint2.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint2.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3 = JointTrajectoryPoint()
tpoint3.positions = [0.6, 0.6, 0.6, 0.6, 0.6, 0.6]
tpoint3.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint1, tpoint2, tpoint3]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
print 'going back'
goal.trajectory.points = [tpoint3, tpoint2, tpoint1]
client.send_goal(goal)
elif option == 5:
goal = FollowJointTrajectoryGoal()
goal.trajectory.header.stamp = rospy.Time()
goal.trajectory.joint_names = ['right_arm_1_joint', 'right_arm_2_joint', 'right_arm_3_joint', 'right_arm_4_joint', 'right_arm_5_joint', 'right_arm_6_joint']
tpoint0 = JointTrajectoryPoint()
tpoint0.positions = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
tpoint0.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint0.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint0]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint1 = JointTrajectoryPoint()
#tpoint1.positions = [0.4, 0.5, 0.6, 0.7]
#tpoint1.positions = [0.33, 0.44, 0.55, 0.66, 0.77, 0.88]
tpoint1.positions = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
#tpoint1.velocities = [0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
tpoint1.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint1.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint2 = JointTrajectoryPoint()
tpoint2.positions = [0.2, 0.2, 0.2, 0.2, 0.2, 0.2]
tpoint2.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint2.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3 = JointTrajectoryPoint()
tpoint3.positions = [0.6, 0.6, 0.6, 0.6, 0.6, 0.6]
tpoint3.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint1, tpoint2, tpoint3]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
print 'going back'
goal.trajectory.points = [tpoint3, tpoint2, tpoint1]
client.send_goal(goal)
elif option == 6:
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ['right_arm_1_joint', 'right_arm_2_joint', 'right_arm_3_joint', 'right_arm_4_joint', 'right_arm_5_joint', 'right_arm_6_joint', 'left_arm_1_joint', 'left_arm_2_joint', 'left_arm_3_joint', 'left_arm_4_joint', 'left_arm_5_joint', 'left_arm_6_joint']
for i in range(5):
print 'Iteration %s'%i
tpoint0 = JointTrajectoryPoint()
tpoint0.positions = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
tpoint0.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint0.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint0]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint1 = JointTrajectoryPoint()
tpoint1.positions = [-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5]
tpoint1.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint1.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint2 = JointTrajectoryPoint()
tpoint2.positions = [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]
tpoint2.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint2.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3 = JointTrajectoryPoint()
tpoint3.positions = [0.5, -0.5, 1.7, -0.5, 0.4, -1.57, -0.137, -0.642, 1.353, -0.548, 0.307, 1.89]
tpoint3.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint1, tpoint2, tpoint3]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
print 'going back'
goal.trajectory.points = [tpoint3, tpoint2, tpoint1]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
time.sleep(5)
elif option == 7:
goal = FollowJointTrajectoryGoal()
goal.trajectory.header.stamp = rospy.Time()
goal.trajectory.joint_names = ['head_pan_joint', 'head_tilt_joint']
tpoint0 = JointTrajectoryPoint()
tpoint0.positions = [1.3, 0.5]
tpoint0.velocities = [0.1, 0.1]
tpoint0.accelerations = [0.1, 0.1]
goal.trajectory.points = [tpoint0]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint0.positions = [-1.3, 0.5]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
elif option == 8:
for i in range(10):
print 'It %d'%i
goal.trajectory.header.stamp = rospy.Time()
goal.trajectory.joint_names = ['head_pan_joint', 'head_tilt_joint']
tpoint0 = JointTrajectoryPoint()
tpoint0.positions = [1.3, 0.6]
tpoint0.velocities = [0.15, 0.15]
tpoint0.accelerations = [0.1, 0.1]
goal.trajectory.points = [tpoint0]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint0.positions = [-1.3, 0.6]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ['left_arm_1_joint', 'left_arm_2_joint', 'left_arm_3_joint', 'left_arm_4_joint', 'left_arm_5_joint', 'left_arm_6_joint', 'right_arm_1_joint', 'right_arm_2_joint', 'right_arm_3_joint', 'right_arm_4_joint', 'right_arm_5_joint', 'right_arm_6_joint', 'head_pan_joint', 'head_tilt_joint']
tpoint0 = JointTrajectoryPoint()
tpoint0.positions = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0]
tpoint0.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.15, 0.16]
tpoint0.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint0]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
'''
tpoint1 = JointTrajectoryPoint()
tpoint1.positions = [-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5]
tpoint1.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint1.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint2 = JointTrajectoryPoint()
tpoint2.positions = [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]
tpoint2.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint2.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
'''
tpoint3 = JointTrajectoryPoint()
tpoint3.positions = [0.35, -0.5, 1.7, -0.5, 0.4, -1.57, -0.127, -0.642, 1.353, -0.548, 0.307, 1.89, 0.0, 0.6]
tpoint3.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.15, 0.15]
tpoint3.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint3]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint3 = JointTrajectoryPoint()
tpoint3.positions = [0.0, -1.199, 1.070, 1.449, 0.831, 1.827, -0.055, -1.373, 0.890, -1.181, 1.270, 1.893, 0.0, 0.6]
tpoint3.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint3]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint3 = JointTrajectoryPoint()
tpoint3.positions = [-1.46, -1.725, 0, 0, 0, 0, -0.055, -1.373, 0.890, -1.181, 1.270, 1.893, 0.0, 0.6]
tpoint3.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint3]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint3 = JointTrajectoryPoint()
tpoint3.positions = [-1.46, -1.725, 0, 0, 0, 0, 1.5, -1.76, 0, 0, 0, 0, 0.0, 0.6]
tpoint3.velocities = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
tpoint3.accelerations = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
goal.trajectory.points = [tpoint3]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
print 'going back'
goal.trajectory.points = [tpoint0]
goal.trajectory.header.stamp = rospy.Time()
client.send_goal(goal)
time.sleep(5)
elif option == 9:
goal = FollowJointTrajectoryGoal()
goal.trajectory.header.stamp = rospy.Time()
goal.trajectory.joint_names = ['waist_joint']
tpoint0 = JointTrajectoryPoint()
tpoint0.positions = [1.0]
tpoint0.velocities = [0.2]
tpoint0.accelerations = [0.2]
print 'Sending traj to %s'%goal.trajectory.joint_names
goal.trajectory.points = [tpoint0]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
time.sleep(2)
tpoint0.positions = [0.0]
client.send_goal(goal)
while not client.wait_for_result(rospy.Duration.from_sec(5.0)) and not rospy.is_shutdown():
rospy.loginfo('waiting for result. state = %s'%client.get_state())
exit()
|
|
#
#
# Model version
# automatically created: 2013/07/06 22:16:03 by copow
#
#
import string
import types
import importlib
import os
import os.path
import json
import imp
import pymongo
import pprint
import re
from #APPNAME.lib.db_conn import DBConn
from #APPNAME.lib import powlib
from #APPNAME.lib.powlib import _log
import #APPNAME.config.settings as settings
import #APPNAME.config.types_and_formats as types_and_formats
import #APPNAME.lib.custom_encoders as encoders
tab = powlib.tab
#reg = re.compile("[0-9]+")
class BaseModel(dict):
#def __init__(self, *args, data=None, schema=None, **kwargs):
# super(BaseModel,self).__init__(*args, **kwargs)
# self.array = []
# self.is_array = False
def __setitem__(self, key, value):
# optional processing here
#print("--> setitem: ", key,value)
if key in self.schema.keys():
curr_type = self.schema[key]["type"].lower()
if curr_type in types_and_formats.schema_types.keys():
if "encode_python" in types_and_formats.schema_types[curr_type][2]:
#
# if this type has a custom_encoder, then use it
#
setattr(self, key, types_and_formats.schema_types[curr_type][2]["encode_python"](value))
#print ("custom encoded for: ", curr_type, " value: ", value, " -> with: ", types_and_formats.schema_types[curr_type][2]["encode_python"])
else:
setattr(self,key, value)
else:
#print("Skipping: ", key, " -> ", value, " Not in schema")
pass
super(BaseModel, self).__setitem__(key, value)
def setup_relations(self):
self.related_models = {}
for rel_model in list(self.relations.keys()):
# check relation type
#print(" -> setting up relation for: %s " % (rel_model))
if self.relations[rel_model] == "has_many":
rel_model = powlib.singularize(rel_model)
module = importlib.import_module("#APPNAME.models." + rel_model )
#print(module)
#print(dir(module))
rel_model_instance = getattr(module, str.capitalize(rel_model) )()
self.related_models[rel_model] = rel_model_instance
self.generate_accessor_methods(rel_model_instance)
elif self.relations[rel_model] == "belongs_to":
pass
else:
raise Exception("unknown relation: %s ") %(rel_model)
def print_schema(self):
pp=pprint.PrettyPrinter(indent=4)
pp.pprint(self.schema)
def load_schema(self):
try:
"""Tries to find the according schema in migrations/schemas/Version.py
imports the schema and sets the according properties in this class/instance"""
schema_module = __import__("#APPNAME"+".migrations.schemas." + self.modelname + "_schema",
globals(), locals(), [self.modelname], 0)
schema = imp.reload(schema_module)
self.schema = schema_module.__dict__[self.modelname]
self.relations = schema_module.__dict__[self.modelname + "_relations"]
except Exception as e:
print("Unexpected Error:", e, e.args)
raise e
def setup_properties(self):
""" sets the accessor methods for the schema """
# add the property and initialize it according to the type
for column, attrs in list(self.schema.items()):
#print("column : %s" % (column))
#type = string.lower(attrs["type"])
att_type = attrs["type"].lower()
if att_type in types_and_formats.schema_types:
#print "setting up property for: %s" % (column)
#
# setting the according attribute and the default value, if any.
#
# default_value:
setattr(self, column, types_and_formats.schema_types[att_type][0])
                # set the convenience att_type attribute
setattr(self, column+"_type", att_type)
setattr(self, column+"_uimodule", types_and_formats.schema_types[att_type][1])
else:
raise Exception("no or unknown type given in schema: version_schema.py. Type was: ", att_type)
if "index" in attrs:
att_index = attrs["index"]
setattr(self, column+"_has_index", True)
else:
setattr(self, column+"_has_index", False)
if "default" in attrs:
att_default = attrs["default"]
setattr(self, column+"_dafault", att_default)
if "validation" in attrs:
# this is just a quick indication if there is any validation or
# not. If so, the real validation is loaded. So quick test, long loading
# only if True.
att_validation = attrs["validation"]
setattr(self, column+"_has_validation", True)
else:
setattr(self, column+"_has_validation", False)
self.setup_relations()
def is_valid(self):
for column in self.schema.keys():
## TODO
if getattr(self,column + "_has_validation"):
# validate this column
pass
else:
# do nothing
pass
return True
def __repr__(self):
return self.__str__()
def __str__(self):
#ostr = ""
#adict = self.to_json()
#for key in adict:
# ostr += key + " -> " + str(adict[key]) + os.linesep
#return ostr
pp = pprint.PrettyPrinter(indent=4)
str(pp.pprint(self.to_json(encoder="encode_str")))
return ""
def get(self, attribute_name=None, as_str=True):
""" returns the model attribute with the specified attribute_name"""
if attribute_name in self.schema.keys():
if as_str:
curr_type = self.schema[attribute_name]["type"].lower()
#print("column: ", column, " curr_type: ", curr_type)
if curr_type in types_and_formats.schema_types.keys():
if "encode_str" in types_and_formats.schema_types[curr_type][2]:
retval = types_and_formats.schema_types[curr_type][2]["encode_str"](getattr(self,attribute_name))
print("get as_str custom encoding: value = ", retval)
return retval
else:
return str(getattr(self,attribute_name))
else:
return getattr(self,attribute_name)
def generate_accessor_methods(self, rel_model):
""" generates the convenient append Method for adding related (has_many)
models. Also updates the belongs_to section in the related model with this
        model's self._id.
"""
#print(rel_model)
#print(type(rel_model))
#print(dir(rel_model))
rel_model_name = rel_model.modelname
# prepare the tmp attribute for the full models of a relation.
setattr(self, powlib.pluralize(rel_model_name) +"_full", [])
mstr = ""
#add rel model
mstr = ""
method_name = "add_"+ rel_model_name
tmp_meth_name = "foo"
mstr += "def foo(self, model):" + os.linesep
mstr += tab + "self." + powlib.pluralize(rel_model_name) +".append(model._id) "+ os.linesep
mstr += tab + "self." + powlib.pluralize(rel_model_name) +"_full.append(model) "+ os.linesep
mstr += tab + "setattr(rel_model_instance, 'self.modelname' + '_id', self._id) "+ os.linesep
mstr += tab + "return self." + powlib.pluralize(rel_model_name) + os.linesep
exec(mstr,globals())
self.__dict__[method_name] = types.MethodType(foo,self)
#setattr(self, method_name, foo)
# get a rel model
mstr = ""
method_name = "get_"+ powlib.pluralize(rel_model_name)
tmp_meth_name = "foo"
mstr += "def foo(self):" + os.linesep
mstr += tab + "return self." + powlib.pluralize(rel_model_name) +"_full" + os.linesep
#print mstr
exec(mstr,globals())
self.__dict__[method_name] = types.MethodType(foo,self)
#setattr(self, method_name, foo)
def find_by(self, field, value):
""" find model by attribute. Sets self to the model found.
Uses find_one internally. """
res = self.find_one({ field : value })
return self
def find_all(self, *args, **kwargs):
""" Find all matching models. Returns an iterable.
Uses model.find internally. More docu can be found there
"""
return self.find(*args,**kwargs)
def find(self, *args, sort=False, limit=False, skip=False, **kwargs):
""" Find all matching models. Returns an iterable.
        sorting can be done by giving, for example:
        sort=[("field", pymongo.ASCENDING), ("field2", pymongo.DESCENDING), ...]
returns: a pymongo.cursor.Cursor
"""
#print("args: ", *args)
#print("kwargs: ", **kwargs)
cursor = self.__class__.collection.find(*args, as_class=self.__class__, **kwargs)
if limit:
print("limit:", limit)
cursor=cursor.limit(limit)
if skip:
print("skip:",skip)
cursor=cursor.skip(skip)
if sort:
print("sort",sort)
cursor = cursor.sort(sort)
#print("cursor__class__:", cursor.__class__)
print("cursor count:", cursor.count())
if cursor.__class__ == pymongo.cursor.Cursor:
if cursor.count() == 1:
print("setting self.set_values cause only _one_ result")
# if it is only one result in the cursor, return the cursor but also
# set this (self) object's values as the result.
#print(cursor[0].to_json())
self.set_values(cursor[0])
return cursor
def find_one(self, *args, **kwargs):
""" Updates this(self) object directly.
returns self (NOT a dict)
"""
#ret = self.__class__.collection.find_one(*args, as_class=self.__class__, **kwargs)
ret = self.__class__.collection.find_one(*args, **kwargs)
if ret:
self.set_values(ret)
return self
def set_values(self, val):
#print ("set_values: ", dictionary)
if isinstance(val, self.__class__):
self = val
#print("set self = val", type(val))
elif isinstance(val, dict):
#print("setting self = dict")
for elem in val:
self.__setitem__(elem, val[elem])
else:
print("You should never see this message!!!!")
#print(self)
return
def clear(self):
"""
erase the instance's values
"""
for elem in self.schema.keys():
# get the according default type for the attribute
# See: types_and_formats.schema_types
default_value = types_and_formats.schema_types[self.schema[elem]["type"].lower()][0]
setattr(self, elem, default_value)
print("erased values: ", self.to_json())
return
def save(self, safe=True):
""" Saves the object. Results in insert if object wasnt in the db before,
results in update otherwise"""
d = self.to_mongo()
#print(self)
d["last_updated"] = powlib.get_time()
self._id = self.__class__.collection.save(d, safe=safe)
print("saved: ", self.modelname, " id: ",str(self._id))
#self._id = self.insert(safe=safe)
return self._id
def save_relations(self):
for elem in self.relations:
print("relation: ", elem)
def insert(self, safe=True):
""" Uses pymongo insert directly"""
d = self.to_json()
#print(self)
d["last_updated"] = powlib.get_time()
#d["created"] = powlib.get_time()
del d["_id"]
self._id = self.__class__.collection.insert(d, safe=safe)
print("inserted: ", self.modelname, " id: ", str(self._id))
return self._id
def create(self):
""" Alias for insert()"""
return self.insert()
def update(self, *args, safe=True, multi=False, **kwargs):
""" Pure: pymongo update. Can update any document in the collection (not only self)
Syntax: db.test.update({"x": "y"}, {"$set": {"a": "c"}}) """
#ret = self.__class__.collection.update(*args, **kwargs)
ret = self.__class__.collection.update({"_id": self._id}, self.to_mongo(), safe=safe, multi=False )
print("updated: ", self.modelname, " id: ", str(self._id))
return ret
def remove(self, *args, **kwargs):
""" removes any given instances document representation in the db
example:
model.remove( { "attribute" : value }, True, multi=True )
for more information see:
http://docs.mongodb.org/manual/tutorial/remove-documents/
"""
return self.__class__.collection.remove(*args, **kwargs)
def delete(self, *args, **kwargs):
""" removes this instances document representation in the db
conveniance method, calls remove_self internally.
"""
return self.remove({"_id" : self._id}, True)
def from_json(self, json_data):
""" makes an self instance from json """
return self.set_values(json_data)
def to_mongo(self):
self.last_updated = powlib.get_time()
return self.to_json(encoder="encode_db")
def to_dict(self):
d = {}
print(" -- converting to dict() ")
for column in list(self.schema.keys()):
curr_type = self.schema[column]["type"].lower()
#print("column: ", column, " curr_type: ", curr_type)
if curr_type in types_and_formats.schema_types.keys():
d[column] = getattr(self, column)
print(" + ",column, "type: ", type(d[column]))
return d
def to_json(self, encoder="encode_json"):
""" returns a json representation of the schema"""
d = {}
#print(self.schema)
for column in list(self.schema.keys()):
curr_type = self.schema[column]["type"].lower()
#print("column: ", column, " curr_type: ", curr_type)
if curr_type in types_and_formats.schema_types.keys():
if encoder in types_and_formats.schema_types[curr_type][2]:
#
# if this type has a custom_encoder, then use it
#
d[column] = types_and_formats.schema_types[curr_type][2][encoder](getattr(self, column))
#print ("custom encoded for: ", column, " with: ", types_and_formats.schema_types[curr_type][2][encoder])
else:
d[column] = getattr(self, column)
#print ("standard encoded for: ", column)
return d
def reload_relations(self):
"""
(re)load the models relations from the schema (as a module)
migrations/schemas/modelname_schema.py
"""
schema_module = __import__("#APPNAME"+".migrations.schemas." + self.modelname + "_schema",
globals(), locals(), [self.modelname], 0)
schema = imp.reload(schema_module)
#schema = reload(schema_module)
self.relations = schema_module.__dict__[self.modelname + "_relations"]
return self.relations
def has_many(self, rel_model, one_to_one=False ):
""" creates an (currently embedded) one:many relation between this (self) and model.
see doc in source below.
"""
if type(rel_model) == str:
rel_modelname = powlib.singularize(rel_model)
else:
rel_modelname = rel_model.modelname
print(" rel_modelname: ", rel_modelname)
# 0. check if relation is not already existing
if powlib.plural(rel_modelname) in self.relations:
raise Exception( "POWError: model %s already has a relation to %s " % (self.modelname, rel_modelname) )
return
# 1. check if model exists
try:
module = importlib.import_module("#APPNAME.models." + rel_modelname )
#print(module)
#print(dir(module))
rel_model_instance = getattr(module, str.capitalize(rel_modelname) )()
#print(rel_model_instance)
except Exception as e:
raise e
rel = self.reload_relations()
# 2. add list of related model to relations
print(" creating the according object attributes:")
print("-"*50)
print(" ++ attribute for a 1:n relation : ", self.modelname + "." + rel_modelname + " type: list []")
self.relations[powlib.plural(rel_modelname)] = "has_many"
self.schema[powlib.plural(rel_modelname)] = { "type" : "list" }
try:
self.create_schema()
except Exception as e:
raise e
print(" ++ attribute for a 1:1 relation : ", rel_modelname + "." + self.modelname + "_id type: ObjectId")
rel_model_instance.relations[self.modelname] ="belongs_to"
rel_model_instance.schema[self.modelname+"_id"] = { "type" : "ObjectId" }
try:
rel_model_instance.create_schema()
except Exception as e:
raise e
self.generate_accessor_methods(rel_model_instance)
def has_one(self, model, embedd=True):
""" creates an (currently embedded) one:one relation between this (self) and model."""
return self.has_many(model, embedd, one_to_one=True)
def remove_relation(self, rel_model):
""" tries to find the given relation by its name and deltes the relation entry and the
instance and class attribute, as well."""
print("in remove_relation: ", self)
if type(rel_model) == str:
rel_modelname = rel_model
else:
rel_modelname = rel_model.modelname
try:
# 0. check if model exists
rel = self.reload_relations()
# 1. check if relation is existing
if powlib.plural(rel_modelname) in self.relations or rel_modelname in self.relations:
pass
else:
raise Exception( "POWError: model %s already norelation to %s " % (self.modelname, rel_modelname) )
# 2. remove the relation
# has_many
if powlib.plural(rel_modelname) in self.relations.keys():
print("removing relation (has_many): ", self.modelname, " -> ", rel_modelname)
del self.relations[powlib.plural(rel_modelname)]
del self.schema[powlib.plural(rel_modelname)]
# delete the belongs to in the rel_model as well
try:
module = importlib.import_module("#APPNAME.models." + rel_modelname )
rel_model_instance = getattr(module, str.capitalize(rel_modelname) )()
print("removing relation (belongs_to): ", rel_modelname, " -> ", self.modelname)
rel_model_instance.remove_relation(self)
rel_model_instance.create_schema()
except Exception as e:
raise e
# belongs_to
elif rel_modelname in self.relations.keys():
print("actually del the relation (belongs_to): ", self.modelname, " -> ", rel_modelname)
del self.relations[rel_modelname]
del self.schema[rel_modelname+"_id"]
# write the new schema and relation json.
try:
self.create_schema()
except Exception as e:
raise e
return
except ImportError as error:
# Display error message
print("POWError: unable to import module: %s, error msg: %S " % (rel_modelname, error.message))
raise error
def add_column(self, name, attrs={}):
""" adds a column to this collection. Updates all docs in the collection.
        This might take some time in large collections since all docs are touched.
"""
print("Apennding column to table: %s" % (self.modelname))
return self.__class__.collection.update({},{"$set" : {name: attrs["default"]}},{ "multi": True })
def add_index(self, *args, **kwargs):
""" adds an index to a column
example: coll.create_index("title", name="title_index_khz", unique=True)
"""
return self.__class__.collection.ensure_index(*args, **kwargs)
def remove_column(self, name, filter={}):
""" removes a column
see: http://docs.mongodb.org/manual/core/update/#Updating-%24unset
"""
return self.__class__.collection.update(filter, { "$unset": { name : 1 }}, { "multi": True })
def remove_index(self, name):
""" removes an index"""
        return self.__class__.collection.drop_index([(name, pymongo.ASCENDING)])
def rename_column(self, name, new_name, filter={}):
""" renames a column """
self.__class__.collection.update( filter, { "$rename": { name : new_name }}, { "multi": True } )
def alter_column_name(self, colname, newname):
""" alters a column name.
#TODO Not implemented yet
"""
print("not implemented yet")
return False
def create_table(self, *args, **kwargs):
""" creates this collection explicitly. Even this is
not neccessary in mongoDB"""
#db.createCollection(name, {capped: <boolean>, autoIndexId: <boolean>, size: <number>, max: <number>} )
        # example: db.createCollection("log", { capped : true, size : 5242880, max : 5000 } )
return self.__class__.db.create_collection(self.__class__.collection_name + kwargs.get("postfix", ""), *args, **kwargs)
def drop_table(self):
""" drops this collection / table """
return self.__class__.db.drop_collection(self.__class__.collection_name)
def index_information(self):
"""Get information on this collections indexes."""
return self.__class__.collection.index_information()
def erase_schema(self):
self.schema = {}
self.relations = {}
self.create_schema(erase=True)
def create_schema(self, erase=False, prefix_output_path=""):
""" create a schema for this model
Automatically add the following column:
last_updated -> by copow
create -> by copow
_id -> by mongodb
"""
print(" creating schema: ", self.modelname)
try:
filepath = prefix_output_path + "./migrations/schemas/" + self.modelname + "_schema.py"
filepath = os.path.abspath(os.path.normpath(filepath))
#print filepath
ofile = open(filepath, "w")
ostr = self.modelname + " = "
schema = self.schema
if not erase:
schema["last_updated"] = { "type" : "date" }
#schema["created"] = { "type" : "date" }
schema["_id"] = { "type" : "objectid" }
#ostr += json.dumps(schema, indent=4) + os.linesep
ostr += str(schema) + os.linesep
ostr += self.modelname + "_relations = "
ostr += json.dumps(self.relations, indent=4)
#print(ostr)
ofile.write(ostr)
ofile.close()
self.load_schema()
self.setup_properties()
except Exception as e:
raise e
return self
|
|
"""
Use this module directly:
import xarray.plot as xplt
Or use the methods on a DataArray:
DataArray.plot._____
"""
import functools
import numpy as np
import pandas as pd
from .facetgrid import _easy_facetgrid
from .utils import (
_add_colorbar, _ensure_plottable, _infer_interval_breaks, _infer_xy_labels,
_interval_to_double_bound_points, _interval_to_mid_points,
_process_cmap_cbar_kwargs, _rescale_imshow_rgb, _resolve_intervals_2dplot,
_update_axes, _valid_other_type, get_axis, import_matplotlib_pyplot,
label_from_attrs)
def _infer_line_data(darray, x, y, hue):
error_msg = ('must be either None or one of ({0:s})'
.format(', '.join([repr(dd) for dd in darray.dims])))
ndims = len(darray.dims)
if x is not None and x not in darray.dims and x not in darray.coords:
raise ValueError('x ' + error_msg)
if y is not None and y not in darray.dims and y not in darray.coords:
raise ValueError('y ' + error_msg)
if x is not None and y is not None:
        raise ValueError('You cannot specify both x and y kwargs '
                         'for line plots.')
if ndims == 1:
huename = None
hueplt = None
huelabel = ''
if x is not None:
xplt = darray[x]
yplt = darray
elif y is not None:
xplt = darray
yplt = darray[y]
else: # Both x & y are None
dim = darray.dims[0]
xplt = darray[dim]
yplt = darray
else:
if x is None and y is None and hue is None:
            raise ValueError('For 2D inputs, please '
                             'specify either hue, x or y.')
if y is None:
xname, huename = _infer_xy_labels(darray=darray, x=x, y=hue)
xplt = darray[xname]
if xplt.ndim > 1:
if huename in darray.dims:
otherindex = 1 if darray.dims.index(huename) == 0 else 0
otherdim = darray.dims[otherindex]
yplt = darray.transpose(
otherdim, huename, transpose_coords=False)
xplt = xplt.transpose(
otherdim, huename, transpose_coords=False)
else:
raise ValueError('For 2D inputs, hue must be a dimension'
+ ' i.e. one of ' + repr(darray.dims))
else:
yplt = darray.transpose(xname, huename)
else:
yname, huename = _infer_xy_labels(darray=darray, x=y, y=hue)
yplt = darray[yname]
if yplt.ndim > 1:
if huename in darray.dims:
otherindex = 1 if darray.dims.index(huename) == 0 else 0
otherdim = darray.dims[otherindex]
xplt = darray.transpose(
otherdim, huename, transpose_coords=False)
else:
raise ValueError('For 2D inputs, hue must be a dimension'
+ ' i.e. one of ' + repr(darray.dims))
else:
xplt = darray.transpose(yname, huename)
huelabel = label_from_attrs(darray[huename])
hueplt = darray[huename]
xlabel = label_from_attrs(xplt)
ylabel = label_from_attrs(yplt)
return xplt, yplt, hueplt, xlabel, ylabel, huelabel
def plot(darray, row=None, col=None, col_wrap=None, ax=None, hue=None,
rtol=0.01, subplot_kws=None, **kwargs):
"""
Default plot of DataArray using matplotlib.pyplot.
Calls xarray plotting function based on the dimensions of
darray.squeeze()
=============== ===========================
Dimensions Plotting function
--------------- ---------------------------
1 :py:func:`xarray.plot.line`
2 :py:func:`xarray.plot.pcolormesh`
Anything else :py:func:`xarray.plot.hist`
=============== ===========================
Parameters
----------
darray : DataArray
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
hue : string, optional
If passed, make faceted line plots with hue on this dimension name
col_wrap : integer, optional
Use together with ``col`` to wrap faceted plots
ax : matplotlib axes, optional
If None, uses the current axis. Not applicable when using facets.
rtol : number, optional
Relative tolerance used to determine if the indexes
are uniformly spaced. Usually a small positive number.
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only applies
to FacetGrid plotting.
**kwargs : optional
Additional keyword arguments to matplotlib
"""
darray = darray.squeeze()
plot_dims = set(darray.dims)
plot_dims.discard(row)
plot_dims.discard(col)
plot_dims.discard(hue)
ndims = len(plot_dims)
error_msg = ('Only 1d and 2d plots are supported for facets in xarray. '
'See the package `Seaborn` for more options.')
if ndims in [1, 2]:
if row or col:
kwargs['row'] = row
kwargs['col'] = col
kwargs['col_wrap'] = col_wrap
kwargs['subplot_kws'] = subplot_kws
if ndims == 1:
plotfunc = line
kwargs['hue'] = hue
elif ndims == 2:
if hue:
plotfunc = line
kwargs['hue'] = hue
else:
plotfunc = pcolormesh
else:
if row or col or hue:
raise ValueError(error_msg)
plotfunc = hist
kwargs['ax'] = ax
return plotfunc(darray, **kwargs)
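# A minimal usage sketch of the dispatch above (not part of the original
# module); it assumes xarray, numpy and a working matplotlib backend are
# available, and the dimension names are illustrative only.
def _example_plot_dispatch():
    import numpy as np
    import xarray as xr
    da1d = xr.DataArray(np.arange(5), dims='x')
    da2d = xr.DataArray(np.random.rand(4, 5), dims=('y', 'x'))
    da3d = xr.DataArray(np.random.rand(2, 3, 4), dims=('t', 'y', 'x'))
    plot(da1d)           # 1 plottable dimension  -> line
    plot(da2d)           # 2 plottable dimensions -> pcolormesh
    plot(da2d, hue='y')  # hue consumes a dimension -> line, one line per 'y'
    plot(da3d)           # anything else -> hist of the flattened values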
# This function signature should not change so that it can use
# matplotlib format strings
def line(darray, *args, **kwargs):
"""
Line plot of DataArray index against values
Wraps :func:`matplotlib:matplotlib.pyplot.plot`
Parameters
----------
darray : DataArray
Must be 1 dimensional
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
If plotting against a 2D coordinate, ``hue`` must be a dimension.
x, y : string, optional
Dimensions or coordinates for x, y axis.
Only one of these may be specified.
The other coordinate plots values from the DataArray on which this
plot method is called.
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_legend : boolean, optional
Add legend with y axis coordinates (2D inputs only).
*args, **kwargs : optional
Additional arguments to matplotlib.pyplot.plot
"""
# Handle facetgrids first
row = kwargs.pop('row', None)
col = kwargs.pop('col', None)
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop('kwargs'))
allargs.pop('darray')
return _easy_facetgrid(darray, line, kind='line', **allargs)
ndims = len(darray.dims)
if ndims > 2:
raise ValueError('Line plots are for 1- or 2-dimensional DataArrays. '
'Passed DataArray has {ndims} '
'dimensions'.format(ndims=ndims))
# Ensures consistency with .plot method
figsize = kwargs.pop('figsize', None)
aspect = kwargs.pop('aspect', None)
size = kwargs.pop('size', None)
ax = kwargs.pop('ax', None)
hue = kwargs.pop('hue', None)
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
xincrease = kwargs.pop('xincrease', None) # default needs to be None
yincrease = kwargs.pop('yincrease', None)
xscale = kwargs.pop('xscale', None) # default needs to be None
yscale = kwargs.pop('yscale', None)
xticks = kwargs.pop('xticks', None)
yticks = kwargs.pop('yticks', None)
xlim = kwargs.pop('xlim', None)
ylim = kwargs.pop('ylim', None)
add_legend = kwargs.pop('add_legend', True)
_labels = kwargs.pop('_labels', True)
    if args == ():
args = kwargs.pop('args', ())
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, xlabel, ylabel, huelabel = \
_infer_line_data(darray, x, y, hue)
# Remove pd.Intervals if contained in xplt.values.
if _valid_other_type(xplt.values, [pd.Interval]):
# Is it a step plot? (see matplotlib.Axes.step)
if kwargs.get('linestyle', '').startswith('steps-'):
xplt_val, yplt_val = _interval_to_double_bound_points(xplt.values,
yplt.values)
# Remove steps-* to be sure that matplotlib is not confused
kwargs['linestyle'] = (kwargs['linestyle']
.replace('steps-pre', '')
.replace('steps-post', '')
.replace('steps-mid', ''))
if kwargs['linestyle'] == '':
kwargs.pop('linestyle')
else:
xplt_val = _interval_to_mid_points(xplt.values)
yplt_val = yplt.values
xlabel += '_center'
else:
xplt_val = xplt.values
yplt_val = yplt.values
_ensure_plottable(xplt_val, yplt_val)
primitive = ax.plot(xplt_val, yplt_val, *args, **kwargs)
if _labels:
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.set_title(darray._title_for_slice())
if darray.ndim == 2 and add_legend:
ax.legend(handles=primitive,
labels=list(hueplt.values),
title=huelabel)
# Rotate dates on xlabels
# Do this without calling autofmt_xdate so that x-axes ticks
# on other subplots (if any) are not deleted.
# https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots
if np.issubdtype(xplt.dtype, np.datetime64):
for xlabels in ax.get_xticklabels():
xlabels.set_rotation(30)
xlabels.set_ha('right')
_update_axes(ax, xincrease, yincrease, xscale, yscale,
xticks, yticks, xlim, ylim)
return primitive
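# Hedged example of the ``hue`` and ``x``/``y`` behaviour documented above;
# the coordinate names are illustrative, and xarray/numpy are assumed to be
# importable.
def _example_line_hue():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.random.rand(10, 3), dims=('time', 'trial'),
                      coords={'time': np.arange(10), 'trial': [1, 2, 3]})
    # 2D input: one line per value of the 'trial' coordinate, with a legend.
    line(da, hue='trial')
    # 1D input: put the DataArray values on x and the 'time' coordinate on y.
    line(da.isel(trial=0), y='time')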
def step(darray, *args, **kwargs):
"""
Step plot of DataArray index against values
Similar to :func:`matplotlib:matplotlib.pyplot.step`
Parameters
----------
where : {'pre', 'post', 'mid'}, optional, default 'pre'
Define where the steps should be placed:
- 'pre': The y value is continued constantly to the left from
every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
value ``y[i]``.
- 'post': The y value is continued constantly to the right from
every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
value ``y[i]``.
- 'mid': Steps occur half-way between the *x* positions.
Note that this parameter is ignored if the x coordinate consists of
:py:func:`pandas.Interval` values, e.g. as a result of
:py:func:`xarray.Dataset.groupby_bins`. In this case, the actual
boundaries of the interval are used.
*args, **kwargs : optional
Additional arguments following :py:func:`xarray.plot.line`
"""
if ('ls' in kwargs.keys()) and ('linestyle' not in kwargs.keys()):
kwargs['linestyle'] = kwargs.pop('ls')
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where + kwargs.get('linestyle', '')
return line(darray, *args, **kwargs)
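# Hedged sketch of the ``where`` handling and the pandas.Interval special
# case described in the docstring above; the bin count and sizes are
# arbitrary, and xarray/numpy are assumed to be importable.
def _example_step():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.random.rand(20), dims='x',
                      coords={'x': np.linspace(0., 1., 20)})
    step(da, where='mid')
    # For binned coordinates the actual interval boundaries are used and the
    # ``where`` argument is effectively ignored.
    binned = da.groupby_bins('x', bins=4).mean()
    step(binned, where='post')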
def hist(darray, figsize=None, size=None, aspect=None, ax=None, **kwargs):
"""
Histogram of DataArray
Wraps :func:`matplotlib:matplotlib.pyplot.hist`
Plots N dimensional arrays by first flattening the array.
Parameters
----------
darray : DataArray
Can be any dimension
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
**kwargs : optional
Additional keyword arguments to matplotlib.pyplot.hist
"""
ax = get_axis(figsize, size, aspect, ax)
xincrease = kwargs.pop('xincrease', None) # default needs to be None
yincrease = kwargs.pop('yincrease', None)
xscale = kwargs.pop('xscale', None) # default needs to be None
yscale = kwargs.pop('yscale', None)
xticks = kwargs.pop('xticks', None)
yticks = kwargs.pop('yticks', None)
xlim = kwargs.pop('xlim', None)
ylim = kwargs.pop('ylim', None)
no_nan = np.ravel(darray.values)
no_nan = no_nan[pd.notnull(no_nan)]
primitive = ax.hist(no_nan, **kwargs)
ax.set_title('Histogram')
ax.set_xlabel(label_from_attrs(darray))
_update_axes(ax, xincrease, yincrease, xscale, yscale,
xticks, yticks, xlim, ylim)
return primitive
# MUST run before any 2d plotting functions are defined since
# _plot2d decorator adds them as methods here.
class _PlotMethods:
"""
Enables use of xarray.plot functions as attributes on a DataArray.
For example, DataArray.plot.imshow
"""
def __init__(self, darray):
self._da = darray
def __call__(self, **kwargs):
return plot(self._da, **kwargs)
@functools.wraps(hist)
def hist(self, ax=None, **kwargs):
return hist(self._da, ax=ax, **kwargs)
@functools.wraps(line)
def line(self, *args, **kwargs):
return line(self._da, *args, **kwargs)
@functools.wraps(step)
def step(self, *args, **kwargs):
return step(self._da, *args, **kwargs)
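# Illustrative sketch (assuming this module is installed as xarray's plot
# accessor) showing that DataArray.plot is a _PlotMethods instance, so the
# accessor calls below are equivalent to the module-level functions.
def _example_accessor():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.random.rand(4, 5), dims=('y', 'x'))
    da.plot()                     # same as plot(da)
    da.plot.hist()                # same as hist(da)
    da.plot.line(x='x', hue='y')  # same as line(da, x='x', hue='y')
    # 2d methods such as .pcolormesh/.imshow/.contourf are attached to
    # _PlotMethods by the _plot2d decorator defined below.
    da.plot.pcolormesh()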
def _plot2d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : integer, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_colorbar : Boolean, optional
Adds colorbar to axis
add_labels : Boolean, optional
Use xarray metadata to label axes
norm : ``matplotlib.colors.Normalize`` instance, optional
If the ``norm`` has vmin or vmax specified, the corresponding kwarg
must be None.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
setting one of these values will fix the other by symmetry around
``center``. Setting both values prevents use of a diverging colormap.
If discrete levels are provided as an explicit list, both of these
values are ignored.
cmap : matplotlib colormap name or object, optional
The mapping from data values to color space. If not provided, this
        will either be ``viridis`` (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
When `Seaborn` is installed, ``cmap`` may also be a `seaborn`
color palette. If ``cmap`` is seaborn color palette and the plot type
is not ``contour`` or ``contourf``, ``levels`` must also be specified.
colors : discrete colors to plot, optional
A single color or a list of colors. If the plot type is not ``contour``
or ``contourf``, the ``levels`` argument is required.
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap. Setting it to ``False`` prevents use of a
diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with 2nd and 98th percentiles instead of the extreme values.
extend : {'neither', 'both', 'min', 'max'}, optional
How to draw arrows extending the colorbar beyond its limits. If not
provided, extend is inferred from vmin, vmax and the data limits.
levels : int or list-like object, optional
Split the colormap (cmap) into discrete color intervals. If an integer
is provided, "nice" levels are chosen based on the data range: this can
imply that the final number of levels is not exactly the expected one.
Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
setting ``levels=np.linspace(vmin, vmax, N)``.
infer_intervals : bool, optional
Only applies to pcolormesh. If True, the coordinate intervals are
passed to pcolormesh. If False, the original coordinates are used
(this can be useful for certain map projections). The default is to
always infer intervals, unless the mesh is irregular and plotted on
a map projection.
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only applies
to FacetGrid plotting.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar.
cbar_kwargs : dict, optional
Dictionary of keyword arguments to pass to the colorbar.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = '%s\n%s' % (plotfunc.__doc__, commondoc)
@functools.wraps(plotfunc)
def newplotfunc(darray, x=None, y=None, figsize=None, size=None,
aspect=None, ax=None, row=None, col=None,
col_wrap=None, xincrease=True, yincrease=True,
add_colorbar=None, add_labels=True, vmin=None, vmax=None,
cmap=None, center=None, robust=False, extend=None,
levels=None, infer_intervals=None, colors=None,
subplot_kws=None, cbar_ax=None, cbar_kwargs=None,
xscale=None, yscale=None, xticks=None, yticks=None,
xlim=None, ylim=None, norm=None, **kwargs):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Decide on a default for the colorbar before facetgrids
if add_colorbar is None:
add_colorbar = plotfunc.__name__ != 'contour'
imshow_rgb = (
plotfunc.__name__ == 'imshow' and
darray.ndim == (3 + (row is not None) + (col is not None)))
if imshow_rgb:
# Don't add a colorbar when showing an image with explicit colors
add_colorbar = False
# Matplotlib does not support normalising RGB data, so do it here.
# See eg. https://github.com/matplotlib/matplotlib/pull/10220
if robust or vmax is not None or vmin is not None:
darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)
vmin, vmax, robust = None, None, False
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.pop('imshow_rgb')
allargs.update(allargs.pop('kwargs'))
allargs.pop('darray')
# Need the decorated plotting function
allargs['plotfunc'] = globals()[plotfunc.__name__]
return _easy_facetgrid(darray, kind='dataarray', **allargs)
plt = import_matplotlib_pyplot()
rgb = kwargs.pop('rgb', None)
if rgb is not None and plotfunc.__name__ != 'imshow':
raise ValueError('The "rgb" keyword is only valid for imshow()')
elif rgb is not None and not imshow_rgb:
raise ValueError('The "rgb" keyword is only valid for imshow()'
'with a three-dimensional array (per facet)')
xlab, ylab = _infer_xy_labels(
darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb)
# better to pass the ndarrays directly to plotting functions
xval = darray[xlab].values
yval = darray[ylab].values
# check if we need to broadcast one dimension
if xval.ndim < yval.ndim:
xval = np.broadcast_to(xval, yval.shape)
if yval.ndim < xval.ndim:
yval = np.broadcast_to(yval, xval.shape)
# May need to transpose for correct x, y labels
# xlab may be the name of a coord, we have to check for dim names
if imshow_rgb:
# For RGB[A] images, matplotlib requires the color dimension
# to be last. In Xarray the order should be unimportant, so
# we transpose to (y, x, color) to make this work.
yx_dims = (ylab, xlab)
dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims)
if dims != darray.dims:
darray = darray.transpose(*dims, transpose_coords=True)
elif darray[xlab].dims[-1] == darray.dims[0]:
darray = darray.transpose(transpose_coords=True)
# Pass the data as a masked ndarray too
zval = darray.to_masked_array(copy=False)
# Replace pd.Intervals if contained in xval or yval.
xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)
yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)
_ensure_plottable(xplt, yplt)
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
plotfunc, locals(), zval.data)
if 'contour' in plotfunc.__name__:
# extend is a keyword argument only for contour and contourf, but
# passing it to the colorbar is sufficient for imshow and
# pcolormesh
kwargs['extend'] = cmap_params['extend']
kwargs['levels'] = cmap_params['levels']
# if colors == a single color, matplotlib draws dashed negative
# contours. we lose this feature if we pass cmap and not colors
if isinstance(colors, str):
cmap_params['cmap'] = None
kwargs['colors'] = colors
if 'pcolormesh' == plotfunc.__name__:
kwargs['infer_intervals'] = infer_intervals
if 'imshow' == plotfunc.__name__ and isinstance(aspect, str):
# forbid usage of mpl strings
raise ValueError("plt.imshow's `aspect` kwarg is not available "
"in xarray")
ax = get_axis(figsize, size, aspect, ax)
primitive = plotfunc(xplt, yplt, zval, ax=ax, cmap=cmap_params['cmap'],
vmin=cmap_params['vmin'],
vmax=cmap_params['vmax'],
norm=cmap_params['norm'],
**kwargs)
# Label the plot with metadata
if add_labels:
ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))
ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))
ax.set_title(darray._title_for_slice())
if add_colorbar:
if add_labels and 'label' not in cbar_kwargs:
cbar_kwargs['label'] = label_from_attrs(darray)
cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs,
cmap_params)
elif (cbar_ax is not None or cbar_kwargs):
# inform the user about keywords which aren't used
raise ValueError("cbar_ax and cbar_kwargs can't be used with "
"add_colorbar=False.")
# origin kwarg overrides yincrease
if 'origin' in kwargs:
yincrease = None
_update_axes(ax, xincrease, yincrease, xscale, yscale,
xticks, yticks, xlim, ylim)
# Rotate dates on xlabels
# Do this without calling autofmt_xdate so that x-axes ticks
# on other subplots (if any) are not deleted.
# https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots
if np.issubdtype(xplt.dtype, np.datetime64):
for xlabels in ax.get_xticklabels():
xlabels.set_rotation(30)
xlabels.set_ha('right')
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(_PlotMethods_obj, x=None, y=None, figsize=None, size=None,
aspect=None, ax=None, row=None, col=None, col_wrap=None,
xincrease=True, yincrease=True, add_colorbar=None,
add_labels=True, vmin=None, vmax=None, cmap=None,
colors=None, center=None, robust=False, extend=None,
levels=None, infer_intervals=None, subplot_kws=None,
cbar_ax=None, cbar_kwargs=None,
xscale=None, yscale=None, xticks=None, yticks=None,
xlim=None, ylim=None, norm=None, **kwargs):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs['darray'] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ['_PlotMethods_obj', 'newplotfunc', 'kwargs']:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
@_plot2d
def imshow(x, y, z, ax, **kwargs):
"""
Image plot of 2d DataArray using matplotlib.pyplot
Wraps :func:`matplotlib:matplotlib.pyplot.imshow`
While other plot methods require the DataArray to be strictly
two-dimensional, ``imshow`` also accepts a 3D array where some
dimension can be interpreted as RGB or RGBA color channels and
allows this dimension to be specified via the kwarg ``rgb=``.
Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBA
data, by applying a single scaling factor and offset to all bands.
Passing ``robust=True`` infers ``vmin`` and ``vmax``
:ref:`in the usual way <robust-plotting>`.
.. note::
This function needs uniformly spaced coordinates to
properly label the axes. Call DataArray.plot() to check.
    The pixels are centered on the coordinate values. That is, if the coordinate
    value is 3.2, then the pixels for those coordinates will be centered on 3.2.
"""
if x.ndim != 1 or y.ndim != 1:
raise ValueError('imshow requires 1D coordinates, try using '
'pcolormesh or contour(f)')
# Centering the pixels- Assumes uniform spacing
try:
xstep = (x[1] - x[0]) / 2.0
except IndexError:
# Arbitrary default value, similar to matplotlib behaviour
xstep = .1
try:
ystep = (y[1] - y[0]) / 2.0
except IndexError:
ystep = .1
left, right = x[0] - xstep, x[-1] + xstep
bottom, top = y[-1] + ystep, y[0] - ystep
defaults = {'origin': 'upper',
'interpolation': 'nearest'}
if not hasattr(ax, 'projection'):
# not for cartopy geoaxes
defaults['aspect'] = 'auto'
# Allow user to override these defaults
defaults.update(kwargs)
if defaults['origin'] == 'upper':
defaults['extent'] = [left, right, bottom, top]
else:
defaults['extent'] = [left, right, top, bottom]
if z.ndim == 3:
# matplotlib imshow uses black for missing data, but Xarray makes
# missing data transparent. We therefore add an alpha channel if
# there isn't one, and set it to transparent where data is masked.
if z.shape[-1] == 3:
alpha = np.ma.ones(z.shape[:2] + (1,), dtype=z.dtype)
if np.issubdtype(z.dtype, np.integer):
alpha *= 255
z = np.ma.concatenate((z, alpha), axis=2)
else:
z = z.copy()
z[np.any(z.mask, axis=-1), -1] = 0
primitive = ax.imshow(z, **defaults)
return primitive
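# A hedged RGB usage sketch for the behaviour described in the imshow
# docstring; the array shape and the 'band' dimension name are assumptions.
def _example_imshow_rgb():
    import numpy as np
    import xarray as xr
    img = xr.DataArray(np.random.rand(32, 48, 3), dims=('y', 'x', 'band'))
    # The length-3 'band' dimension is treated as RGB channels: no colorbar
    # is drawn, and robust=True rescales all bands with a single factor.
    imshow(img, rgb='band', robust=True)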
@_plot2d
def contour(x, y, z, ax, **kwargs):
"""
Contour plot of 2d DataArray
Wraps :func:`matplotlib:matplotlib.pyplot.contour`
"""
primitive = ax.contour(x, y, z, **kwargs)
return primitive
@_plot2d
def contourf(x, y, z, ax, **kwargs):
"""
Filled contour plot of 2d DataArray
Wraps :func:`matplotlib:matplotlib.pyplot.contourf`
"""
primitive = ax.contourf(x, y, z, **kwargs)
return primitive
@_plot2d
def pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs):
"""
Pseudocolor plot of 2d DataArray
Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`
"""
# decide on a default for infer_intervals (GH781)
x = np.asarray(x)
if infer_intervals is None:
if hasattr(ax, 'projection'):
if len(x.shape) == 1:
infer_intervals = True
else:
infer_intervals = False
else:
infer_intervals = True
if (infer_intervals and
((np.shape(x)[0] == np.shape(z)[1]) or
((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1])))):
if len(x.shape) == 1:
x = _infer_interval_breaks(x, check_monotonic=True)
else:
# we have to infer the intervals on both axes
x = _infer_interval_breaks(x, axis=1)
x = _infer_interval_breaks(x, axis=0)
if (infer_intervals and
(np.shape(y)[0] == np.shape(z)[0])):
if len(y.shape) == 1:
y = _infer_interval_breaks(y, check_monotonic=True)
else:
# we have to infer the intervals on both axes
y = _infer_interval_breaks(y, axis=1)
y = _infer_interval_breaks(y, axis=0)
primitive = ax.pcolormesh(x, y, z, **kwargs)
# by default, pcolormesh picks "round" values for bounds
# this results in ugly looking plots with lots of surrounding whitespace
if not hasattr(ax, 'projection') and x.ndim == 1 and y.ndim == 1:
# not a cartopy geoaxis
ax.set_xlim(x[0], x[-1])
ax.set_ylim(y[0], y[-1])
return primitive
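# Hedged sketch of the infer_intervals behaviour used above; the coordinates
# are illustrative and assumed to be 1d and evenly spaced.
def _example_pcolormesh_intervals():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.random.rand(4, 5), dims=('lat', 'lon'),
                      coords={'lat': np.linspace(-60., 60., 4),
                              'lon': np.linspace(0., 360., 5)})
    # Default: cell edges are inferred from the coordinates, so every value
    # gets a full cell.
    pcolormesh(da)
    # With infer_intervals=False the coordinates are passed through unchanged
    # (useful for some map projections); matplotlib then drops the last
    # row/column of values.
    pcolormesh(da, infer_intervals=False)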
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .base import Type
from .isobmff import IsoBmff
class Mp4(IsoBmff):
"""
Implements the MP4 video type matcher.
"""
MIME = 'video/mp4'
EXTENSION = 'mp4'
def __init__(self):
super(Mp4, self).__init__(
mime=Mp4.MIME,
extension=Mp4.EXTENSION
)
def match(self, buf):
if not self._is_isobmff(buf):
return False
major_brand, minor_version, compatible_brands = self._get_ftyp(buf)
for brand in compatible_brands:
if brand in ['mp41', 'mp42', 'isom']:
return True
return major_brand in ['mp41', 'mp42', 'isom']
class M4v(Type):
"""
Implements the M4V video type matcher.
"""
MIME = 'video/x-m4v'
EXTENSION = 'm4v'
def __init__(self):
super(M4v, self).__init__(
mime=M4v.MIME,
extension=M4v.EXTENSION
)
def match(self, buf):
return (len(buf) > 10 and
buf[0] == 0x0 and buf[1] == 0x0 and
buf[2] == 0x0 and buf[3] == 0x1C and
buf[4] == 0x66 and buf[5] == 0x74 and
buf[6] == 0x79 and buf[7] == 0x70 and
buf[8] == 0x4D and buf[9] == 0x34 and
buf[10] == 0x56)
class Mkv(Type):
"""
Implements the MKV video type matcher.
"""
MIME = 'video/x-matroska'
EXTENSION = 'mkv'
def __init__(self):
super(Mkv, self).__init__(
mime=Mkv.MIME,
extension=Mkv.EXTENSION
)
def match(self, buf):
return ((len(buf) > 15 and
buf[0] == 0x1A and buf[1] == 0x45 and
buf[2] == 0xDF and buf[3] == 0xA3 and
buf[4] == 0x93 and buf[5] == 0x42 and
buf[6] == 0x82 and buf[7] == 0x88 and
buf[8] == 0x6D and buf[9] == 0x61 and
buf[10] == 0x74 and buf[11] == 0x72 and
buf[12] == 0x6F and buf[13] == 0x73 and
buf[14] == 0x6B and buf[15] == 0x61) or
(len(buf) > 38 and
buf[31] == 0x6D and buf[32] == 0x61 and
buf[33] == 0x74 and buf[34] == 0x72 and
buf[35] == 0x6f and buf[36] == 0x73 and
buf[37] == 0x6B and buf[38] == 0x61))
class Webm(Type):
"""
Implements the WebM video type matcher.
"""
MIME = 'video/webm'
EXTENSION = 'webm'
def __init__(self):
super(Webm, self).__init__(
mime=Webm.MIME,
extension=Webm.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x1A and
buf[1] == 0x45 and
buf[2] == 0xDF and
buf[3] == 0xA3)
class Mov(IsoBmff):
"""
Implements the MOV video type matcher.
"""
MIME = 'video/quicktime'
EXTENSION = 'mov'
def __init__(self):
super(Mov, self).__init__(
mime=Mov.MIME,
extension=Mov.EXTENSION
)
def match(self, buf):
if not self._is_isobmff(buf):
return False
major_brand, minor_version, compatible_brands = self._get_ftyp(buf)
return major_brand == 'qt '
class Avi(Type):
"""
Implements the AVI video type matcher.
"""
MIME = 'video/x-msvideo'
EXTENSION = 'avi'
def __init__(self):
super(Avi, self).__init__(
mime=Avi.MIME,
extension=Avi.EXTENSION
)
def match(self, buf):
return (len(buf) > 11 and
buf[0] == 0x52 and
buf[1] == 0x49 and
buf[2] == 0x46 and
buf[3] == 0x46 and
buf[8] == 0x41 and
buf[9] == 0x56 and
buf[10] == 0x49 and
buf[11] == 0x20)
class Wmv(Type):
"""
Implements the WMV video type matcher.
"""
MIME = 'video/x-ms-wmv'
EXTENSION = 'wmv'
def __init__(self):
super(Wmv, self).__init__(
mime=Wmv.MIME,
extension=Wmv.EXTENSION
)
def match(self, buf):
return (len(buf) > 9 and
buf[0] == 0x30 and
buf[1] == 0x26 and
buf[2] == 0xB2 and
buf[3] == 0x75 and
buf[4] == 0x8E and
buf[5] == 0x66 and
buf[6] == 0xCF and
buf[7] == 0x11 and
buf[8] == 0xA6 and
buf[9] == 0xD9)
class Flv(Type):
"""
Implements the FLV video type matcher.
"""
MIME = 'video/x-flv'
EXTENSION = 'flv'
def __init__(self):
super(Flv, self).__init__(
mime=Flv.MIME,
extension=Flv.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x46 and
buf[1] == 0x4C and
buf[2] == 0x56 and
buf[3] == 0x01)
class Mpeg(Type):
"""
Implements the MPEG video type matcher.
"""
MIME = 'video/mpeg'
EXTENSION = 'mpg'
def __init__(self):
super(Mpeg, self).__init__(
mime=Mpeg.MIME,
extension=Mpeg.EXTENSION
)
def match(self, buf):
return (len(buf) > 3 and
buf[0] == 0x0 and
buf[1] == 0x0 and
buf[2] == 0x1 and
buf[3] >= 0xb0 and
buf[3] <= 0xbf)
class M3gp(Type):
"""Implements the 3gp image type matcher."""
MIME = 'video/3gpp'
EXTENSION = '3gp'
def __init__(self):
super(M3gp, self).__init__(
mime=M3gp.MIME,
extension=M3gp.EXTENSION
)
def match(self, buf):
return buf[:7] == bytearray([0x66, 0x74, 0x79, 0x70, 0x33, 0x67, 0x70])
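# Hedged usage sketch (not part of the original module): every matcher above
# inspects only the first bytes of a file. The file name and the 261-byte
# read size are illustrative assumptions, and the .mime/.extension attributes
# are assumed to be exposed by the Type base class as set in __init__.
def _example_match_header(path='sample_video.bin'):
    matchers = [Mp4(), M4v(), Mkv(), Webm(), Mov(), Avi(), Wmv(), Flv(),
                Mpeg(), M3gp()]
    with open(path, 'rb') as fh:
        buf = bytearray(fh.read(261))
    for matcher in matchers:
        if matcher.match(buf):
            return matcher.mime, matcher.extension
    return None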
|
|
import nflgame
import MySQLdb
import MySQLdb.cursors
import datetime
import os
import argparse
import gamestats_functions as f
import config as c
# Location of text file used to translate nflgame player IDs to CBS player IDs
lookupfile = os.path.dirname(os.path.abspath(__file__))+"/playerids.txt"
# Directory where phpffl game statistic files should be placed.
try:
output_dir = c.output_dir
except AttributeError:
output_dir = os.path.dirname(os.path.abspath(__file__))+'/gamestats'
nflids = dict()
def main():
global nflids
cur_year, cur_week = nflgame.live.current_year_and_week()
phase = nflgame.live._cur_season_phase
if args.y == "0": year = str(cur_year)
else: year = args.y
if args.w == "0": week = str(cur_week)
else: week = args.w
phase = args.p
games = nflgame.games(int(year), week=int(week), kind=phase)
print "Week: %s Year: %s Phase: %s" %(str(week),str(year),phase)
nflids = load_nflids()
for game in games:
print str(game)+" "+str(game.time)
one_game(game)
def one_game(game):
# Get game dict and print
gamedict = {}
gamedict["game"] = {}
gamedict["players"] = {}
# game = nflgame.one(year,week,str(home),str(away))
if (game.playing() or game.game_over()):
FillGameInfo(game, gamedict)
InitTeamPlayers(gamedict)
StatsFromGame(game, gamedict)
StatsFromPlayers(game.players, gamedict)
StatsFromPlays(game.drives.plays(), gamedict)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
outfile = output_dir+"/"+gamedict["game"]["ID"]
text = print_game(gamedict)
with open(outfile, "w") as f:
f.write(text)
else:
print "Game is in pregame, starting soon."
# Fill a playersdict with stats
def StatsFromPlayers(players, gamedict):
for player in players:
phpffl_id = GetPhpfflID(player.playerid)
gamedict["players"][phpffl_id] = {}
gamedict["players"][phpffl_id]["name"] = player.name # For debugging
for stat in player.stats:
phpffl_stat = f.get_key(stat)
if gamedict["players"][phpffl_id].get(phpffl_stat) is None:
gamedict["players"][phpffl_id][phpffl_stat] = player.stats[stat]
else:
gamedict["players"][phpffl_id][phpffl_stat] += player.stats[stat]
def StatsFromPlays(plays, gamedict):
for play in plays:
for event in play.events:
for stat in event:
phpffl_stat = f.get_key(stat)
# Stats for team players
if stat == "passing_sk": # Switched to passing_sk_yds to catch "Team Sacks"
f.team_sack(event,gamedict)
# if stat == "defense_sk":
# f.team_sack(event,gamedict)
if stat == "fumbles_lost":
f.team_fumble(event,gamedict)
if stat == "defense_int":
f.team_defint(event,gamedict)
if stat == "defense_tds":
f.team_def_td(event,gamedict)
if stat == "defense_safe":
f.team_def_saf(event,gamedict)
if stat == "puntret_tds" or stat == "kickret_tds":
f.team_st_td(event,gamedict)
# scenario where def recovers fumble, fumbles again and gets a TD
if stat == "fumbles_rec_tds" and event["team"] != play.team:
f.team_def_td(event,gamedict)
# Stats for human players
if stat == "kicking_fgm_yds": # Need yardages for each field goal
phpffl_id = GetPhpfflID(event["playerid"])
if gamedict["players"].get(phpffl_id) is None: # new player, initialize
gamedict["players"][phpffl_id] = {}
#gamedict["players"][phpffl_id]["name"] = player.name # For debugging
f.player_field_goal(phpffl_id, event, gamedict)
if (stat == "kickret_yds" or stat == "puntret_yds") and play.note != "FUMBLE":
phpffl_id = GetPhpfflID(event["playerid"])
f.AddPlayerStat(phpffl_id, stat, event, gamedict)
if (stat == "kicking_fgmissed"):
phpffl_id = GetPhpfflID(event["playerid"])
f.AddPlayerStat(phpffl_id, stat, event, gamedict)
if (stat == "rushing_tds") or (stat == "receiving_tds"):
phpffl_id = GetPhpfflID(event["playerid"])
f.AddPlayerTD(phpffl_id, stat, event, gamedict)
# Apparently this is not used
def TeamStatsFromPlays(plays, gamedict):
home = gamedict["game"]["home"]
away = gamedict["game"]["away"]
home_rushing_yds = 0
home_passing_yds = 0
home_total_yds = 0
home_score = 0
away_rushing_yds = 0
away_passing_yds = 0
away_total_yds = 0
away_score = 0
for play in plays:
for event in play.events:
for stat in event:
if stat == "passing_yds" and event["team"] == home:
home_passing_yds += event[stat]
if stat == "passing_yds" and event["team"] == away:
away_passing_yds += event[stat]
if stat == "rushing_yds" and event["team"] == home:
home_rushing_yds += event[stat]
if stat == "rushing_yds" and event["team"] == away:
away_rushing_yds += event[stat]
print stat
def DiffGameStats(oldgame, game, gamedict):
home = gamedict["game"]["home"]
away = gamedict["game"]["away"]
# Team offensive line - Home
gamedict["players"]["TOL_"+home]["Tm RU"] = game.stats_home.rushing_yds - oldgame.stats_home.rushing_yds
gamedict["players"]["TOL_"+home]["Tm PS"] = game.stats_home.passing_yds - oldgame.stats_home.passing_yds
# Team offensive line - Away
gamedict["players"]["TOL_"+away]["Tm RU"] = game.stats_away.rushing_yds - oldgame.stats_away.rushing_yds
gamedict["players"]["TOL_"+away]["Tm PS"] = game.stats_away.passing_yds - oldgame.stats_away.passing_yds
# Team defense - Home
gamedict["players"]["TDEF_"+home]["PA"] = game.score_away - oldgame.score_away
gamedict["players"]["TDEF_"+home]["YA"] = game.stats_away.total_yds - oldgame.stats_away.total_yds
# Team defense - Away
gamedict["players"]["TDEF_"+away]["PA"] = game.score_home - oldgame.score_home
gamedict["players"]["TDEF_"+away]["YA"] = game.stats_home.total_yds - oldgame.stats_home.total_yds
# Def/ST Home
gamedict["players"]["TDEFST_"+away]["PA"] = game.score_home - oldgame.score_home
gamedict["players"]["TDEFST_"+away]["YA"] = game.stats_away.total_yds - oldgame.stats_away.total_yds
# Special teams - Home
# Special teams - Away
def StatsFromGame(game, gamedict):
home = gamedict["game"]["home"]
away = gamedict["game"]["away"]
# Team offensive line - Home
gamedict["players"]["TOL_"+home]["Tm RU"] = game.stats_home.rushing_yds
gamedict["players"]["TOL_"+home]["Tm PS"] = game.stats_home.passing_yds
# Team offensive line - Away
gamedict["players"]["TOL_"+away]["Tm RU"] = game.stats_away.rushing_yds
gamedict["players"]["TOL_"+away]["Tm PS"] = game.stats_away.passing_yds
# Team defense - Home
gamedict["players"]["TDEF_"+home]["PA"] = game.score_away
gamedict["players"]["TDEF_"+home]["YA"] = game.stats_away.total_yds
# Team defense - Away
gamedict["players"]["TDEF_"+away]["PA"] = game.score_home
gamedict["players"]["TDEF_"+away]["YA"] = game.stats_home.total_yds
# Def/ST Home
gamedict["players"]["TDEFST_"+away]["PA"] = game.score_home
gamedict["players"]["TDEFST_"+away]["YA"] = game.stats_away.total_yds
# Special teams - Home
# Special teams - Away
def FillGameInfo(game, gamedict):
gamedict["game"]["homescore"] = game.score_home
gamedict["game"]["awayscore"] = game.score_away
gamedict["game"]["date"] = game.schedule
year = str(game.schedule["eid"])[0:4]
gamedict["game"]["ID"] = year+str(game.schedule["month"]).strip().zfill(2)+str(game.schedule["day"]).strip().zfill(2)+"_"+game.away+"@"+game.home
gamedict["game"]["away"] = game.away
gamedict["game"]["home"] = game.home
gamedict["game"]["season"] = game.season()
gamedict["game"]["week"] = str(game.schedule["week"]).strip()
gamedict["game"]["status"] = f.get_key(str(game.time))
def InitTeamPlayers(gamedict):
home = gamedict["game"]["home"]
away = gamedict["game"]["away"]
gamedict["players"]["TOL_"+home] = {}
gamedict["players"]["TOL_"+home]["name"] = "TOL_"+home
gamedict["players"]["TOL_"+away] = {}
gamedict["players"]["TOL_"+away]["name"] = "TOL_"+away
gamedict["players"]["TDEF_"+home] = {}
gamedict["players"]["TDEF_"+home]["name"] = "TDEF_"+home
gamedict["players"]["TDEF_"+away] = {}
gamedict["players"]["TDEF_"+away]["name"] = "TDEF_"+away
gamedict["players"]["TDEFST_"+home] = {}
gamedict["players"]["TDEFST_"+home]["name"] = "TDEFST_"+home
gamedict["players"]["TDEFST_"+away] = {}
gamedict["players"]["TDEFST_"+away]["name"] = "TDEFST_"+away
gamedict["players"]["TST_"+home] = {}
gamedict["players"]["TST_"+home]["name"] = "TST_"+home
gamedict["players"]["TST_"+away] = {}
gamedict["players"]["TST_"+away]["name"] = "TST_"+away
def GetPhpfflID(p):
if nflids.get(p) is not None:
return nflids[p]
else:
logerror("Player not found in phpffl: "+p)
return p
def load_nflids():
ids = dict()
with open(lookupfile) as f:
for line in f:
nfl_id,phpffl_id = line.strip().split(",")
ids[nfl_id] = phpffl_id
return ids
def print_game(gamedict):
text = ""
text += "a:"+str(len(gamedict))+":{"
for key, value in gamedict.items():
        if isinstance(value, dict):
text += "s:"+str(len(str(key)))+":"+"\""+str(key)+"\";"
text += print_game(value)
else:
text += "s:"+str(len(str(key)))+":"+"\""+str(key)+"\";"
text += "s:"+str(len(str(value)))+":"+"\""+str(value)+"\";"
text += "}"
return text
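# Hedged sketch of the PHP-serialize-like output produced by print_game();
# the tiny gamedict below is illustrative only, and key order may vary.
def _example_print_game():
    sample = {"game": {"home": "KC"}, "players": {}}
    # Yields a nested "a:<count>:{...}" structure, e.g.:
    # a:2:{s:4:"game";a:1:{s:4:"home";s:2:"KC";}s:7:"players";a:0:{}}
    return print_game(sample)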
def logerror(text):
f = open("gamestats_error.log", "a")
f.write(str(datetime.datetime.now())+" - "+text+"\n")
f.close()
parser = argparse.ArgumentParser(description='Update phpffl static files')
parser.add_argument('-y', action="store", default="0", required=False, help="Year")
parser.add_argument('-w', action="store", default="0", required=False, help="Week")
parser.add_argument('-p', action="store", default="REG", required=False, help="Phase: PRE, REG, POST")
args = parser.parse_args()
main()
|
|
"""
@package mi.dataset.parser.test
@file marine-integrations/mi/dataset/parser/test/test_nutnr_j_cspp.py
@author Emily Hahn
@brief Test code for a nutnr_j_cspp data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import RecoverableSampleException, \
ConfigurationException
from mi.dataset.test.test_parser import BASE_RESOURCE_PATH, ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.cspp_base import \
METADATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.nutnr_j_cspp import NutnrJCsppParser, \
NutnrJCsppMetadataTelemeteredDataParticle, \
NutnrJCsppTelemeteredDataParticle, \
NutnrJCsppDarkTelemeteredDataParticle, \
NutnrJCsppDarkRecoveredDataParticle, \
NutnrJCsppMetadataRecoveredDataParticle, \
NutnrJCsppRecoveredDataParticle, \
LIGHT_PARTICLE_CLASS_KEY, \
DARK_PARTICLE_CLASS_KEY
RESOURCE_PATH = os.path.join(BASE_RESOURCE_PATH, 'nutnr_j', 'cspp', 'resource')
MODE_ASCII_READ = 'r'
@attr('UNIT', group='mi')
class NutnrJCsppParserUnitTestCase(ParserUnitTestCase):
"""
nutnr_j_cspp Parser unit test suite
"""
def particle_to_yml(self, particles, filename, mode='w'):
"""
        This is added as a testing helper, not as part of the parser tests themselves. Since the same
        particles will be used for the driver test, it is helpful to write them to .yml in the same form
        needed for the results.yml fids here.
"""
# open write append, if you want to start from scratch manually delete this fid
fid = open(os.path.join(RESOURCE_PATH, filename), mode)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' % (i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %16.3f\n' %
particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
                if val.get('value_id') == "time_of_sample":
fid.write(' %s: %16.5f\n' % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %16.3f\n' % (val.get('value_id'), val.get('value')))
elif isinstance(val.get('value'), str):
fid.write(" %s: '%s'\n" % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
def create_yml(self):
"""
This utility creates a yml file
"""
fid = open(os.path.join(RESOURCE_PATH,
'11079419_SNA_SNA.txt'),
MODE_ASCII_READ)
stream_handle = fid
self.create_parser(stream_handle, True)
particles = self.parser.get_records(1000)
self.particle_to_yml(particles, '11079419_SNA_SNA_telem.yml')
fid.close()
def create_parser(self, stream_handle, telem_flag=True):
"""
Initialize the parser with the given stream handle, using the
telemetered config if the flag is set, recovered if it is not
"""
if telem_flag:
# use telemetered config
config = {
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: NutnrJCsppMetadataTelemeteredDataParticle,
LIGHT_PARTICLE_CLASS_KEY: NutnrJCsppTelemeteredDataParticle,
DARK_PARTICLE_CLASS_KEY: NutnrJCsppDarkTelemeteredDataParticle
}
}
else:
# use recovered config
config = {
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: NutnrJCsppMetadataRecoveredDataParticle,
LIGHT_PARTICLE_CLASS_KEY: NutnrJCsppRecoveredDataParticle,
DARK_PARTICLE_CLASS_KEY: NutnrJCsppDarkRecoveredDataParticle
}
}
self.parser = NutnrJCsppParser(config, stream_handle,
self.exception_callback)
def test_simple_telem(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
stream_handle = open(os.path.join(RESOURCE_PATH,
'short_SNA_SNA.txt'), MODE_ASCII_READ)
self.create_parser(stream_handle)
# get and compare the metadata particle
particles = self.parser.get_records(6)
# check all the values against expected results.
self.assert_particles(particles, 'short_SNA_telem.yml', RESOURCE_PATH)
# confirm no exceptions occurred
self.assertEqual(self.exception_callback_value, [])
stream_handle.close()
def test_simple_recov(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
stream_handle = open(os.path.join(RESOURCE_PATH,
'short_SNA_SNA.txt'), MODE_ASCII_READ)
self.create_parser(stream_handle, telem_flag=False)
# get and compare the metadata particle
particles = self.parser.get_records(6)
# check all the values against expected results.
self.assert_particles(particles, 'short_SNA_recov.yml', RESOURCE_PATH)
# confirm no exceptions occurred
self.assertEqual(self.exception_callback_value, [])
stream_handle.close()
def test_bad_data(self):
"""
Ensure that bad data is skipped when it exists. Confirm
that exceptions occur.
"""
# bad data file has:
# 1 bad status
# particle A has bad timestamp
# particle B has bad dark fit
# particle C has bad frame type
# particle D has bad year
stream_handle = open(os.path.join(RESOURCE_PATH,
'bad_SNA_SNA.txt'), MODE_ASCII_READ)
self.create_parser(stream_handle, telem_flag=False)
# get E, since it is first it will generate a metadata
particles = self.parser.get_records(2)
# check all the values against expected results.
self.assert_particles(particles, 'last_and_meta_SNA_recov.yml', RESOURCE_PATH)
# should have had 5 exceptions by now
self.assertEqual(len(self.exception_callback_value), 5)
for exception in self.exception_callback_value:
self.assert_(isinstance(exception, RecoverableSampleException))
def test_missing_source_file(self):
"""
Test that a file with a missing source file path in the header
fails to create a metadata particle and throws an exception
"""
stream_handle = open(os.path.join(RESOURCE_PATH,
'no_source_file_SNA_SNA.txt'), MODE_ASCII_READ)
self.create_parser(stream_handle)
# get A-E, without metadata
particles = self.parser.get_records(5)
# check all the values against expected results.
self.assert_particles(particles, 'no_source_file_SNA_SNA_telem.yml', RESOURCE_PATH)
# confirm an exception occurred
self.assertEqual(len(self.exception_callback_value), 1)
self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))
stream_handle.close()
def test_no_header(self):
"""
Test that a file with no header lines
fails to create a metadata particle and throws an exception
"""
stream_handle = open(os.path.join(RESOURCE_PATH,
'no_header_SNA_SNA.txt'), MODE_ASCII_READ)
self.create_parser(stream_handle)
# get A-E, without metadata
particles = self.parser.get_records(5)
# check all the values against expected results.
self.assert_particles(particles, 'short_SNA_telem_no_meta.yml', RESOURCE_PATH)
# confirm an exception occurred
self.assertEqual(len(self.exception_callback_value), 1)
self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))
stream_handle.close()
def test_partial_header(self):
"""
Test a case where we are missing part of the header, but it is not
the source file so we still want to create the header
"""
stream_handle = open(os.path.join(RESOURCE_PATH,
'part_header_SNA_SNA.txt'), MODE_ASCII_READ)
self.create_parser(stream_handle)
# get A-E, also metadata
particles = self.parser.get_records(6)
# check all the values against expected results.
self.assert_particles(particles, 'short_SNA_telem_part.yml', RESOURCE_PATH)
# confirm no exceptions occurred
self.assertEqual(self.exception_callback_value, [])
stream_handle.close()
def test_bad_config(self):
"""
Test that configurations with a missing data particle dict and missing
data particle class key causes a configuration exception
"""
# test a config with a missing particle classes dict
config = {}
stream_handle = open(os.path.join(RESOURCE_PATH,
'short_SNA_SNA.txt'), MODE_ASCII_READ)
with self.assertRaises(ConfigurationException):
self.parser = NutnrJCsppParser(config, stream_handle,
self.exception_callback)
# test a config with a missing data particle class key
config = {
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: NutnrJCsppMetadataTelemeteredDataParticle,
}
}
with self.assertRaises(ConfigurationException):
self.parser = NutnrJCsppParser(config, stream_handle,
self.exception_callback)
def test_real_file(self):
"""
Read test data from IDD and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
        log.info('===== START TEST REAL FILE =====')
# Recovered
file_path = os.path.join(RESOURCE_PATH, '11079364_SNA_SNA.txt')
stream_handle = open(file_path, MODE_ASCII_READ)
self.create_parser(stream_handle, telem_flag=False)
particles = self.parser.get_records(182)
log.debug("*** test_real_file Num particles %s", len(particles))
# check all the values against expected results.
self.assert_particles(particles, '11079364_SNA_SNA_recov.yml', RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
stream_handle.close()
# Telemetered
file_path = os.path.join(RESOURCE_PATH, '11079419_SNA_SNA.txt')
stream_handle = open(file_path, MODE_ASCII_READ)
self.create_parser(stream_handle)
particles = self.parser.get_records(172)
log.debug("*** test_real_file Num particles %s", len(particles))
# check all the values against expected results.
self.assert_particles(particles, '11079419_SNA_SNA_telem.yml', RESOURCE_PATH)
stream_handle.close()
log.info('===== END TEST REAL FILE =====')
def test_bad_match(self):
"""
        Test a file with a data sample that previously caused the regex
        matcher to hang. This test confirms that the fix does not hang and
        that exceptions are raised for the non-matching data.
"""
log.info('===== START TEST BAD MATCH =====')
# Telemetered
file_path = os.path.join(RESOURCE_PATH, '11129553_SNA_SNA.txt')
stream_handle = open(file_path, MODE_ASCII_READ)
self.create_parser(stream_handle)
particles = self.parser.get_records(57)
log.debug("*** test_bad_match Num particles %s", len(particles))
# 2 bad samples
self.assertEqual(len(self.exception_callback_value), 2)
stream_handle.close()
log.info('===== END TEST BAD MATCH =====')
def test_byte_loss(self):
"""
Test that a file with known byte loss occurring in the form of hex ascii
lines of data creates an exception.
"""
        log.info('===== START TEST BYTE LOSS =====')
# Telemetered
file_path = os.path.join(RESOURCE_PATH, '11330408_SNA_SNA.txt')
stream_handle = open(file_path, MODE_ASCII_READ)
self.create_parser(stream_handle)
particles = self.parser.get_records(3)
log.debug("*** test_byte_loss Num particles %s", len(particles))
# check all the values against expected results.
self.assert_particles(particles, 'byte_loss.yml', RESOURCE_PATH)
stream_handle.close()
log.info('===== END TEST BYTE LOSS =====')
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
# @author: Kevin Benton, Big Switch Networks, Inc.
"""
This module manages the HTTP and HTTPS connections to the backend controllers.
The main class it provides for external use is ServerPool which manages a set
of ServerProxy objects that correspond to individual backend controllers.
The following functionality is handled by this module:
- Translation of rest_* function calls to HTTP/HTTPS calls to the controllers
- Automatic failover between controllers
- SSL Certificate enforcement
- HTTP Authentication
"""
import base64
import httplib
import json
import os
import socket
import ssl
import eventlet
from oslo.config import cfg
from neutron.common import exceptions
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch.db import consistency_db as cdb
LOG = logging.getLogger(__name__)
# The following are used to invoke the API on the external controller
CAPABILITIES_PATH = "/capabilities"
NET_RESOURCE_PATH = "/tenants/%s/networks"
PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports"
ROUTER_RESOURCE_PATH = "/tenants/%s/routers"
ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces"
NETWORKS_PATH = "/tenants/%s/networks/%s"
FLOATINGIPS_PATH = "/tenants/%s/floatingips/%s"
PORTS_PATH = "/tenants/%s/networks/%s/ports/%s"
ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment"
ROUTERS_PATH = "/tenants/%s/routers/%s"
ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s"
TOPOLOGY_PATH = "/topology"
HEALTH_PATH = "/health"
SUCCESS_CODES = range(200, 207)
FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503,
504, 505]
BASE_URI = '/networkService/v1.1'
ORCHESTRATION_SERVICE_ID = 'Neutron v2.0'
HASH_MATCH_HEADER = 'X-BSN-BVS-HASH-MATCH'
# error messages
NXNETWORK = 'NXVNS'
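# Hedged sketch (illustrative tenant/network/port ids) of how the resource
# path templates above are interpolated before ServerProxy.rest_call()
# appends them to BASE_URI.
def _example_resource_uri():
    tenant, network, port = 'tenant-1', 'net-1', 'port-1'
    resource = PORTS_PATH % (tenant, network, port)
    # -> '/tenants/tenant-1/networks/net-1/ports/port-1'
    return BASE_URI + resource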
class RemoteRestError(exceptions.NeutronException):
message = _("Error in REST call to remote network "
"controller: %(reason)s")
status = None
def __init__(self, **kwargs):
self.status = kwargs.pop('status', None)
self.reason = kwargs.get('reason')
super(RemoteRestError, self).__init__(**kwargs)
class ServerProxy(object):
"""REST server proxy to a network controller."""
def __init__(self, server, port, ssl, auth, neutron_id, timeout,
base_uri, name, mypool, combined_cert):
self.server = server
self.port = port
self.ssl = ssl
self.base_uri = base_uri
self.timeout = timeout
self.name = name
self.success_codes = SUCCESS_CODES
self.auth = None
self.neutron_id = neutron_id
self.failed = False
self.capabilities = []
# enable server to reference parent pool
self.mypool = mypool
# cache connection here to avoid a SSL handshake for every connection
self.currentconn = None
if auth:
self.auth = 'Basic ' + base64.encodestring(auth).strip()
self.combined_cert = combined_cert
def get_capabilities(self):
try:
body = self.rest_call('GET', CAPABILITIES_PATH)[3]
self.capabilities = json.loads(body)
except Exception:
LOG.error(_("Couldn't retrieve capabilities. "
"Newer API calls won't be supported."))
LOG.info(_("The following capabilities were received "
"for %(server)s: %(cap)s"), {'server': self.server,
'cap': self.capabilities})
return self.capabilities
def rest_call(self, action, resource, data='', headers={}, timeout=False,
reconnect=False):
uri = self.base_uri + resource
body = json.dumps(data)
if not headers:
headers = {}
headers['Content-type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['NeutronProxy-Agent'] = self.name
headers['Instance-ID'] = self.neutron_id
headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID
headers[HASH_MATCH_HEADER] = self.mypool.consistency_hash
if 'keep-alive' in self.capabilities:
headers['Connection'] = 'keep-alive'
else:
reconnect = True
if self.auth:
headers['Authorization'] = self.auth
LOG.debug(_("ServerProxy: server=%(server)s, port=%(port)d, "
"ssl=%(ssl)r"),
{'server': self.server, 'port': self.port, 'ssl': self.ssl})
LOG.debug(_("ServerProxy: resource=%(resource)s, data=%(data)r, "
"headers=%(headers)r, action=%(action)s"),
{'resource': resource, 'data': data, 'headers': headers,
'action': action})
# unspecified timeout is False because a timeout can be specified as
# None to indicate no timeout.
if timeout is False:
timeout = self.timeout
if timeout != self.timeout:
# need a new connection if timeout has changed
reconnect = True
if not self.currentconn or reconnect:
if self.currentconn:
self.currentconn.close()
if self.ssl:
self.currentconn = HTTPSConnectionWithValidation(
self.server, self.port, timeout=timeout)
self.currentconn.combined_cert = self.combined_cert
if self.currentconn is None:
LOG.error(_('ServerProxy: Could not establish HTTPS '
'connection'))
return 0, None, None, None
else:
self.currentconn = httplib.HTTPConnection(
self.server, self.port, timeout=timeout)
if self.currentconn is None:
LOG.error(_('ServerProxy: Could not establish HTTP '
'connection'))
return 0, None, None, None
try:
self.currentconn.request(action, uri, body, headers)
response = self.currentconn.getresponse()
newhash = response.getheader(HASH_MATCH_HEADER)
if newhash:
self._put_consistency_hash(newhash)
respstr = response.read()
respdata = respstr
if response.status in self.success_codes:
try:
respdata = json.loads(respstr)
except ValueError:
# response was not JSON, ignore the exception
pass
ret = (response.status, response.reason, respstr, respdata)
except httplib.HTTPException:
# If we were using a cached connection, try again with a new one.
with excutils.save_and_reraise_exception() as ctxt:
self.currentconn.close()
if reconnect:
# if reconnect is true, this was on a fresh connection so
# reraise since this server seems to be broken
ctxt.reraise = True
else:
# if reconnect is false, it was a cached connection so
# try one more time before re-raising
ctxt.reraise = False
return self.rest_call(action, resource, data, headers,
timeout=timeout, reconnect=True)
except (socket.timeout, socket.error) as e:
self.currentconn.close()
LOG.error(_('ServerProxy: %(action)s failure, %(e)r'),
{'action': action, 'e': e})
ret = 0, None, None, None
LOG.debug(_("ServerProxy: status=%(status)d, reason=%(reason)r, "
"ret=%(ret)s, data=%(data)r"), {'status': ret[0],
'reason': ret[1],
'ret': ret[2],
'data': ret[3]})
return ret
def _put_consistency_hash(self, newhash):
self.mypool.consistency_hash = newhash
cdb.put_consistency_hash(newhash)
class ServerPool(object):
def __init__(self, timeout=False,
base_uri=BASE_URI, name='NeutronRestProxy'):
LOG.debug(_("ServerPool: initializing"))
# 'servers' is the list of network controller REST end-points
# (used in order specified till one succeeds, and it is sticky
# till next failure). Use 'server_auth' to encode api-key
servers = cfg.CONF.RESTPROXY.servers
self.auth = cfg.CONF.RESTPROXY.server_auth
self.ssl = cfg.CONF.RESTPROXY.server_ssl
self.neutron_id = cfg.CONF.RESTPROXY.neutron_id
self.base_uri = base_uri
self.name = name
self.timeout = cfg.CONF.RESTPROXY.server_timeout
self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections
default_port = 8000
if timeout is not False:
self.timeout = timeout
# Function to use to retrieve topology for consistency syncs.
# Needs to be set by module that uses the servermanager.
self.get_topo_function = None
self.get_topo_function_args = {}
# Hash to send to backend with request as expected previous
# state to verify consistency.
self.consistency_hash = cdb.get_consistency_hash()
if not servers:
raise cfg.Error(_('Servers not defined. Aborting server manager.'))
servers = [s if len(s.rsplit(':', 1)) == 2
else "%s:%d" % (s, default_port)
for s in servers]
if any((len(spl) != 2 or not spl[1].isdigit())
for spl in [sp.rsplit(':', 1)
for sp in servers]):
raise cfg.Error(_('Servers must be defined as <ip>:<port>. '
'Configuration was %s') % servers)
self.servers = [
self.server_proxy_for(server, int(port))
for server, port in (s.rsplit(':', 1) for s in servers)
]
eventlet.spawn(self._consistency_watchdog,
cfg.CONF.RESTPROXY.consistency_interval)
LOG.debug(_("ServerPool: initialization done"))
def get_capabilities(self):
# compute and cache the capabilities on first lookup
try:
return self.capabilities
except AttributeError:
# each server should return a list of capabilities it supports
# e.g. ['floatingip']
capabilities = [set(server.get_capabilities())
for server in self.servers]
# Pool only supports what all of the servers support
self.capabilities = set.intersection(*capabilities)
return self.capabilities
def server_proxy_for(self, server, port):
combined_cert = self._get_combined_cert_for_server(server, port)
return ServerProxy(server, port, self.ssl, self.auth, self.neutron_id,
self.timeout, self.base_uri, self.name, mypool=self,
combined_cert=combined_cert)
def _get_combined_cert_for_server(self, server, port):
# The ssl library requires a combined file with all trusted certs
# so we make one containing the trusted CAs and the corresponding
# host cert for this server
combined_cert = None
if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation:
base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory
host_dir = os.path.join(base_ssl, 'host_certs')
ca_dir = os.path.join(base_ssl, 'ca_certs')
combined_dir = os.path.join(base_ssl, 'combined')
combined_cert = os.path.join(combined_dir, '%s.pem' % server)
if not os.path.exists(base_ssl):
raise cfg.Error(_('ssl_cert_directory [%s] does not exist. '
'Create it or disable ssl.') % base_ssl)
for automake in [combined_dir, ca_dir, host_dir]:
if not os.path.exists(automake):
os.makedirs(automake)
# get all CA certs
certs = self._get_ca_cert_paths(ca_dir)
# check for a host specific cert
hcert, exists = self._get_host_cert_path(host_dir, server)
if exists:
certs.append(hcert)
elif cfg.CONF.RESTPROXY.ssl_sticky:
self._fetch_and_store_cert(server, port, hcert)
certs.append(hcert)
if not certs:
raise cfg.Error(_('No certificates were found to verify '
'controller %s') % (server))
self._combine_certs_to_file(certs, combined_cert)
return combined_cert
def _combine_certs_to_file(self, certs, cfile):
'''
Concatenates the contents of each certificate in a list of
certificate paths to one combined location for use with ssl
sockets.
'''
with open(cfile, 'w') as combined:
for c in certs:
with open(c, 'r') as cert_handle:
combined.write(cert_handle.read())
def _get_host_cert_path(self, host_dir, server):
'''
Returns the full path and a boolean indicating whether it exists.
'''
hcert = os.path.join(host_dir, '%s.pem' % server)
if os.path.exists(hcert):
return hcert, True
return hcert, False
def _get_ca_cert_paths(self, ca_dir):
certs = [os.path.join(root, name)
for name in [
name for (root, dirs, files) in os.walk(ca_dir)
for name in files
]
if name.endswith('.pem')]
return certs
def _fetch_and_store_cert(self, server, port, path):
'''
Grabs a certificate from a server and writes it to
a given path.
'''
try:
cert = ssl.get_server_certificate((server, port))
except Exception as e:
raise cfg.Error(_('Could not retrieve initial '
'certificate from controller %(server)s. '
'Error details: %(error)s') %
{'server': server, 'error': str(e)})
LOG.warning(_("Storing to certificate for host %(server)s "
"at %(path)s") % {'server': server,
'path': path})
self._file_put_contents(path, cert)
return cert
def _file_put_contents(self, path, contents):
# Simple method to write to file.
# Created for easy Mocking
with open(path, 'w') as handle:
handle.write(contents)
def server_failure(self, resp, ignore_codes=[]):
"""Define failure codes as required.
Note: We assume 301-303 are failures, and try the next server in
the server pool.
"""
return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes)
def action_success(self, resp):
"""Defining success codes as required.
Note: We assume any valid 2xx as being successful response.
"""
return resp[0] in SUCCESS_CODES
@utils.synchronized('bsn-rest-call')
def rest_call(self, action, resource, data, headers, ignore_codes,
timeout=False):
good_first = sorted(self.servers, key=lambda x: x.failed)
first_response = None
for active_server in good_first:
ret = active_server.rest_call(action, resource, data, headers,
timeout,
reconnect=self.always_reconnect)
# If inconsistent, do a full synchronization
if ret[0] == httplib.CONFLICT:
if not self.get_topo_function:
raise cfg.Error(_('Server requires synchronization, '
'but no topology function was defined.'))
data = self.get_topo_function(**self.get_topo_function_args)
active_server.rest_call('PUT', TOPOLOGY_PATH, data,
timeout=None)
# Store the first response as the error to be bubbled up to the
# user since it was a good server. Subsequent servers will most
# likely be cluster slaves and won't have a useful error for the
# user (e.g. 302 redirect to master)
if not first_response:
first_response = ret
if not self.server_failure(ret, ignore_codes):
active_server.failed = False
return ret
else:
LOG.error(_('ServerProxy: %(action)s failure for servers: '
'%(server)r Response: %(response)s'),
{'action': action,
'server': (active_server.server,
active_server.port),
'response': ret[3]})
LOG.error(_("ServerProxy: Error details: status=%(status)d, "
"reason=%(reason)r, ret=%(ret)s, data=%(data)r"),
{'status': ret[0], 'reason': ret[1], 'ret': ret[2],
'data': ret[3]})
active_server.failed = True
# All servers failed, reset server list and try again next time
LOG.error(_('ServerProxy: %(action)s failure for all servers: '
'%(server)r'),
{'action': action,
'server': tuple((s.server,
s.port) for s in self.servers)})
return first_response
def rest_action(self, action, resource, data='', errstr='%s',
ignore_codes=[], headers={}, timeout=False):
"""
Wrapper for rest_call that verifies success and raises a
RemoteRestError on failure with the provided error string.
By default, 404 errors on DELETE calls are ignored because
the resource no longer exists on the backend.
"""
if not ignore_codes and action == 'DELETE':
ignore_codes = [404]
resp = self.rest_call(action, resource, data, headers, ignore_codes,
timeout)
if self.server_failure(resp, ignore_codes):
LOG.error(errstr, resp[2])
raise RemoteRestError(reason=resp[2], status=resp[0])
if resp[0] in ignore_codes:
LOG.warning(_("NeutronRestProxyV2: Received and ignored error "
"code %(code)s on %(action)s action to resource "
"%(resource)s"),
{'code': resp[0], 'action': action,
'resource': resource})
return resp
def rest_create_router(self, tenant_id, router):
resource = ROUTER_RESOURCE_PATH % tenant_id
data = {"router": router}
errstr = _("Unable to create remote router: %s")
self.rest_action('POST', resource, data, errstr)
def rest_update_router(self, tenant_id, router, router_id):
resource = ROUTERS_PATH % (tenant_id, router_id)
data = {"router": router}
errstr = _("Unable to update remote router: %s")
self.rest_action('PUT', resource, data, errstr)
def rest_delete_router(self, tenant_id, router_id):
resource = ROUTERS_PATH % (tenant_id, router_id)
errstr = _("Unable to delete remote router: %s")
self.rest_action('DELETE', resource, errstr=errstr)
def rest_add_router_interface(self, tenant_id, router_id, intf_details):
resource = ROUTER_INTF_OP_PATH % (tenant_id, router_id)
data = {"interface": intf_details}
errstr = _("Unable to add router interface: %s")
self.rest_action('POST', resource, data, errstr)
def rest_remove_router_interface(self, tenant_id, router_id, interface_id):
resource = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id)
errstr = _("Unable to delete remote intf: %s")
self.rest_action('DELETE', resource, errstr=errstr)
def rest_create_network(self, tenant_id, network):
resource = NET_RESOURCE_PATH % tenant_id
data = {"network": network}
errstr = _("Unable to create remote network: %s")
self.rest_action('POST', resource, data, errstr)
def rest_update_network(self, tenant_id, net_id, network):
resource = NETWORKS_PATH % (tenant_id, net_id)
data = {"network": network}
errstr = _("Unable to update remote network: %s")
self.rest_action('PUT', resource, data, errstr)
def rest_delete_network(self, tenant_id, net_id):
resource = NETWORKS_PATH % (tenant_id, net_id)
errstr = _("Unable to update remote network: %s")
self.rest_action('DELETE', resource, errstr=errstr)
def rest_create_port(self, tenant_id, net_id, port):
resource = ATTACHMENT_PATH % (tenant_id, net_id, port["id"])
data = {"port": port}
device_id = port.get("device_id")
if not port["mac_address"] or not device_id:
# controller only cares about ports attached to devices
LOG.warning(_("No device MAC attached to port %s. "
"Skipping notification to controller."), port["id"])
return
data["attachment"] = {"id": device_id,
"mac": port["mac_address"]}
errstr = _("Unable to create remote port: %s")
self.rest_action('PUT', resource, data, errstr)
def rest_delete_port(self, tenant_id, network_id, port_id):
resource = ATTACHMENT_PATH % (tenant_id, network_id, port_id)
errstr = _("Unable to delete remote port: %s")
self.rest_action('DELETE', resource, errstr=errstr)
def rest_update_port(self, tenant_id, net_id, port):
# Controller has no update operation for the port endpoint;
# the create PUT method will replace the existing port.
self.rest_create_port(tenant_id, net_id, port)
def rest_create_floatingip(self, tenant_id, floatingip):
resource = FLOATINGIPS_PATH % (tenant_id, floatingip['id'])
errstr = _("Unable to create floating IP: %s")
self.rest_action('PUT', resource, errstr=errstr)
def rest_update_floatingip(self, tenant_id, floatingip, oldid):
resource = FLOATINGIPS_PATH % (tenant_id, oldid)
errstr = _("Unable to update floating IP: %s")
self.rest_action('PUT', resource, errstr=errstr)
def rest_delete_floatingip(self, tenant_id, oldid):
resource = FLOATINGIPS_PATH % (tenant_id, oldid)
errstr = _("Unable to delete floating IP: %s")
self.rest_action('DELETE', resource, errstr=errstr)
def _consistency_watchdog(self, polling_interval=60):
if 'consistency' not in self.get_capabilities():
LOG.warning(_("Backend server(s) do not support automated "
"consitency checks."))
return
while True:
# If consistency is supported, all we have to do is make any
# rest call and the consistency header will be added. If it
# doesn't match, the backend will return a synchronization error
# that will be handled by the rest_call.
eventlet.sleep(polling_interval)
self.rest_call('GET', HEALTH_PATH, '', {}, [])
class HTTPSConnectionWithValidation(httplib.HTTPSConnection):
# If combined_cert is None, the connection will continue without
# any certificate validation.
combined_cert = None
def connect(self):
sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self.sock = sock
self._tunnel()
if self.combined_cert:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.combined_cert)
else:
self.sock = ssl.wrap_socket(sock, self.key_file,
self.cert_file,
cert_reqs=ssl.CERT_NONE)
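# Minimal usage sketch for HTTPSConnectionWithValidation (illustrative only;
# the host, port, path and certificate location below are hypothetical). The
# combined_cert attribute points at a single PEM bundle holding the trusted
# CA certs plus the controller's host cert, as produced by
# ServerPool._combine_certs_to_file() above.
def _example_validated_request(host='192.0.2.10', port=8443,
                               combined_cert='/etc/neutron/combined/192.0.2.10.pem'):
    conn = HTTPSConnectionWithValidation(host, port, timeout=10)
    conn.combined_cert = combined_cert
    conn.request('GET', '/health')
    return conn.getresponse().status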
|
|
# Copyright (c) 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
from sqlalchemy import and_
from neutron._i18n import _, _LE, _LW
from neutron.common import ipv6_utils
from neutron.db import ipam_backend_mixin
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron.ipam import driver
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
LOG = logging.getLogger(__name__)
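# Illustrative sketch of the shallow-copy choice explained in
# allocate_ips_for_port_and_store() below: ATTR_NOT_SPECIFIED is a sentinel
# compared with 'is', and copy.deepcopy() would replace it with a new object
# that no longer passes that identity check, while dict.copy() keeps the same
# sentinel reference. The bare object() below is only a stand-in for the real
# sentinel.
def _sentinel_identity_demo():
    sentinel = object()
    port = {'fixed_ips': sentinel}
    shallow = port.copy()
    deep = copy.deepcopy(port)
    # Shallow copy preserves identity; deep copy does not.
    return (shallow['fixed_ips'] is sentinel,  # True
            deep['fixed_ips'] is sentinel)     # False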
class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
def _get_failed_ips(self, all_ips, success_ips):
ips_list = (ip_dict['ip_address'] for ip_dict in success_ips)
return (ip_dict['ip_address'] for ip_dict in all_ips
if ip_dict['ip_address'] not in ips_list)
def _safe_rollback(self, func, *args, **kwargs):
"""Calls rollback actions and catch all exceptions.
All exceptions are catched and logged here to prevent rewriting
original exception that triggered rollback action.
"""
try:
func(*args, **kwargs)
except Exception as e:
LOG.warning(_LW("Revert failed with: %s"), e)
def _ipam_deallocate_ips(self, context, ipam_driver, port, ips,
revert_on_fail=True):
"""Deallocate set of ips over IPAM.
If any single ip deallocation fails, tries to allocate deallocated
ip addresses with fixed ip request
"""
deallocated = []
try:
for ip in ips:
try:
ipam_subnet = ipam_driver.get_subnet(ip['subnet_id'])
ipam_subnet.deallocate(ip['ip_address'])
deallocated.append(ip)
except n_exc.SubnetNotFound:
LOG.debug("Subnet was not found on ip deallocation: %s",
ip)
except Exception:
with excutils.save_and_reraise_exception():
if not ipam_driver.needs_rollback():
return
LOG.debug("An exception occurred during IP deallocation.")
if revert_on_fail and deallocated:
LOG.debug("Reverting deallocation")
# In case of deadlock the allocate call fails with a db error and
# would overwrite the original exception, preventing the db_retry
# wrappers from restarting the entire API request.
self._safe_rollback(self._ipam_allocate_ips, context,
ipam_driver, port, deallocated,
revert_on_fail=False)
elif not revert_on_fail and ips:
addresses = ', '.join(self._get_failed_ips(ips,
deallocated))
LOG.error(_LE("IP deallocation failed on "
"external system for %s"), addresses)
return deallocated
def _ipam_allocate_ips(self, context, ipam_driver, port, ips,
revert_on_fail=True):
"""Allocate set of ips over IPAM.
If any single ip allocation fails, tries to deallocate all
allocated ip addresses.
"""
allocated = []
# we need to start with entries that asked for a specific IP in case
# those IPs happen to be next in line for allocation for entries that
# didn't ask for a specific IP
ips.sort(key=lambda x: 'ip_address' not in x)
try:
for ip in ips:
# By default IP info is dict, used to allocate single ip
# from single subnet.
# IP info can be list, used to allocate single ip from
# multiple subnets
ip_list = [ip] if isinstance(ip, dict) else ip
subnets = [ip_dict['subnet_id'] for ip_dict in ip_list]
try:
factory = ipam_driver.get_address_request_factory()
ip_request = factory.get_request(context, port, ip_list[0])
ipam_allocator = ipam_driver.get_allocator(subnets)
ip_address, subnet_id = ipam_allocator.allocate(ip_request)
except ipam_exc.IpAddressGenerationFailureAllSubnets:
raise n_exc.IpAddressGenerationFailure(
net_id=port['network_id'])
allocated.append({'ip_address': ip_address,
'subnet_id': subnet_id})
except Exception:
with excutils.save_and_reraise_exception():
if not ipam_driver.needs_rollback():
return
LOG.debug("An exception occurred during IP allocation.")
if revert_on_fail and allocated:
LOG.debug("Reverting allocation")
# In case of deadlock the deallocation call fails with a db error and
# would overwrite the original exception, preventing the db_retry
# wrappers from restarting the entire API request.
self._safe_rollback(self._ipam_deallocate_ips, context,
ipam_driver, port, allocated,
revert_on_fail=False)
elif not revert_on_fail and ips:
addresses = ', '.join(self._get_failed_ips(ips,
allocated))
LOG.error(_LE("IP allocation failed on "
"external system for %s"), addresses)
return allocated
def _ipam_update_allocation_pools(self, context, ipam_driver, subnet):
factory = ipam_driver.get_subnet_request_factory()
subnet_request = factory.get_request(context, subnet, None)
ipam_driver.update_subnet(subnet_request)
def delete_subnet(self, context, subnet_id):
ipam_driver = driver.Pool.get_instance(None, context)
ipam_driver.remove_subnet(subnet_id)
def allocate_ips_for_port_and_store(self, context, port, port_id):
# Make a copy of the port dict to avoid changing the incoming dict
# by adding 'id' to it.
# deepcopy doesn't work correctly in this case, because it also copies
# the ATTR_NOT_SPECIFIED sentinel. The copied object's identity no
# longer matches the original, so 'is' checks fail.
port_copy = {'port': port['port'].copy()}
port_copy['port']['id'] = port_id
network_id = port_copy['port']['network_id']
ips = []
try:
ips = self._allocate_ips_for_port(context, port_copy)
for ip in ips:
ip_address = ip['ip_address']
subnet_id = ip['subnet_id']
IpamPluggableBackend._store_ip_allocation(
context, ip_address, network_id,
subnet_id, port_id)
return ips
except Exception:
with excutils.save_and_reraise_exception():
if ips:
ipam_driver = driver.Pool.get_instance(None, context)
if not ipam_driver.needs_rollback():
return
LOG.debug("An exception occurred during port creation. "
"Reverting IP allocation")
self._safe_rollback(self._ipam_deallocate_ips, context,
ipam_driver, port_copy['port'], ips,
revert_on_fail=False)
def _allocate_ips_for_port(self, context, port):
"""Allocate IP addresses for the port. IPAM version.
If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
addresses for the port. If port['fixed_ips'] contains an IP address or
a subnet_id then allocate an IP address accordingly.
"""
p = port['port']
subnets = self._ipam_get_subnets(context,
network_id=p['network_id'],
host=p.get(portbindings.HOST_ID),
service_type=p.get('device_owner'))
v4, v6_stateful, v6_stateless = self._classify_subnets(
context, subnets)
fixed_configured = p['fixed_ips'] is not constants.ATTR_NOT_SPECIFIED
if fixed_configured:
ips = self._test_fixed_ips_for_port(context,
p["network_id"],
p['fixed_ips'],
p['device_owner'],
subnets)
else:
ips = []
version_subnets = [v4, v6_stateful]
for subnets in version_subnets:
if subnets:
ips.append([{'subnet_id': s['id']}
for s in subnets])
is_router_port = (
p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT)
if not is_router_port:
for subnet in v6_stateless:
# IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets
# are implicitly included.
ips.append({'subnet_id': subnet['id'],
'subnet_cidr': subnet['cidr'],
'eui64_address': True,
'mac': p['mac_address']})
ipam_driver = driver.Pool.get_instance(None, context)
return self._ipam_allocate_ips(context, ipam_driver, p, ips)
def _test_fixed_ips_for_port(self, context, network_id, fixed_ips,
device_owner, subnets):
"""Test fixed IPs for port.
Check that configured subnets are valid prior to allocating any
IPs. Include the subnet_id in the result if only an IP address is
configured.
:raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork,
InvalidIpForSubnet
"""
fixed_ip_list = []
for fixed in fixed_ips:
subnet = self._get_subnet_for_fixed_ip(context, fixed, subnets)
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
if 'ip_address' in fixed:
if (is_auto_addr_subnet and device_owner not in
constants.ROUTER_INTERFACE_OWNERS):
msg = (_("IPv6 address %(address)s can not be directly "
"assigned to a port on subnet %(id)s since the "
"subnet is configured for automatic addresses") %
{'address': fixed['ip_address'],
'id': subnet['id']})
raise n_exc.InvalidInput(error_message=msg)
fixed_ip_list.append({'subnet_id': subnet['id'],
'ip_address': fixed['ip_address']})
else:
# A scan for auto-address subnets on the network is done
# separately so that all such subnets (not just those
# listed explicitly here by subnet ID) are associated
# with the port.
if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or
not is_auto_addr_subnet):
fixed_ip_list.append({'subnet_id': subnet['id']})
self._validate_max_ips_per_port(fixed_ip_list, device_owner)
return fixed_ip_list
def _update_ips_for_port(self, context, port, host,
original_ips, new_ips, mac):
"""Add or remove IPs from the port. IPAM version"""
added = []
removed = []
changes = self._get_changed_ips_for_port(
context, original_ips, new_ips, port['device_owner'])
try:
subnets = self._ipam_get_subnets(
context, network_id=port['network_id'], host=host)
except ipam_exc.DeferIpam:
subnets = []
# Check that the IPs to add are OK
to_add = self._test_fixed_ips_for_port(
context, port['network_id'], changes.add,
port['device_owner'], subnets)
if port['device_owner'] not in constants.ROUTER_INTERFACE_OWNERS:
to_add += self._update_ips_for_pd_subnet(
context, subnets, changes.add, mac)
ipam_driver = driver.Pool.get_instance(None, context)
if changes.remove:
removed = self._ipam_deallocate_ips(context, ipam_driver, port,
changes.remove)
if to_add:
added = self._ipam_allocate_ips(context, ipam_driver,
port, to_add)
return self.Changes(add=added,
original=changes.original,
remove=removed)
def save_allocation_pools(self, context, subnet, allocation_pools):
for pool in allocation_pools:
first_ip = str(netaddr.IPAddress(pool.first, pool.version))
last_ip = str(netaddr.IPAddress(pool.last, pool.version))
ip_pool = models_v2.IPAllocationPool(subnet=subnet,
first_ip=first_ip,
last_ip=last_ip)
context.session.add(ip_pool)
def update_port_with_ips(self, context, host, db_port, new_port, new_mac):
changes = self.Changes(add=[], original=[], remove=[])
if 'fixed_ips' in new_port:
original = self._make_port_dict(db_port,
process_extensions=False)
changes = self._update_ips_for_port(context,
db_port,
host,
original["fixed_ips"],
new_port['fixed_ips'],
new_mac)
try:
# Expire the fixed_ips of db_port in current transaction, because
# it will be changed in the following operation and the latest
# data is expected.
context.session.expire(db_port, ['fixed_ips'])
# Check if the IPs need to be updated
network_id = db_port['network_id']
for ip in changes.remove:
self._delete_ip_allocation(context, network_id,
ip['subnet_id'], ip['ip_address'])
for ip in changes.add:
self._store_ip_allocation(
context, ip['ip_address'], network_id,
ip['subnet_id'], db_port.id)
self._update_db_port(context, db_port, new_port, network_id,
new_mac)
except Exception:
with excutils.save_and_reraise_exception():
if 'fixed_ips' in new_port:
ipam_driver = driver.Pool.get_instance(None, context)
if not ipam_driver.needs_rollback():
return
LOG.debug("An exception occurred during port update.")
if changes.add:
LOG.debug("Reverting IP allocation.")
self._safe_rollback(self._ipam_deallocate_ips,
context,
ipam_driver,
db_port,
changes.add,
revert_on_fail=False)
if changes.remove:
LOG.debug("Reverting IP deallocation.")
self._safe_rollback(self._ipam_allocate_ips,
context,
ipam_driver,
db_port,
changes.remove,
revert_on_fail=False)
return changes
def delete_port(self, context, id):
# Get fixed_ips list before port deletion
port = self._get_port(context, id)
ipam_driver = driver.Pool.get_instance(None, context)
super(IpamPluggableBackend, self).delete_port(context, id)
# Deallocate IPs via IPAM only after the port is deleted locally,
# so no rollback action on the remote server is needed if the
# local port deletion fails.
self._ipam_deallocate_ips(context, ipam_driver, port,
port['fixed_ips'])
def update_db_subnet(self, context, id, s, old_pools):
# 'allocation_pools' is removed from 's' in
# _update_subnet_allocation_pools (ipam_backend_mixin),
# so create unchanged copy for ipam driver
subnet_copy = copy.deepcopy(s)
subnet, changes = super(IpamPluggableBackend, self).update_db_subnet(
context, id, s, old_pools)
ipam_driver = driver.Pool.get_instance(None, context)
# Set old allocation pools if no new pools are provided by the user.
# Passing the old pools allows the ipam driver to be called on every
# subnet update, even if the allocation pools are unchanged, so custom
# ipam drivers can track changes to other subnet fields.
if 'allocation_pools' not in subnet_copy:
subnet_copy['allocation_pools'] = old_pools
self._ipam_update_allocation_pools(context, ipam_driver, subnet_copy)
return subnet, changes
def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet):
"""For an auto-address subnet, add addrs for ports on the net."""
with context.session.begin(subtransactions=True):
network_id = subnet['network_id']
port_qry = context.session.query(models_v2.Port)
ports = port_qry.filter(
and_(models_v2.Port.network_id == network_id,
~models_v2.Port.device_owner.in_(
constants.ROUTER_INTERFACE_OWNERS_SNAT)))
updated_ports = []
for port in ports:
ip_request = ipam_req.AutomaticAddressRequest(
prefix=subnet['cidr'],
mac=port['mac_address'])
ip_address = ipam_subnet.allocate(ip_request)
allocated = models_v2.IPAllocation(network_id=network_id,
port_id=port['id'],
ip_address=ip_address,
subnet_id=subnet['id'])
try:
# Do the insertion of each IP allocation entry within
# the context of a nested transaction, so that the entry
# is rolled back independently of other entries whenever
# the corresponding port has been deleted.
with context.session.begin_nested():
context.session.add(allocated)
updated_ports.append(port['id'])
except db_exc.DBReferenceError:
LOG.debug("Port %s was deleted while updating it with an "
"IPv6 auto-address. Ignoring.", port['id'])
LOG.debug("Reverting IP allocation for %s", ip_address)
# Do not fail if reverting allocation was unsuccessful
try:
ipam_subnet.deallocate(ip_address)
except Exception:
LOG.debug("Reverting IP allocation failed for %s",
ip_address)
return updated_ports
def allocate_subnet(self, context, network, subnet, subnetpool_id):
subnetpool = None
if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID:
subnetpool = self._get_subnetpool(context, id=subnetpool_id)
self._validate_ip_version_with_subnetpool(subnet, subnetpool)
# gateway_ip and allocation pools should be validated or generated
# only for specific requests (i.e. when a CIDR was supplied)
if subnet['cidr'] is not constants.ATTR_NOT_SPECIFIED:
subnet['gateway_ip'] = self._gateway_ip_str(subnet,
subnet['cidr'])
subnet['allocation_pools'] = self._prepare_allocation_pools(
subnet['allocation_pools'],
subnet['cidr'],
subnet['gateway_ip'])
ipam_driver = driver.Pool.get_instance(subnetpool, context)
subnet_factory = ipam_driver.get_subnet_request_factory()
subnet_request = subnet_factory.get_request(context, subnet,
subnetpool)
ipam_subnet = ipam_driver.allocate_subnet(subnet_request)
# get updated details with actually allocated subnet
subnet_request = ipam_subnet.get_details()
try:
subnet = self._save_subnet(context,
network,
self._make_subnet_args(
subnet_request,
subnet,
subnetpool_id),
subnet['dns_nameservers'],
subnet['host_routes'],
subnet_request)
except Exception:
# Note(pbondar): Third-party ipam servers can't rely
# on transaction rollback, so an explicit rollback call is needed.
# The IPAM part is rolled back in this exception handler and the
# subnet part is rolled back by the transaction rollback.
with excutils.save_and_reraise_exception():
if not ipam_driver.needs_rollback():
return
LOG.debug("An exception occurred during subnet creation. "
"Reverting subnet allocation.")
self._safe_rollback(self.delete_subnet,
context,
subnet_request.subnet_id)
return subnet, ipam_subnet
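# Illustrative sketch (not used by the backend) of the ordering applied in
# _ipam_allocate_ips() above: entries that request a specific IP address are
# sorted to the front so that an "any available IP" allocation cannot consume
# an address another entry explicitly asked for. The subnet IDs and address
# below are hypothetical.
def _fixed_ip_first_ordering_demo():
    ips = [{'subnet_id': 'subnet-a'},
           {'subnet_id': 'subnet-a', 'ip_address': '192.0.2.5'},
           {'subnet_id': 'subnet-b'}]
    # False ('ip_address' present) sorts before True, matching the backend key.
    ips.sort(key=lambda x: 'ip_address' not in x)
    return ips  # the explicit 192.0.2.5 request is now first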
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
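# Background sketch (not generated code): AsyncItemPaged, used by list() below,
# drives a simple paging protocol. It awaits get_next(continuation_token) to
# fetch a page, then awaits extract_data(page) for (next_token, items), and
# stops once the continuation token is exhausted. A toy, self-contained
# illustration with made-up page data:
async def _toy_get_next(next_link=None):
    pages = {None: ('page-2', [1, 2]), 'page-2': (None, [3])}
    return pages[next_link]

async def _toy_extract_data(page):
    next_link, items = page
    return next_link, AsyncList(items)

# AsyncItemPaged(_toy_get_next, _toy_extract_data) then yields 1, 2, 3 when
# iterated with "async for".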
class VideosOperations:
"""VideosOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~video_analyzer.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
account_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.VideoEntityCollection"]:
"""Retrieves all existing video resources.
Retrieves a list of video resources that have been created, along with their JSON
representations.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: The Azure Video Analyzer account name.
:type account_name: str
:param top: Specifies a non-negative integer n that limits the number of items returned from a
collection. The service returns the number of available items up to but not greater than the
specified value n.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VideoEntityCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~video_analyzer.models.VideoEntityCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VideoEntityCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'accountName': self._serialize.url("account_name", account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VideoEntityCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos'} # type: ignore
async def get(
self,
resource_group_name: str,
account_name: str,
video_name: str,
**kwargs: Any
) -> "_models.VideoEntity":
"""Retrieves an existing video resource.
Retrieves an existing video resource with the given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: The Azure Video Analyzer account name.
:type account_name: str
:param video_name: The Video name.
:type video_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VideoEntity, or the result of cls(response)
:rtype: ~video_analyzer.models.VideoEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VideoEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'videoName': self._serialize.url("video_name", video_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VideoEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
video_name: str,
parameters: "_models.VideoEntity",
**kwargs: Any
) -> "_models.VideoEntity":
"""Creates a new video resource or updates an existing one.
Creates a new video resource or updates an existing video resource with the given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: The Azure Video Analyzer account name.
:type account_name: str
:param video_name: The Video name.
:type video_name: str
:param parameters: The request parameters.
:type parameters: ~video_analyzer.models.VideoEntity
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VideoEntity, or the result of cls(response)
:rtype: ~video_analyzer.models.VideoEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VideoEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'videoName': self._serialize.url("video_name", video_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VideoEntity')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VideoEntity', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VideoEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
account_name: str,
video_name: str,
**kwargs: Any
) -> None:
"""Deletes an existing video resource and its underlying data.
Deletes an existing video resource and its underlying data. This operation is irreversible.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: The Azure Video Analyzer account name.
:type account_name: str
:param video_name: The Video name.
:type video_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'videoName': self._serialize.url("video_name", video_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
video_name: str,
parameters: "_models.VideoEntity",
**kwargs: Any
) -> "_models.VideoEntity":
"""Updates individual properties of an existing video resource.
Updates individual properties of an existing video resource with the given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: The Azure Video Analyzer account name.
:type account_name: str
:param video_name: The Video name.
:type video_name: str
:param parameters: The request parameters.
:type parameters: ~video_analyzer.models.VideoEntity
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VideoEntity, or the result of cls(response)
:rtype: ~video_analyzer.models.VideoEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VideoEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'videoName': self._serialize.url("video_name", video_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VideoEntity')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VideoEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}'} # type: ignore
async def list_content_token(
self,
resource_group_name: str,
account_name: str,
video_name: str,
**kwargs: Any
) -> "_models.VideoContentToken":
"""Generates a streaming token which can be used for accessing content from video content URLs.
Generates a streaming token which can be used for accessing content from video content URLs,
for a video resource with the given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: The Azure Video Analyzer account name.
:type account_name: str
:param video_name: The Video name.
:type video_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VideoContentToken, or the result of cls(response)
:rtype: ~video_analyzer.models.VideoContentToken
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VideoContentToken"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = self.list_content_token.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'videoName': self._serialize.url("video_name", video_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VideoContentToken', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_content_token.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}/listContentToken'} # type: ignore
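# Hypothetical usage sketch (not part of the generated code): driving this
# operation group through a management client assumed to expose it as
# ".videos". The client construction, resource group, account and video names
# are placeholders; run the coroutine with an event loop such as asyncio.run.
async def _example_list_and_get(client, resource_group, account_name):
    # list() returns an AsyncItemPaged; iterate it with "async for".
    async for video in client.videos.list(resource_group, account_name, top=10):
        print(video.name)
    # get() returns a single deserialized VideoEntity.
    return await client.videos.get(resource_group, account_name, "sample-video")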
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
from pktverify.consts import MLE_PARENT_RESPONSE, MLE_CHILD_ID_RESPONSE, SOURCE_ADDRESS_TLV, CHALLENGE_TLV, RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, ADDRESS16_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, CONNECTIVITY_TLV, LINK_MARGIN_TLV, VERSION_TLV, ADDRESS_REGISTRATION_TLV
from pktverify.packet_verifier import PacketVerifier
from pktverify.null_field import nullField
LEADER = 1
ROUTER = 2
SED1 = 7
# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to validate the minimum
# conformance requirements for router-capable devices:
# a) Minimum number of supported children.
# b) Minimum MTU requirement when sending/forwarding an
# IPv6 datagram to a SED.
# c) Minimum number of sent/forwarded IPv6 datagrams to
# SED children.
#
# Test Topology:
# -------------
#
# Leader
# |
# Router[DUT]
# / \
# MED1 - MED4 SED1 - SED6
#
# DUT Types:
# ----------
# Router
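# The 1232-byte ping payload used in test() below exercises requirement (b):
# with the minimum IPv6 MTU of 1280 bytes, subtracting the 40-byte IPv6 header
# and the 8-byte ICMPv6 echo header leaves 1232 bytes of payload. Illustrative
# constants and helper only; the test itself uses the literal value.
IPV6_MIN_MTU = 1280
IPV6_HEADER_LEN = 40
ICMPV6_ECHO_HEADER_LEN = 8


def max_ping_payload_for_min_mtu():
    return IPV6_MIN_MTU - IPV6_HEADER_LEN - ICMPV6_ECHO_HEADER_LEN  # == 1232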
class Cert_5_1_07_MaxChildCount(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
LEADER: {
'name': 'LEADER',
'mode': 'rdn',
'panid': 0xface,
'allowlist': [ROUTER]
},
ROUTER: {
'name': 'ROUTER',
'max_children': 10,
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [LEADER, 3, 4, 5, 6, SED1, 8, 9, 10, 11, 12]
},
3: {
'name': 'MED1',
'is_mtd': True,
'mode': 'rn',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
4: {
'name': 'MED2',
'is_mtd': True,
'mode': 'rn',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
5: {
'name': 'MED3',
'is_mtd': True,
'mode': 'rn',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
6: {
'name': 'MED4',
'is_mtd': True,
'mode': 'rn',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
SED1: {
'name': 'SED1',
'is_mtd': True,
'mode': '-',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
8: {
'name': 'SED2',
'is_mtd': True,
'mode': '-',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
9: {
'name': 'SED3',
'is_mtd': True,
'mode': '-',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
10: {
'name': 'SED4',
'is_mtd': True,
'mode': '-',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
11: {
'name': 'SED5',
'is_mtd': True,
'mode': '-',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
12: {
'name': 'SED6',
'is_mtd': True,
'mode': '-',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
for i in range(3, 13):
self.nodes[i].start()
self.simulator.go(7)
self.assertEqual(self.nodes[i].get_state(), 'child')
self.collect_rloc16s()
self.collect_ipaddrs()
ipaddrs = self.nodes[SED1].get_addrs()
for addr in ipaddrs:
if addr[0:4] != 'fe80' and 'ff:fe00' not in addr:
self.assertTrue(self.nodes[LEADER].ping(addr, size=1232))
break
for i in range(3, 13):
ipaddrs = self.nodes[i].get_addrs()
for addr in ipaddrs:
if addr[0:4] != 'fe80' and 'ff:fe00' not in addr:
self.assertTrue(self.nodes[LEADER].ping(addr, size=106))
break
def verify(self, pv: PacketVerifier):
pkts = pv.pkts
pv.summary.show()
ROUTER = pv.vars['ROUTER']
router_pkts = pkts.filter_wpan_src64(ROUTER)
# Step 1: The DUT MUST send properly formatted MLE Parent Response
# and MLE Child ID Response to each child.
for i in range(1, 7):
_pkts = router_pkts.copy().filter_wpan_dst64(pv.vars['SED%d' % i])
_pkts.filter_mle_cmd(MLE_PARENT_RESPONSE).\
filter(lambda p: {
CHALLENGE_TLV,
CONNECTIVITY_TLV,
LEADER_DATA_TLV,
LINK_LAYER_FRAME_COUNTER_TLV,
LINK_MARGIN_TLV,
RESPONSE_TLV,
SOURCE_ADDRESS_TLV,
VERSION_TLV
} <= set(p.mle.tlv.type)
).\
must_next()
_pkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).\
filter(lambda p: {
SOURCE_ADDRESS_TLV,
LEADER_DATA_TLV,
ADDRESS16_TLV,
NETWORK_DATA_TLV,
ADDRESS_REGISTRATION_TLV
} <= set(p.mle.tlv.type) and\
p.mle.tlv.addr16 is not nullField and\
p.thread_nwd.tlv.type is not None and\
p.thread_meshcop.tlv.type is not None
).\
must_next()
for i in range(1, 5):
_pkts = router_pkts.copy().filter_wpan_dst64(pv.vars['MED%d' % i])
_pkts.filter_mle_cmd(MLE_PARENT_RESPONSE).\
filter(lambda p: {
CHALLENGE_TLV,
CONNECTIVITY_TLV,
LEADER_DATA_TLV,
LINK_LAYER_FRAME_COUNTER_TLV,
LINK_MARGIN_TLV,
RESPONSE_TLV,
SOURCE_ADDRESS_TLV,
VERSION_TLV
} <= set(p.mle.tlv.type)
).\
must_next()
_pkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).\
filter(lambda p: {
SOURCE_ADDRESS_TLV,
LEADER_DATA_TLV,
ADDRESS16_TLV,
NETWORK_DATA_TLV,
ADDRESS_REGISTRATION_TLV
} <= set(p.mle.tlv.type) and\
p.mle.tlv.addr16 is not nullField and\
p.thread_meshcop.tlv.type is not None
).\
must_next()
# Step 2: The DUT MUST properly forward ICMPv6 Echo Requests to all MED children
# The DUT MUST properly forward ICMPv6 Echo Replies to the Leader
leader_rloc16 = pv.vars['LEADER_RLOC16']
for i in range(1, 5):
rloc16 = pv.vars['MED%d_RLOC16' % i]
_pkts = router_pkts.copy()
p = _pkts.filter('wpan.dst16 == {rloc16}', rloc16=rloc16).\
filter_ping_request().\
must_next()
_pkts.filter('wpan.dst16 == {rloc16}',
rloc16=leader_rloc16).\
filter_ping_reply(identifier=p.icmpv6.echo.identifier).\
must_next()
# Step 3: The DUT MUST properly forward ICMPv6 Echo Requests to all SED children
# The DUT MUST properly forward ICMPv6 Echo Replies to the Leader
for i in range(1, 7):
rloc16 = pv.vars['SED%d_RLOC16' % i]
_pkts = router_pkts.copy()
p = _pkts.filter('wpan.dst16 == {rloc16}', rloc16=rloc16).\
filter_ping_request().\
must_next()
_pkts.filter('wpan.dst16 == {rloc16}',
rloc16=leader_rloc16).\
filter_ping_reply(identifier=p.icmpv6.echo.identifier).\
must_next()
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
VERSION_STR = 'v0.1'
import os
import shutil
import re
import wx
import wx.grid as grid
import pyfits
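# Small usage sketch of the header extraction performed in MainWindow.onOpen()
# below: pyfits.getheader() reads the primary HDU header, and Header.get()
# falls back to '' for missing keywords. The path and keyword names here are
# hypothetical.
def example_read_tokens(path='/tmp/example.fits', tokens=('OBJECT', 'EXPTIME')):
    hdr = pyfits.getheader(path)
    return ["%s" % hdr.get(tok, '') for tok in tokens]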
DEFAULT_TOKENS = ['OBJECT', 'FILTER', 'EXPTIME', 'DATE-OBS', 'FOCUSPOS', 'AIRMASS']
TOKENS_LIST = ['OBJECT', 'FILTER', 'EXPTIME', 'DATE-OBS', 'ORIGIN', 'TELESCOP', 'COMMENT', 'OBSERVER', 'PRIORITY', 'INSTRUME',
'CDELT1', 'CDELT2', 'JD', 'LST', 'POSANGLE', 'LATITUDE', 'LONGITUD', 'ELEVATIO', 'AZIMUTH',
'HA', 'RAEOD', 'DECEOD', 'RA', 'DEC', 'OBJRA', 'OBJDEC', 'EPOCH', 'EQUINOX', 'CAMTEMP',
'FOCUSPOS', 'AIRMASS', 'RAWISTP', 'WXTEMP', 'WXPRES', 'WXWNDSPD', 'WXWNDDIR', 'WXHUMID', 'FWHMH', 'FWHMHS', 'FWHMV', 'FWHMVS']
TOKENS_NOTUSED = ['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'OFFSET1', 'OFFSET2', 'XFACTOR',
'YFACTOR', 'RAWHENC', 'RAWDENC', 'RAWOSTP', 'HCOMSCAL', 'HCOMSTAT']
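# Illustrative helper mirroring MainWindow.GetSelection() below: wx.grid
# reports block selections as paired (top-left, bottom-right) cell
# coordinates, while the rename/copy logic needs a flat, sorted list of row
# indices. The example input in the docstring is hypothetical.
def rows_from_selection_blocks(top_left, bottom_right):
    """E.g. top_left=[(1, 0)], bottom_right=[(3, 5)] -> [1, 2, 3]."""
    rows = []
    for (r1, _c1), (r2, _c2) in zip(top_left, bottom_right):
        rows += [r1 + offset for offset in range(r2 - r1 + 1)]
    return sorted(rows)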
class MainWindow(wx.Frame):
def __init__(self, filename='noname.txt'):
super(MainWindow, self).__init__(None)
super(MainWindow, self).SetTitle('FITS Hydra')
# _icon = wx.Icon('fits_hydra.ico', wx.BITMAP_TYPE_ICO)
# self.SetIcon(_icon)
self.SetSize((700,500))
self.hdrs = {}
self.paths = []
self.lastdir = ''
self.gr = grid.Grid(self)
self.gr.CreateGrid(0,1)
self.gr.SetColLabelValue(0,'File')
self.gr.SetRowLabelSize(0)
self.gr.SetSelectionMode(1) # 1 == wxGridSelectRows: select whole rows
self.toks = wx.Menu()
k=1
for i in range(len(TOKENS_LIST)):
item = self.toks.Append(wx.ID_ANY, TOKENS_LIST[i], kind=wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU, self.onToggle, item)
self.gr.SetColLabelValue(i+1, TOKENS_LIST[i])
if TOKENS_LIST[i] in DEFAULT_TOKENS:
item.Check()
self.gr.AppendCols(1)
self.gr.SetColLabelValue(k, TOKENS_LIST[i])
k+=1
if (i+1) % 25 == 0:
self.toks.Break()
fileMenu = wx.Menu()
tmp = fileMenu.Append(wx.ID_ANY,'x')
tmp.SetBitmap(wx.EmptyBitmap(1,1)) # trick to handle menu bitmaps bug
item = fileMenu.Append(wx.ID_OPEN, '&Open\tCtrl+O', 'Load FITS headers from file(s)')
item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN))
fileMenu.Remove(tmp.GetId())
self.Bind(wx.EVT_MENU, self.onOpen, item)
item = fileMenu.Append(wx.ID_ANY, '&Rename..', 'Rename selected files based on header information')
self.Bind(wx.EVT_MENU, self.onRename, item)
item = fileMenu.Append(wx.ID_EXIT, 'E&xit\tCtrl+Q', 'Exit the program')
item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_QUIT))
self.Bind(wx.EVT_MENU, self.onExit, item)
editMenu = wx.Menu()
item = editMenu.Append(wx.ID_ANY, 'Select &All\tCtrl+A', 'Copy Selection to Clipboard')
self.Bind(wx.EVT_MENU, self.onSelectAll, item)
item = editMenu.Append(wx.ID_COPY, '&Copy\tCtrl+C', 'Copy Selection to Clipboard')
self.Bind(wx.EVT_MENU, self.onCopy, item)
item = editMenu.Append(wx.ID_ANY, '&Clear\tDel', 'Clear Selection')
self.Bind(wx.EVT_MENU, self.onClear, item)
helpMenu = wx.Menu()
tmp = helpMenu.Append(wx.ID_ANY,'x')
tmp.SetBitmap(wx.EmptyBitmap(1,1)) # trick to handle menu bitmaps bug
item = helpMenu.Append(wx.ID_ABOUT, '&About', 'About this program')
item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, size=(16,16)))
helpMenu.Remove(tmp.GetId())
self.Bind(wx.EVT_MENU, self.onAbout, item)
menuBar = wx.MenuBar()
menuBar.Append(fileMenu, '&File')
menuBar.Append(editMenu, '&Edit')
menuBar.Append(helpMenu, '&Help')
self.SetMenuBar(menuBar)
self.CreateStatusBar()
for i in range(self.gr.GetNumberRows()):
for j in range(self.gr.GetNumberCols()):
self.gr.SetReadOnly(i,j,True)
self.gr.Bind(grid.EVT_GRID_CMD_LABEL_RIGHT_CLICK, self.onRight, None)
self.gr.Bind(grid.EVT_GRID_CMD_LABEL_LEFT_CLICK, self.onLeft, None)
self.retag = RetagDialog(self)
self.retag.Show(False)
def IsChecked(self, tok):
for item in self.toks.GetMenuItems():
if item.GetItemLabelText() == tok:
return item.IsChecked()
return False
def findColumn(self, tok):
for i in range(self.gr.GetNumberCols()):
if self.gr.GetColLabelValue(i) == tok:
return i
return -1
def showColumn(self, tok):
k = self.gr.GetNumberCols()
self.gr.AppendCols(1)
self.gr.SetColLabelValue(k,tok)
for i in range(self.gr.GetNumberRows()):
if i<len(self.paths):
j = TOKENS_LIST.index(tok)
self.gr.SetCellValue(i,k,self.hdrs[self.paths[i]][j])
self.gr.SetReadOnly(i,k)
self.gr.AutoSizeColumn(k)
self.gr.ClearSelection()
def hideColumn(self, tok):
save_labels=[]
for i in range(self.gr.GetNumberCols()):
label = self.gr.GetColLabelValue(i)
if label == tok:
k=i
continue
else:
save_labels.append(label)
if 'k' in locals():
self.gr.DeleteCols(k)
for i in range(len(save_labels)):
self.gr.SetColLabelValue(i,save_labels[i])
def sortByColumn(self, n):
sorter={}
for i in range(len(self.paths)):
sorter[self.paths[i]] = self.gr.GetCellValue(i,n)
paths = sorted(sorter, key=lambda k: (sorter[k], k))
        infos = [self.hdrs[path] for path in paths]
self.gr.DeleteRows(0,self.gr.GetNumberRows())
self.hdrs = {}
self.paths = []
        for path, info in zip(paths, infos):
self.addRow(path, info)
def addRow(self, path, info_list):
for i in range(self.gr.GetNumberRows()):
if self.gr.GetCellValue(i,0) == '':
n=i
break
else:
n=self.gr.GetNumberRows()
self.gr.AppendRows(1)
self.hdrs[path]=info_list
self.paths.append(path)
self.gr.SetCellValue(n,0,os.path.basename(path))
self.gr.SetReadOnly(n,0)
for i in range(len(info_list)):
k = self.findColumn(TOKENS_LIST[i])
if k>0:
self.gr.SetCellValue(n,k,info_list[i])
self.gr.SetReadOnly(n,k)
def GetSelection(self):
sel=[]
top = self.gr.GetSelectionBlockTopLeft()
bottom = self.gr.GetSelectionBlockBottomRight()
for (r1,c1),(r2,c2) in zip(top,bottom):
sel += [r1+x for x in range(r2-r1+1)]
return sorted(sel)
def onOpen(self, event):
wildcard = "FITS image files (*.fts,*.fits,*.fit)|*.fts;*.fits;*.fit"
dialog = wx.FileDialog(None, "Choose a file", defaultDir=self.lastdir, wildcard=wildcard, style=wx.FD_OPEN|wx.FD_MULTIPLE)
if dialog.ShowModal() == wx.ID_OK:
for path in dialog.GetPaths():
if path in self.hdrs.keys(): continue
hdr = pyfits.getheader(path)
info = []
for tok in TOKENS_LIST:
info.append( "%s" % hdr.get(tok, '') )
self.addRow(path, info)
self.lastdir = os.path.dirname(path)
self.gr.AutoSizeColumns()
dialog.Destroy()
def onRename(self, event):
if self.gr.GetNumberRows()<1: return
rn = {}
sel = self.GetSelection()
if sel == []:
self.gr.SelectAll()
sel = self.GetSelection()
for i in sel:
if i<len(self.paths):
f = self.paths[i]
rn[f] = self.hdrs[f]
self.retag.rn = rn
self.retag.lastdir = os.path.dirname(self.paths[sel[0]])
if self.retag.outdir.GetValue() == '':
self.retag.outdir.SetValue(self.retag.lastdir)
self.retag.update_sample(None)
if self.retag.ShowModal() == wx.ID_OK:
try:
for oldf,newf in self.retag.filter_files():
shutil.copy(oldf,newf)
k = self.paths.index(oldf)
self.paths[k] = newf
self.hdrs[newf]=self.hdrs[oldf]
del self.hdrs[oldf]
if os.access(oldf,os.W_OK):
os.remove(oldf)
self.gr.SetCellValue(k,0,os.path.basename(newf))
self.gr.SetReadOnly(k,0)
            except (IOError, OSError) as e:
msg = 'File renaming failed!\n%s' % e
errdialog=wx.MessageDialog(self, msg, 'Error', style=wx.OK|wx.ICON_ERROR)
errdialog.ShowModal()
self.gr.AutoSizeColumns()
def onSelectAll(self, event):
self.gr.SelectAll()
def onCopy(self, event):
t = ''
for i in self.GetSelection():
for k in range(self.gr.GetNumberCols()):
t+=self.gr.GetCellValue(i,k)+'\t'
t+=os.linesep
if t=='':return
wx.TheClipboard.Open()
wx.TheClipboard.SetData(wx.TextDataObject(t))
wx.TheClipboard.Close()
def onClear(self, event):
for i in reversed(self.GetSelection()):
self.gr.DeleteRows(i)
if i<len(self.paths):
del self.hdrs[self.paths[i]]
del self.paths[i]
self.gr.ClearSelection()
def onExit(self, event):
self.Destroy()
def onRight(self, event):
if event.GetRow()<0:
self.PopupMenu(self.toks)
def onLeft(self, event):
if event.GetRow()<0:
self.sortByColumn(event.GetCol())
def onToggle(self, event):
id = event.GetId()
item = self.toks.FindItemById(id)
tok = self.toks.GetLabelText(id)
if item.IsChecked():
self.showColumn(tok)
else:
self.hideColumn(tok)
def onAbout(self, event):
description = """Allows simple browsing and renaming of
FITS images based on header information.
Chop one off, two grow back!
"""
info = wx.AboutDialogInfo()
info.SetName('The FITS Hydra')
info.SetVersion(VERSION_STR)
info.SetDescription(description)
info.SetCopyright('(C) 2013 Bill Peterson')
info.SetWebSite('http://astro.physics.uiowa.edu/rigel')
info.AddDeveloper('Dr. Bill Peterson (bill.m.peterson@gmail.com)')
wx.AboutBox(info)
return
class RetagDialog(wx.Dialog):
def __init__(self, *args, **kw):
super(RetagDialog, self).__init__(style=wx.RESIZE_BORDER|wx.DEFAULT_DIALOG_STYLE, *args, **kw)
self.SetTitle("Rename Files")
self.SetSize((700,400))
self.rn = {}
self.lastdir = ''
opt = ['']+TOKENS_LIST
fmt = '{OBJECT}_{FILTER}_{EXPTIME}sec_{DATE-OBS[0:10]}.fts'
self.t1 = wx.Choice(self, choices=opt)
self.t2 = wx.Choice(self, choices=opt)
self.t3 = wx.Choice(self, choices=opt)
self.sep = wx.ComboBox(self, value='_', choices=[' ','_','-','.'])
self.format = wx.TextCtrl(self, value=fmt)
self.outdir = wx.TextCtrl(self)
chdir = wx.Button(self, label='Choose')
self.output = wx.TextCtrl(self, style=wx.TE_MULTILINE|wx.TE_READONLY)
self.t1.SetSelection(1)
self.t2.SetSelection(2)
self.t3.SetSelection(3)
ctrl = wx.GridBagSizer(vgap=5, hgap=5)
ctrl.Add(wx.StaticText(self, label='FITS Header Tokens:'), pos=(0,1), span=(1,3), flag=wx.ALIGN_CENTER)
ctrl.Add(wx.StaticText(self, label='Separator:'), pos=(0,4))
ctrl.Add(self.t1, pos=(1,1))
ctrl.Add(self.t2, pos=(1,2))
ctrl.Add(self.t3, pos=(1,3))
ctrl.Add(self.sep, pos=(1,4))
ctrl.Add(wx.StaticText(self, label='Format:'), pos=(2,0), flag=wx.ALIGN_RIGHT)
ctrl.Add(self.format, pos=(2,1), span=(1,4), flag=wx.EXPAND)
ctrl.Add(wx.StaticText(self, label='Output Dir:'), pos=(3,0), flag=wx.ALIGN_RIGHT)
ctrl.Add(self.outdir, pos=(3,1), span=(1,4), flag=wx.EXPAND)
ctrl.Add(chdir, pos=(3,5))
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(ctrl, border=5, flag=wx.ALL|wx.ALIGN_CENTER)
vbox.Add(wx.StaticText(self, label='Result:'), border=5, flag=wx.ALL|wx.ALIGN_CENTER)
vbox.Add(self.output, border=10, proportion=1, flag=wx.ALL|wx.ALIGN_CENTER|wx.EXPAND)
vbox.Add(self.CreateButtonSizer(wx.OK|wx.CANCEL), flag=wx.ALL|wx.ALIGN_CENTER, border=5)
self.SetSizer(vbox)
self.t1.Bind(wx.EVT_CHOICE, self.update_format)
self.t2.Bind(wx.EVT_CHOICE, self.update_format)
self.t3.Bind(wx.EVT_CHOICE, self.update_format)
self.sep.Bind(wx.EVT_TEXT, self.update_format)
self.format.Bind(wx.EVT_TEXT, self.update_sample)
self.Bind(wx.EVT_BUTTON, self.onOK, id=wx.ID_OK)
chdir.Bind(wx.EVT_BUTTON, self.change_outdir, chdir)
def onOK(self, event):
confirm = wx.MessageDialog(self, 'Files will be permanently renamed! Proceed?',
'Confirm', style=wx.YES_NO|wx.NO_DEFAULT|wx.ICON_EXCLAMATION)
if confirm.ShowModal()==wx.ID_YES:
self.EndModal(wx.ID_OK)
else:
self.EndModal(wx.ID_CANCEL)
def change_outdir(self, event):
dialog = wx.DirDialog(self, defaultPath=self.lastdir)
if dialog.ShowModal()==wx.ID_OK:
self.outdir.SetValue(dialog.GetPath())
dialog.Destroy()
def update_format(self, event):
t1 = self.t1.GetStringSelection()
t2 = self.t2.GetStringSelection()
t3 = self.t3.GetStringSelection()
sep = self.sep.GetValue()
fields=[]
if t1!='': fields.append('{%s}' % t1)
if t2!='': fields.append('{%s}' % t2)
if t3!='': fields.append('{%s}' % t3)
self.format.SetValue(sep.join(fields)+'.fts')
self.update_sample(None)
def update_sample(self, event):
self.output.Clear()
for f,newf in self.filter_files():
self.output.AppendText('%s -> %s\n' % (os.path.basename(f), os.path.basename(newf)) )
def filter_files(self):
fmt = self.format.GetValue()
sep = self.sep.GetValue()
fields = []
oldf = self.rn.keys()
newf = []
for f in oldf:
dir = self.outdir.GetValue()
fname = fmt
for (t,func) in re.findall('{([A-Z-]+)(.*?)}', fname):
if t not in TOKENS_LIST: continue
val = self.rn[f][TOKENS_LIST.index(t)]
                try:
                    # apply an optional slice such as {DATE-OBS[0:10]} to the header value
                    fname = re.sub( '{%s.*?}' % t, eval('val%s' % func) , fname )
                except Exception:
                    # fall back to the raw header value if the slice expression fails
                    fname = re.sub( '{%s.*?}' % t, val, fname )
fname = re.sub(':','-',fname)
fname = os.path.join(dir,fname)
froot = os.path.splitext(fname)[0]
k=1
while os.path.exists(fname) or fname in newf:
fname=froot+sep+'%03d.fts' % k
k+=1
newf.append( fname )
return zip(oldf, newf)
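# Hedged sketch (not part of the original script): a standalone illustration of
# the rename-format convention used by RetagDialog.filter_files, where tokens
# like {OBJECT} or {DATE-OBS[0:10]} are replaced by FITS header values. The
# sample header values below are made up.
def _example_expand_format(fmt='{OBJECT}_{FILTER}_{EXPTIME}sec_{DATE-OBS[0:10]}.fts'):
    header = {'OBJECT': 'M42', 'FILTER': 'R', 'EXPTIME': '30',
              'DATE-OBS': '2013-01-15T04:20:00'}
    for tok, expr in re.findall('{([A-Z-]+)(.*?)}', fmt):
        val = header.get(tok, '')
        try:
            val = eval('val%s' % expr)   # apply the optional slice, e.g. [0:10]
        except Exception:
            pass                         # keep the raw value if the slice fails
        fmt = re.sub('{%s.*?}' % tok, val, fmt)
    return re.sub(':', '-', fmt)         # -> 'M42_R_30sec_2013-01-15.fts'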
app = wx.App(redirect=False)
frame = MainWindow()
frame.Show()
app.MainLoop()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2021
# File : processors.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 05.11.2021
from collections import OrderedDict, defaultdict
from enum import Enum, unique as unique_enum
import functools
import inspect
import json
import signal
import sys
import traceback
from typing import Any
import curio
from goetia import libgoetia
from goetia.messages import (
AllMessages,
EndStream,
Error,
Interval,
SampleFinished,
SampleStarted,
)
from goetia.utils import is_iterable
DEFAULT_SOCKET = '/tmp/goetia.sock'
DEFAULT_INTERVAL = libgoetia.metrics.IntervalCounter.DEFAULT_INTERVAL
class QueueManager:
def __init__(self, q: curio.UniversalQueue, name: str):
assert type(q) is curio.UniversalQueue
self.q = q
self.name = name
self.subscribers = set()
self.subscriber_names = {}
def subscribe(self, q: curio.UniversalQueue, name: str):
if q not in self.subscribers:
self.subscribers.add(q)
self.subscriber_names[q] = name
def unsubscribe(self, q: curio.UniversalQueue) -> None:
try:
self.subscribers.remove(q)
del self.subscriber_names[q]
        except KeyError:
            pass
async def kill(self) -> None:
await self.q.put(None)
async def dispatch(self) -> None:
while True:
msg = await self.q.get()
for sub_q in self.subscribers:
await sub_q.put(msg)
await self.q.task_done()
if msg is None:
break
class MessageHandler:
def __init__(self, name, subscription):
self.subscription = subscription
self.name = name
self.handlers = defaultdict(list)
async def task(self, error_handler = None):
try:
msg_q = curio.Queue()
self.subscription.subscribe(msg_q, self.name)
while True:
msg = await msg_q.get()
if msg is None:
await msg_q.task_done()
break
for callback, args, kwargs in self.handlers[type(msg)]:
if inspect.iscoroutinefunction(callback):
await callback(msg, *args, **kwargs)
else:
callback(msg, *args, **kwargs)
for callback, args, kwargs in self.handlers[AllMessages]:
if inspect.iscoroutinefunction(callback):
await callback(msg, *args, **kwargs)
else:
callback(msg, *args, **kwargs)
await msg_q.task_done()
except curio.CancelledError:
raise
except Exception as e:
if error_handler is not None:
error_handler(e)
else:
raise
else:
self.subscription.unsubscribe(msg_q)
def on_message(self, msg_class, callback, *args, **kwargs):
assert type(msg_class) is type
self.handlers[msg_class].append((callback, args, kwargs))
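# Hedged usage sketch (illustrative, not part of the original module): wiring a
# QueueManager to a plain subscriber queue and draining one message through the
# dispatch loop. The queue names and the 'hello' payload are assumptions, not
# real goetia messages.
async def _example_dispatch():
    source = curio.UniversalQueue()
    manager = QueueManager(source, 'demo_q')
    sink = curio.UniversalQueue()
    manager.subscribe(sink, 'printer')
    dispatcher = await curio.spawn(manager.dispatch)
    await source.put('hello')
    await manager.kill()              # enqueue the None sentinel
    await dispatcher.join()           # dispatch exits after forwarding None
    print(await sink.get())           # 'hello'
    print(await sink.get())           # the forwarded None sentinel
# curio.run(_example_dispatch)        # uncomment to run the sketch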
@unique_enum
class RunState(Enum):
READY = 0
RUNNING = 1
SIGINT = 2
STOP_SATURATED = 3
STOP_ERROR = 4
STOP = 5
class AsyncSequenceProcessor:
def __init__(self, processor,
sample_iter,
echo = None,
broadcast_socket = None):
"""Manages advancing through a concrete FileProcessor
        subclass asynchronously. The processor pushes Interval
updates on to the `worker_q`, which are also forwarded
to an `events_q`. Additional async tasks can subscribe to
either queue; the `events_q` is considered the outward-facing
point.
        `sample_iter` should conform to the format produced by
`goetia.processing.iter_fastx_inputs`.
Args:
processor (libgoetia.InserterProcessor<T>): Processor to manage.
sample_iter (iterator): Iterator over pairs of or single samples.
echo (bool): Whether to echo `events_q` to the terminal.
broadcast_socket (str, optional): AF_UNIX socket to broadcast
the events queue on.
"""
self.worker_q = curio.UniversalQueue()
self.worker_subs = QueueManager(self.worker_q, 'worker_q')
self.events_q = curio.UniversalQueue()
self.events_subs = QueueManager(self.events_q, 'events_q')
self.channels = OrderedDict()
self.channels[self.worker_subs.name] = self.worker_subs
self.channels[self.events_subs.name] = self.events_subs
# We want everything from the worker q to also end
# up on the events q
self.subscribe('worker_q', self.events_q, 'events_q')
self.listener_tasks = []
self.processor = processor
self.sample_iter = sample_iter
self.run_echo = echo is not None
self.echo_file = '/dev/stderr' if echo is True else echo
self.state = RunState.READY
self.processed = set()
#super().__init__(broadcast_socket)
def get_channel(self, channel: str) -> QueueManager:
"""Query for the given channel name.
Args:
channel (str): The channel name.
Returns:
QueueManager: Manager for the channel.
"""
try:
return self.channels[channel]
except KeyError:
print(f'Requested invalid channel: "{channel}" does not exist.', file=sys.stderr)
raise
def subscribe(self, channel_name: str,
collection_q: curio.UniversalQueue,
subscriber_name: str) -> None:
"""Subscribe a queue of the given name to a channel.
Args:
channel_name (str): Name of the channel.
collection_q (curio.Queue): The queue to collect on.
subscriber_name (str): Name of the subscriber.
"""
self.get_channel(channel_name).subscribe(collection_q, subscriber_name)
def unsubscribe(self, channel_name: str,
collection_q: curio.UniversalQueue) -> None:
"""Stop receving data from the named channel on the given queue.
Args:
channel_name (str): Name of the channel.
collection_q (curio.Queue): Queue object to remove.
"""
self.get_channel(channel_name).unsubscribe(collection_q)
def add_listener(self, channel_name: str,
subscriber_name: str) -> MessageHandler:
channel = self.get_channel(channel_name)
listener = MessageHandler(subscriber_name, channel)
self.listener_tasks.append(listener.task)
return listener
def worker(self) -> None:
time, n_seqs = 0, 0
for sample, name in self.sample_iter:
self.worker_q.put(SampleStarted(sample_name=name, # type: ignore
file_names=sample,
t=time,
sequence=n_seqs))
try:
for n_seqs, time, n_skipped in self.processor.chunked_process(*sample):
if self.state is RunState.STOP_SATURATED:
# Saturation is tripped externally: just return immediately.
return
if self.state is RunState.SIGINT:
# If we're interrupted, inform our listeners that something went wrong.
self.worker_q.put(Error(t=time, # type: ignore
sequence=n_seqs,
sample_name=name,
file_names=sample,
error='Process terminated (SIGINT).'))
return
self.worker_q.put(Interval(t=time, # type: ignore
sequence=n_seqs,
sample_name=name,
file_names=sample))
self.processed.add(tuple(sample))
self.worker_q.put(SampleFinished(t=time, # type: ignore
sequence=n_seqs,
sample_name=name,
file_names=sample))
except Exception as e:
self.worker_q.put(Error(t=time, # type: ignore
sequence=n_seqs,
sample_name=name,
file_names=sample,
                                        error=traceback.format_exc()))
return
finally:
self.worker_q.put(EndStream(t=time, # type: ignore
sequence=n_seqs))
#def on_error(self, exception):
# self.worker_q.put(Error(t=self.processor.time_elapsed(),
# sequence=n_seqs,
# sample_name=name,
# error=f'At sequence {self.processor.n_sequences()}: {str(e)}',
# file_names=sample))
# self.state = RunState.STOP_ERROR
async def start(self, extra_tasks = None) -> None:
try:
async with curio.TaskGroup() as g:
# each channel has its own dispatch task
# to send data to its subscribers
for channel_name, channel in self.channels.items():
await g.spawn(channel.dispatch)
# start up AF_UNIX broadcaster if desired
# if self.broadcast_socket is not None:
# await g.spawn(self.broadcaster)
if self.run_echo:
listener = self.add_listener('events_q', 'echo')
async def echo(msg):
mode = 'w' if self.echo_file in ['/dev/stdout', '/dev/stderr'] else 'a'
async with curio.aopen(self.echo_file, mode) as fp:
await fp.write(f'{msg.to_yaml()}\n')
listener.on_message(AllMessages,
echo)
# spawn tasks from listener callbacks
for task in self.listener_tasks:
await g.spawn(task)
# spawn extra tasks to run
if extra_tasks is not None:
for task in extra_tasks:
await g.spawn(task)
# and now we spawn the worker to iterate through
# the processor and wait for it to finish
self.state = RunState.RUNNING
signal.signal(signal.SIGINT, lambda signo, frame: self.interrupt())
# give just a bit of time for the listeners to all spin up
await curio.sleep(0.05)
# then spawn the worker
w = await g.spawn_thread(self.worker)
await w.join()
await curio.sleep(0.05)
await self.worker_subs.kill()
except Exception as e:
print(e, file=sys.stderr)
def stop(self) -> None:
self.state = RunState.STOP
def interrupt(self) -> None:
self.state = RunState.SIGINT
def saturate(self) -> None:
self.state = RunState.STOP_SATURATED
def every_n_intervals(func, n=1):
poller = libgoetia.metrics.IntervalCounter(n)
@functools.wraps(func)
async def wrapped(msg, *args, **kwargs):
assert isinstance(msg, Interval)
if poller.poll():
await func(msg, *args, **kwargs)
return wrapped
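# Hedged usage sketch (illustrative, not part of the original module): combining
# every_n_intervals with MessageHandler.on_message so a status callback only
# fires on every 10th Interval message. The callback body and the subscriber
# name are assumptions.
def _example_status_listener(processor: AsyncSequenceProcessor) -> MessageHandler:
    async def status(msg, *args, **kwargs):
        print(f'{msg.sample_name}: {msg.sequence} sequences at t={msg.t}')
    listener = processor.add_listener('events_q', 'status')
    listener.on_message(Interval, every_n_intervals(status, n=10))
    return listener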
class AsyncJSONStreamWriter:
def __init__(self, filename: str):
'''Asynchronously write JSON data to a file.
Writes a stream of JSON objects to a file. The top-level
element is always a list; list items can be any valid JSON
type.
Args:
filename: Path of the target file to write to.
'''
self.filename = filename
self.n_writes = 0
with open(self.filename, 'w') as fp:
fp.write('[')
def __del__(self):
with open(self.filename, 'a') as fp:
fp.write(']')
async def write(self, data: Any, expand: bool = True):
        '''Write the given data as a JSON element to the stream.
Strings will be written assuming they are already valid JSON;
this could result in malformed JSON, so care must be taken.
Other data types are passed to json.dumps for serialization.
Args:
data: Data to coerce to JSON.
expand: If True, iterables will be expanded into the stream
rather than appended as a single item.
'''
buf = ''
async with curio.aopen(self.filename, 'a') as fp:
if self.n_writes != 0:
await fp.write(',\n')
if isinstance(data, str):
# assume already valid JSON object
buf = data
elif expand and is_iterable(data) and not isinstance(data, dict):
# extend the top level list rather than
# adding the iterable as an item
buf = ','.join((json.dumps(item) for item in data))
else:
buf = json.dumps(data)
await fp.write(buf)
self.n_writes += 1
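# Hedged usage sketch (illustrative, not part of the original module): streaming
# a few records through AsyncJSONStreamWriter. The output path and record
# contents are assumptions; the writer emits ',' separators itself and writes
# the closing ']' when it is garbage collected.
async def _example_json_stream(path='/tmp/goetia_metrics.json'):
    writer = AsyncJSONStreamWriter(path)
    await writer.write({'t': 0, 'unique_kmers': 0})
    await writer.write([{'t': 1000}, {'t': 2000}])   # expanded into two list items
    del writer                                       # __del__ appends the closing ']'
# curio.run(_example_json_stream)                    # uncomment to run the sketch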
|
|
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import timezone
from django.utils.duration import duration_microseconds
from django.utils.encoding import force_str
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
# MySQL stores positive fields as UNSIGNED ints.
integer_field_ranges = {
**BaseDatabaseOperations.integer_field_ranges,
'PositiveSmallIntegerField': (0, 65535),
'PositiveIntegerField': (0, 4294967295),
}
cast_data_types = {
'AutoField': 'signed integer',
'BigAutoField': 'signed integer',
'CharField': 'char(%(max_length)s)',
'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)',
'TextField': 'char',
'IntegerField': 'signed integer',
'BigIntegerField': 'signed integer',
'SmallIntegerField': 'signed integer',
'PositiveIntegerField': 'unsigned integer',
'PositiveSmallIntegerField': 'unsigned integer',
}
cast_char_field_without_max_length = 'char'
explain_prefix = 'EXPLAIN'
def date_extract_sql(self, lookup_type, field_name):
# https://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
elif lookup_type == 'week':
# Override the value of default_week_format for consistency with
# other database backends.
# Mode 3: Monday, 1-53, with 4 or more days this year.
return "WEEK(%s, 3)" % field_name
elif lookup_type == 'iso_year':
# Get the year part from the YEARWEEK function, which returns a
# number as year * 100 + week.
return "TRUNCATE(YEARWEEK(%s, 3), -2) / 100" % field_name
else:
# EXTRACT returns 1-53 based on ISO-8601 for the week number.
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = {
'year': '%%Y-01-01',
'month': '%%Y-%%m-01',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str)
elif lookup_type == 'quarter':
return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % (
field_name, field_name
)
elif lookup_type == 'week':
return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % (
field_name, field_name
)
else:
return "DATE(%s)" % (field_name)
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ and self.connection.timezone_name != tzname:
field_name = "CONVERT_TZ(%s, '%s', '%s')" % (field_name, self.connection.timezone_name, tzname)
return field_name
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "DATE(%s)" % field_name
def datetime_cast_time_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "TIME(%s)" % field_name
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
if lookup_type == 'quarter':
return (
"CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + "
"INTERVAL QUARTER({field_name}) QUARTER - " +
"INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)"
).format(field_name=field_name)
if lookup_type == 'week':
return (
"CAST(DATE_FORMAT(DATE_SUB({field_name}, "
"INTERVAL WEEKDAY({field_name}) DAY), "
"'%%Y-%%m-%%d 00:00:00') AS DATETIME)"
).format(field_name=field_name)
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join(format[:i] + format_def[i:])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
def time_trunc_sql(self, lookup_type, field_name):
fields = {
'hour': '%%H:00:00',
'minute': '%%H:%%i:00',
'second': '%%H:%%i:%%s',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str)
else:
return "TIME(%s)" % (field_name)
def date_interval_sql(self, timedelta):
return 'INTERVAL %s MICROSECOND' % duration_microseconds(timedelta)
def format_for_duration_arithmetic(self, sql):
return 'INTERVAL %s MICROSECOND' % sql
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return [(None, ("NULL", [], False))]
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
# MySQLdb returns string, PyMySQL bytes.
return force_str(getattr(cursor, '_executed', None), errors='replace')
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(self.quote_name(table)),
))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
return str(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
return str(value)
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def combine_expression(self, connector, sub_expressions):
if connector == '^':
return 'POW(%s)' % ','.join(sub_expressions)
# Convert the result to a signed integer since MySQL's binary operators
# return an unsigned integer.
elif connector in ('&', '|', '<<'):
return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions)
elif connector == '>>':
lhs, rhs = sub_expressions
return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
return super().combine_expression(connector, sub_expressions)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
if settings.USE_TZ:
converters.append(self.convert_datetimefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
def binary_placeholder_sql(self, value):
return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s'
def subtract_temporals(self, internal_type, lhs, rhs):
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
if internal_type == 'TimeField':
if self.connection.mysql_is_mariadb:
# MariaDB includes the microsecond component in TIME_TO_SEC as
# a decimal. MySQL returns an integer without microseconds.
return 'CAST((TIME_TO_SEC(%(lhs)s) - TIME_TO_SEC(%(rhs)s)) * 1000000 AS SIGNED)' % {
'lhs': lhs_sql, 'rhs': rhs_sql
}, lhs_params + rhs_params
return (
"((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -"
" (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))"
) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2
else:
return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params
def explain_query_prefix(self, format=None, **options):
# Alias MySQL's TRADITIONAL to TEXT for consistency with other backends.
if format and format.upper() == 'TEXT':
format = 'TRADITIONAL'
prefix = super().explain_query_prefix(format, **options)
if format:
prefix += ' FORMAT=%s' % format
if self.connection.features.needs_explain_extended and format is None:
# EXTENDED and FORMAT are mutually exclusive options.
prefix += ' EXTENDED'
return prefix
def regex_lookup(self, lookup_type):
# REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE
# doesn't exist in MySQL 5.6 or in MariaDB.
if self.connection.mysql_version < (8, 0, 0) or self.connection.mysql_is_mariadb:
if lookup_type == 'regex':
return '%s REGEXP BINARY %s'
return '%s REGEXP %s'
match_option = 'c' if lookup_type == 'regex' else 'i'
return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option
def insert_statement(self, ignore_conflicts=False):
return 'INSERT IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
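# Hedged sketch (illustrative, not part of Django): the truncation and extract
# helpers above only assemble SQL strings, so they can be exercised without a
# live database by passing a placeholder connection. The column name 'created'
# is an assumption.
def _example_trunc_sql():
    ops = DatabaseOperations(connection=None)
    assert ops.date_trunc_sql('month', 'created') == \
        "CAST(DATE_FORMAT(created, '%%Y-%%m-01') AS DATE)"
    assert ops.time_trunc_sql('minute', 'created') == \
        "CAST(DATE_FORMAT(created, '%%H:%%i:00') AS TIME)"
    return ops.date_extract_sql('week', 'created')   # "WEEK(created, 3)"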
|
|
from __future__ import unicode_literals
from django.db import transaction
from django.test import TestCase
from django.utils import six
from .models import (
Article, InheritedArticleA, InheritedArticleB, Publication, Tag,
)
class ManyToManyTests(TestCase):
def setUp(self):
# Create a couple of Publications.
self.p1 = Publication.objects.create(id=None, title='The Python Journal')
self.p2 = Publication.objects.create(id=None, title='Science News')
self.p3 = Publication.objects.create(id=None, title='Science Weekly')
self.p4 = Publication.objects.create(title='Highlights for Children')
self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
self.a1.publications.add(self.p1)
self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
self.a3.publications.add(self.p2)
self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
self.a4.publications.add(self.p2)
self.t1 = Tag.objects.create(name='web')
self.t2 = Tag.objects.create(id=2 ** 33, name='development')
self.t3 = Tag.objects.create(name='framework')
self.a1.tags.add(self.t1, self.t2, self.t3)
self.a2.tags.add(self.t1)
def test_add(self):
# Create an Article.
        a5 = Article(id=None, headline='Django lets you create Web apps easily')
# You can't associate it with a Publication until it's been saved.
self.assertRaises(ValueError, getattr, a5, 'publications')
# Save it!
a5.save()
# Associate the Article with a Publication.
a5.publications.add(self.p1)
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: The Python Journal>'])
# Create another Article, and set it to appear in both Publications.
a6 = Article(id=None, headline='ESA uses Python')
a6.save()
a6.publications.add(self.p1, self.p2)
a6.publications.add(self.p3)
# Adding a second time is OK
a6.publications.add(self.p3)
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Adding an object of the wrong type raises TypeError
with six.assertRaisesRegex(self, TypeError, "'Publication' instance expected, got <Article.*"):
with transaction.atomic():
a6.publications.add(a5)
        # Add a Publication directly via publications.create() using keyword arguments.
a6.publications.create(title='Highlights for Adults')
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Adults>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_reverse_add(self):
# Adding via the 'other' end of an m2m
a5 = Article(headline='NASA finds intelligent life on Mars')
a5.save()
self.p2.article_set.add(a5)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: Science News>'])
# Adding via the other end using keywords
self.p2.article_set.create(headline='Carbon-free diet works wonders')
self.assertQuerysetEqual(
self.p2.article_set.all(),
[
'<Article: Carbon-free diet works wonders>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
a6 = self.p2.article_set.all()[3]
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_related_sets(self):
# Article objects have access to their related Publication objects.
self.assertQuerysetEqual(self.a1.publications.all(),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Publication objects have access to their related Article objects.
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p1.article_set.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
['<Article: NASA uses Python>'])
def test_selects(self):
# We can perform kwarg queries across m2m relationships
self.assertQuerysetEqual(
Article.objects.filter(publications__id__exact=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__pk=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science"),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science").distinct(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# The count() function respects distinct() as well.
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2.id]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# Excluding a related item works as you would expect, too (although the SQL
# involved is a little complex).
self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
['<Article: Django lets you build Web apps easily>'])
def test_reverse_selects(self):
# Reverse m2m queries are supported (i.e., starting at the table that
# doesn't have a ManyToManyField).
self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__headline__startswith="NASA"),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2.id]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_delete(self):
# If we delete a Publication, its Articles won't be able to access it.
self.p1.delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
])
self.assertQuerysetEqual(self.a1.publications.all(), [])
# If we delete an Article, its Publications won't be able to access it.
self.a2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
def test_bulk_delete(self):
# Bulk delete some Publications - references to deleted publications should go
Publication.objects.filter(title__startswith='Science').delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
# Bulk delete some articles - references to deleted objects should go
q = Article.objects.filter(headline__startswith='Django')
self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
q.delete()
# After the delete, the QuerySet cache needs to be cleared,
# and the referenced objects should be gone
self.assertQuerysetEqual(q, [])
self.assertQuerysetEqual(self.p1.article_set.all(),
['<Article: NASA uses Python>'])
def test_remove(self):
# Removing publication from an article:
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.a4.publications.remove(self.p2)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And from the other end
self.p2.article_set.remove(self.a3)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a3.publications.all(), [])
def test_set(self):
self.p2.article_set.set([self.a4, self.a3])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications.set([self.p3.id])
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
self.p2.article_set.set([])
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications.set([])
self.assertQuerysetEqual(self.a4.publications.all(), [])
self.p2.article_set.set([self.a4, self.a3], clear=True)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications.set([self.p3.id], clear=True)
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
self.p2.article_set.set([], clear=True)
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications.set([], clear=True)
self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign(self):
# Relation sets can be assigned. Assignment clears any existing set members
self.p2.article_set = [self.a4, self.a3]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
# An alternate to calling clear() is to assign the empty set
self.p2.article_set = []
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications = []
self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign_ids(self):
# Relation sets can also be set using primary key values
self.p2.article_set = [self.a4.id, self.a3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
def test_forward_assign_with_queryset(self):
# Ensure that querysets used in m2m assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ManyRelatedObjectsDescriptor.__set__. Refs #19816.
self.a1.publications = [self.p1, self.p2]
qs = self.a1.publications.filter(title='The Python Journal')
self.a1.publications = qs
self.assertEqual(1, self.a1.publications.count())
self.assertEqual(1, qs.count())
def test_reverse_assign_with_queryset(self):
# Ensure that querysets used in M2M assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ManyRelatedObjectsDescriptor.__set__. Refs #19816.
self.p1.article_set = [self.a1, self.a2]
qs = self.p1.article_set.filter(headline='Django lets you build Web apps easily')
self.p1.article_set = qs
self.assertEqual(1, self.p1.article_set.count())
self.assertEqual(1, qs.count())
def test_clear(self):
# Relation sets can be cleared:
self.p2.article_set.clear()
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And you can clear from the other end
self.p2.article_set.add(self.a3, self.a4)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
[
'<Publication: Science News>',
])
self.a4.publications.clear()
self.assertQuerysetEqual(self.a4.publications.all(), [])
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
def test_inherited_models_selects(self):
"""
#24156 - Objects from child models where the parent's m2m field uses
related_name='+' should be retrieved correctly.
"""
a = InheritedArticleA.objects.create()
b = InheritedArticleB.objects.create()
a.publications.add(self.p1, self.p2)
self.assertQuerysetEqual(a.publications.all(),
[
'<Publication: Science News>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(b.publications.all(), [])
b.publications.add(self.p3)
self.assertQuerysetEqual(a.publications.all(),
[
'<Publication: Science News>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(b.publications.all(),
[
'<Publication: Science Weekly>',
])
|
|
import gzip
import json
import os
import shutil
import hashlib
from os.path import join
from warnings import warn
from contextlib import closing
from functools import wraps
from typing import Callable, Optional, Dict, Tuple, List, Any, Union
import itertools
from collections.abc import Generator
from collections import OrderedDict
from functools import partial
from urllib.request import urlopen, Request
import numpy as np
import scipy.sparse
from ..externals import _arff
from ..externals._arff import ArffSparseDataType, ArffContainerType
from . import get_data_home
from urllib.error import HTTPError
from ..utils import Bunch
from ..utils import is_scalar_nan
from ..utils import get_chunk_n_rows
from ..utils import _chunk_generator
from ..utils import check_pandas_support # noqa
__all__ = ['fetch_openml']
_OPENML_PREFIX = "https://openml.org/"
_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2"
_DATA_INFO = "api/v1/json/data/{}"
_DATA_FEATURES = "api/v1/json/data/features/{}"
_DATA_QUALITIES = "api/v1/json/data/qualities/{}"
_DATA_FILE = "data/v1/download/{}"
OpenmlQualitiesType = List[Dict[str, str]]
OpenmlFeaturesType = List[Dict[str, str]]
def _get_local_path(openml_path: str, data_home: str) -> str:
return os.path.join(data_home, 'openml.org', openml_path + ".gz")
def _retry_with_clean_cache(
openml_path: str, data_home: Optional[str]
) -> Callable:
"""If the first call to the decorated function fails, the local cached
file is removed, and the function is called again. If ``data_home`` is
``None``, then the function is called once.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except HTTPError:
raise
except Exception:
warn("Invalid cache, redownloading file", RuntimeWarning)
local_path = _get_local_path(openml_path, data_home)
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
return wrapper
return decorator
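# Hedged usage sketch (illustrative, not part of scikit-learn): wrapping a cached
# fetch so that a corrupted local copy is purged and downloaded again once. The
# openml_path and data_home values are assumptions.
def _example_retrying_fetch(openml_path='api/v1/json/data/42',
                            data_home='/tmp/sklearn_data'):
    @_retry_with_clean_cache(openml_path, data_home)
    def _load():
        with closing(_open_openml_url(openml_path, data_home)) as response:
            return json.loads(response.read().decode('utf-8'))
    return _load()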
def _open_openml_url(openml_path: str, data_home: Optional[str]):
"""
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
openml_path : str
        OpenML URL that will be accessed. This will be prefixed with
_OPENML_PREFIX
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
Returns
-------
result : stream
A stream to the OpenML resource
"""
def is_gzip_encoded(_fsrc):
return _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + openml_path)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = urlopen(req)
if is_gzip_encoded(fsrc):
return gzip.GzipFile(fileobj=fsrc, mode='rb')
return fsrc
local_path = _get_local_path(openml_path, data_home)
if not os.path.exists(local_path):
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
# potentially, the directory has been created already
pass
try:
with closing(urlopen(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(local_path, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
# XXX: First time, decompression will not be necessary (by using fsrc), but
# it will happen nonetheless
return gzip.GzipFile(local_path, 'rb')
class OpenMLError(ValueError):
"""HTTP 412 is a specific OpenML error code, indicating a generic error"""
pass
def _get_json_content_from_openml_api(
url: str,
error_message: Optional[str],
data_home: Optional[str]
) -> Dict:
"""
Loads json data from the openml api
Parameters
----------
url : str
The URL to load from. Should be an official OpenML endpoint
error_message : str or None
The error message to raise if an acceptable OpenML error is thrown
(acceptable error is, e.g., data id not found. Other errors, like 404's
will throw the native error message)
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
json_data : json
the json result from the OpenML server if the call was successful.
An exception otherwise.
"""
@_retry_with_clean_cache(url, data_home)
def _load_json():
with closing(_open_openml_url(url, data_home)) as response:
return json.loads(response.read().decode("utf-8"))
try:
return _load_json()
except HTTPError as error:
# 412 is an OpenML specific error code, indicating a generic error
# (e.g., data not found)
if error.code != 412:
raise error
# 412 error, not in except for nicer traceback
raise OpenMLError(error_message)
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
"""
obtains several columns from sparse arff representation. Additionally, the
column indices are re-labelled, given the columns that are not included.
(e.g., when including [1, 2, 3], the columns will be relabelled to
[0, 1, 2])
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
"""
arff_data_new: ArffSparseDataType = (list(), list(), list())
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
arff_data_new[0].append(val)
arff_data_new[1].append(row_idx)
arff_data_new[2].append(reindexed_columns[col_idx])
return arff_data_new
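# Hedged sketch (illustrative, not part of scikit-learn): a tiny hand-written
# sparse ARFF triple showing how the kept columns are re-labelled to a dense
# 0-based index.
def _example_split_sparse():
    values, rows, cols = [1.0, 2.0, 3.0], [0, 0, 1], [0, 2, 3]
    return _split_sparse_columns((values, rows, cols), include_columns=[2, 3])
    # -> ([2.0, 3.0], [0, 1], [0, 1]); column 2 becomes 0, column 3 becomes 1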
def _sparse_data_to_array(
arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray:
# turns the sparse data back into an array (can't use toarray() function,
# as this does only work on numeric data)
num_obs = max(arff_data[1]) + 1
y_shape = (num_obs, len(include_columns))
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
# TODO: improve for efficiency
y = np.empty(y_shape, dtype=np.float64)
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
y[row_idx, reindexed_columns[col_idx]] = val
return y
def _convert_arff_data(
arff: ArffContainerType,
col_slice_x: List[int],
col_slice_y: List[int],
shape: Optional[Tuple] = None
) -> Tuple:
"""
converts the arff object into the appropriate matrix type (np.array or
scipy.sparse.csr_matrix) based on the 'data part' (i.e., in the
liac-arff dict, the object from the 'data' key)
Parameters
----------
arff : dict
As obtained from liac-arff object.
col_slice_x : list
The column indices that are sliced from the original array to return
as X data
col_slice_y : list
The column indices that are sliced from the original array to return
as y data
Returns
-------
X : np.array or scipy.sparse.csr_matrix
y : np.array
"""
arff_data = arff['data']
if isinstance(arff_data, Generator):
if shape is None:
raise ValueError(
"shape must be provided when arr['data'] is a Generator"
)
if shape[0] == -1:
count = -1
else:
count = shape[0] * shape[1]
data = np.fromiter(itertools.chain.from_iterable(arff_data),
dtype='float64', count=count)
data = data.reshape(*shape)
X = data[:, col_slice_x]
y = data[:, col_slice_y]
return X, y
elif isinstance(arff_data, tuple):
arff_data_X = _split_sparse_columns(arff_data, col_slice_x)
num_obs = max(arff_data[1]) + 1
X_shape = (num_obs, len(col_slice_x))
X = scipy.sparse.coo_matrix(
(arff_data_X[0], (arff_data_X[1], arff_data_X[2])),
shape=X_shape, dtype=np.float64)
X = X.tocsr()
y = _sparse_data_to_array(arff_data, col_slice_y)
return X, y
else:
# This should never happen
raise ValueError('Unexpected Data Type obtained from arff.')
def _feature_to_dtype(feature: Dict[str, str]):
"""Map feature to dtype for pandas DataFrame
"""
if feature['data_type'] == 'string':
return object
elif feature['data_type'] == 'nominal':
return 'category'
# only numeric, integer, real are left
elif (feature['number_of_missing_values'] != '0' or
feature['data_type'] in ['numeric', 'real']):
# cast to floats when there are any missing values
return np.float64
elif feature['data_type'] == 'integer':
return np.int64
raise ValueError('Unsupported feature: {}'.format(feature))
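# Hedged sketch (illustrative, not part of scikit-learn): hand-written stand-ins
# for the OpenML /data/features payload showing the dtype mapping above.
def _example_feature_dtypes():
    nominal = {'data_type': 'nominal', 'number_of_missing_values': '0'}
    integer = {'data_type': 'integer', 'number_of_missing_values': '0'}
    with_nan = {'data_type': 'integer', 'number_of_missing_values': '3'}
    return (_feature_to_dtype(nominal),    # 'category'
            _feature_to_dtype(integer),    # np.int64
            _feature_to_dtype(with_nan))   # np.float64, because of missing values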
def _convert_arff_data_dataframe(
arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]
) -> Tuple:
"""Convert the ARFF object into a pandas DataFrame.
Parameters
----------
arff : dict
As obtained from liac-arff object.
columns : list
Columns from dataframe to return.
features_dict : dict
Maps feature name to feature info from openml.
Returns
-------
result : tuple
tuple with the resulting dataframe
"""
pd = check_pandas_support('fetch_openml with as_frame=True')
attributes = OrderedDict(arff['attributes'])
arff_columns = list(attributes)
if not isinstance(arff['data'], Generator):
raise ValueError(
"arff['data'] must be a generator when converting to pd.DataFrame."
)
# calculate chunksize
first_row = next(arff['data'])
first_df = pd.DataFrame([first_row], columns=arff_columns)
row_bytes = first_df.memory_usage(deep=True).sum()
chunksize = get_chunk_n_rows(row_bytes)
# read arff data with chunks
columns_to_keep = [col for col in arff_columns if col in columns]
dfs = []
dfs.append(first_df[columns_to_keep])
for data in _chunk_generator(arff['data'], chunksize):
dfs.append(pd.DataFrame(data, columns=arff_columns)[columns_to_keep])
df = pd.concat(dfs, ignore_index=True)
for column in columns_to_keep:
dtype = _feature_to_dtype(features_dict[column])
if dtype == 'category':
cats_without_missing = [cat for cat in attributes[column]
if cat is not None and
not is_scalar_nan(cat)]
dtype = pd.api.types.CategoricalDtype(cats_without_missing)
df[column] = df[column].astype(dtype, copy=False)
return (df, )
def _get_data_info_by_name(
name: str, version: Union[int, str], data_home: Optional[str]
):
"""
Utilizes the openml dataset listing api to find a dataset by
name/version
OpenML api function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is a string (value: "active") it will take the first
version from OpenML that is annotated as active. Any other string
values except "active" are treated as integer.
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
first_dataset : json
        JSON representation of the first dataset object that adhered to the
        search criteria.
"""
if version == "active":
# situation in which we return the oldest active version
url = _SEARCH_NAME.format(name) + "/status/active/"
error_msg = "No active dataset {} found.".format(name)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
res = json_data['data']['dataset']
if len(res) > 1:
warn("Multiple active versions of the dataset matching the name"
" {name} exist. Versions may be fundamentally different, "
"returning version"
" {version}.".format(name=name, version=res[0]['version']))
return res[0]
# an integer version has been provided
url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
try:
json_data = _get_json_content_from_openml_api(
url, error_message=None, data_home=data_home
)
except OpenMLError:
# we can do this in 1 function call if OpenML does not require the
# specification of the dataset status (i.e., return datasets with a
# given name / version regardless of active, deactivated, etc. )
# TODO: feature request OpenML.
url += "/status/deactivated"
error_msg = "Dataset {} with version {} not found.".format(name,
version)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
return json_data['data']['dataset'][0]
def _get_data_description_by_id(
data_id: int, data_home: Optional[str]
) -> Dict[str, Any]:
# OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id
url = _DATA_INFO.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_set_description']
def _get_data_features(
data_id: int, data_home: Optional[str]
) -> OpenmlFeaturesType:
# OpenML function:
# https://www.openml.org/api_docs#!/data/get_data_features_id
url = _DATA_FEATURES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_features']['feature']
def _get_data_qualities(
data_id: int, data_home: Optional[str]
) -> OpenmlQualitiesType:
# OpenML API function:
# https://www.openml.org/api_docs#!/data/get_data_qualities_id
url = _DATA_QUALITIES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
# the qualities might not be available, but we still try to process
# the data
return json_data.get('data_qualities', {}).get('quality', [])
def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:
"""Get the number of samples from data qualities.
Parameters
----------
data_qualities : list of dict
Used to retrieve the number of instances (samples) in the dataset.
Returns
-------
n_samples : int
The number of samples in the dataset or -1 if data qualities are
unavailable.
"""
# If the data qualities are unavailable, we return -1
default_n_samples = -1
qualities = {d['name']: d['value'] for d in data_qualities}
return int(float(qualities.get('NumberOfInstances', default_n_samples)))
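# A minimal sketch (hypothetical quality entries, not real OpenML output) of
# how the qualities list is reduced to a sample count by _get_num_samples:
#
#     qualities = [{'name': 'NumberOfInstances', 'value': '150.0'},
#                  {'name': 'NumberOfFeatures', 'value': '4.0'}]
#     _get_num_samples(qualities)   # -> 150
#     _get_num_samples([])          # -> -1 (qualities unavailable)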
def _load_arff_response(
url: str,
data_home: Optional[str],
return_type, encode_nominal: bool,
parse_arff: Callable[[ArffContainerType], Tuple],
md5_checksum: str
) -> Tuple:
"""Load arff data with url and parses arff response with parse_arff"""
response = _open_openml_url(url, data_home)
with closing(response):
# Note that if the data is dense, no reading is done until the data
# generator is iterated.
actual_md5_checksum = hashlib.md5()
def _stream_checksum_generator(response):
for line in response:
actual_md5_checksum.update(line)
yield line.decode('utf-8')
stream = _stream_checksum_generator(response)
arff = _arff.load(stream,
return_type=return_type,
encode_nominal=encode_nominal)
parsed_arff = parse_arff(arff)
# consume remaining stream, if early exited
for _ in stream:
pass
if actual_md5_checksum.hexdigest() != md5_checksum:
raise ValueError("md5 checksum of local file for " + url +
" does not match description. "
"Downloaded file could have been modified / "
"corrupted, clean cache and retry...")
return parsed_arff
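# The checksum-while-streaming pattern above can be reproduced in isolation.
# A minimal, self-contained sketch (the file name is hypothetical):
#
#     import hashlib
#
#     def iter_lines_with_md5(fileobj, digest):
#         # Update the digest with the raw bytes while yielding decoded lines.
#         for line in fileobj:
#             digest.update(line)
#             yield line.decode('utf-8')
#
#     digest = hashlib.md5()
#     with open('data.arff', 'rb') as fh:
#         for _ in iter_lines_with_md5(fh, digest):
#             pass
#     print(digest.hexdigest())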
def _download_data_to_bunch(
url: str,
sparse: bool,
data_home: Optional[str],
*,
as_frame: bool,
features_list: List,
data_columns: List[int],
target_columns: List,
shape: Optional[Tuple[int, int]],
md5_checksum: str
):
"""Download OpenML ARFF and convert to Bunch of data
"""
# NB: this function is long in order to handle retry for any failure
# during the streaming parse of the ARFF.
# Prepare which columns and data types should be returned for the X and y
features_dict = {feature['name']: feature for feature in features_list}
# XXX: col_slice_y should be all nominal or all numeric
_verify_target_data_type(features_dict, target_columns)
col_slice_y = [int(features_dict[col_name]['index'])
for col_name in target_columns]
col_slice_x = [int(features_dict[col_name]['index'])
for col_name in data_columns]
for col_idx in col_slice_y:
feat = features_list[col_idx]
nr_missing = int(feat['number_of_missing_values'])
if nr_missing > 0:
raise ValueError('Target column {} has {} missing values. '
'Missing values are not supported for target '
'columns. '.format(feat['name'], nr_missing))
# Access an ARFF file on the OpenML server. Documentation:
# https://www.openml.org/api_data_docs#!/data/get_download_id
if sparse is True:
return_type = _arff.COO
else:
return_type = _arff.DENSE_GEN
frame = nominal_attributes = None
parse_arff: Callable
postprocess: Callable
if as_frame:
columns = data_columns + target_columns
parse_arff = partial(_convert_arff_data_dataframe, columns=columns,
features_dict=features_dict)
def postprocess(frame):
X = frame[data_columns]
if len(target_columns) >= 2:
y = frame[target_columns]
elif len(target_columns) == 1:
y = frame[target_columns[0]]
else:
y = None
return X, y, frame, nominal_attributes
else:
def parse_arff(arff):
X, y = _convert_arff_data(arff, col_slice_x, col_slice_y, shape)
            # nominal_attributes is a dict mapping attribute names to their
            # possible values. It also includes the target columns, which are
            # popped off below before the data is packed into the Bunch
            # object.
nominal_attributes = {k: v for k, v in arff['attributes']
if isinstance(v, list) and
k in data_columns + target_columns}
return X, y, nominal_attributes
def postprocess(X, y, nominal_attributes):
is_classification = {col_name in nominal_attributes
for col_name in target_columns}
if not is_classification:
# No target
pass
elif all(is_classification):
y = np.hstack([
np.take(
np.asarray(nominal_attributes.pop(col_name),
dtype='O'),
y[:, i:i + 1].astype(int, copy=False))
for i, col_name in enumerate(target_columns)
])
elif any(is_classification):
raise ValueError('Mix of nominal and non-nominal targets is '
'not currently supported')
            # reshape y back to a 1-D array if there is only 1 target column;
            # set it back to None if there are no target columns
if y.shape[1] == 1:
y = y.reshape((-1,))
elif y.shape[1] == 0:
y = None
return X, y, frame, nominal_attributes
out = _retry_with_clean_cache(url, data_home)(
_load_arff_response)(url, data_home,
return_type=return_type,
encode_nominal=not as_frame,
parse_arff=parse_arff,
md5_checksum=md5_checksum)
X, y, frame, nominal_attributes = postprocess(*out)
return Bunch(data=X, target=y, frame=frame,
categories=nominal_attributes,
feature_names=data_columns,
target_names=target_columns)
def _verify_target_data_type(features_dict, target_columns):
# verifies the data type of the y array in case there are multiple targets
# (throws an error if these targets do not comply with sklearn support)
if not isinstance(target_columns, list):
        raise ValueError('target_column should be a list, '
                         'got: %s' % type(target_columns))
found_types = set()
for target_column in target_columns:
if target_column not in features_dict:
            raise KeyError(
                'Could not find target_column={}'.format(target_column))
if features_dict[target_column]['data_type'] == "numeric":
found_types.add(np.float64)
else:
found_types.add(object)
# note: we compare to a string, not boolean
if features_dict[target_column]['is_ignore'] == 'true':
warn('target_column={} has flag is_ignore.'.format(
target_column))
if features_dict[target_column]['is_row_identifier'] == 'true':
warn('target_column={} has flag is_row_identifier.'.format(
target_column))
if len(found_types) > 1:
raise ValueError('Can only handle homogeneous multi-target datasets, '
'i.e., all targets are either numeric or '
'categorical.')
def _valid_data_column_names(features_list, target_columns):
    # Logic for determining which columns can be learned on. According to the
    # OpenML guide, columns that carry the `is_row_identifier` or `is_ignore`
    # flag cannot be learned on. Target columns are also excluded.
valid_data_column_names = []
for feature in features_list:
if (feature['name'] not in target_columns
and feature['is_ignore'] != 'true'
and feature['is_row_identifier'] != 'true'):
valid_data_column_names.append(feature['name'])
return valid_data_column_names
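# A hypothetical example of the filtering done by _valid_data_column_names
# (feature dicts are trimmed to the keys the function inspects):
#
#     features = [
#         {'name': 'sepal_length', 'is_ignore': 'false',
#          'is_row_identifier': 'false'},
#         {'name': 'row_id', 'is_ignore': 'false',
#          'is_row_identifier': 'true'},
#         {'name': 'class', 'is_ignore': 'false',
#          'is_row_identifier': 'false'},
#     ]
#     _valid_data_column_names(features, ['class'])   # -> ['sepal_length']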
def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = 'active',
data_id: Optional[int] = None,
data_home: Optional[str] = None,
target_column: Optional[Union[str, List]] = 'default-target',
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = 'auto'
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
        'default-target', the standard target column as stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations)
cache : bool, default=True
Whether to cache downloaded datasets using joblib.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
        DataFrames or Series as described above.
If as_frame is 'auto', the data and target will be converted to
DataFrame or Series as if as_frame is set to True, unless the dataset
is stored in sparse format.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target)
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'openml')
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name))
data_info = _get_data_info_by_name(name, version, data_home)
data_id = data_info['did']
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version))
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or "
"data_id.")
data_description = _get_data_description_by_id(data_id, data_home)
if data_description['status'] != "active":
warn("Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description['version'],
data_description['name'],
data_description['url']))
if 'error' in data_description:
warn("OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description['error']))
if 'warning' in data_description:
warn("OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description['warning']))
return_sparse = False
if data_description['format'].lower() == 'sparse_arff':
return_sparse = True
if as_frame == 'auto':
as_frame = not return_sparse
if as_frame and return_sparse:
raise ValueError('Cannot return dataframe with sparse data')
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if 'true' in (feature['is_ignore'], feature['is_row_identifier']):
continue
if feature['data_type'] == 'string':
raise ValueError('STRING attributes are not supported for '
'array representation. Try as_frame=True')
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [feature['name'] for feature in features_list
if feature['is_target'] == 'true']
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
elif isinstance(target_column, list):
target_columns = target_column
else:
raise TypeError("Did not recognize type of target_column"
"Should be str, list or None. Got: "
"{}".format(type(target_column)))
data_columns = _valid_data_column_names(features_list,
target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = _DATA_FILE.format(data_description['file_id'])
bunch = _download_data_to_bunch(url, return_sparse, data_home,
as_frame=bool(as_frame),
features_list=features_list, shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description[
"md5_checksum"])
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop('description'))
bunch.update(
DESCR=description, details=data_description,
url="https://www.openml.org/d/{}".format(data_id))
return bunch
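# An illustrative usage sketch (not part of the module; requires network
# access; the public OpenML 'iris' dataset is used purely as an example):
#
#     from sklearn.datasets import fetch_openml
#
#     iris = fetch_openml(name='iris', version=1, as_frame=True)
#     print(iris.frame.shape)    # (150, 5) -- four features plus the target
#     X, y = fetch_openml(name='iris', version=1, as_frame=False,
#                         return_X_y=True)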
|
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the topics and skills dashboard, from where topics and skills
are created.
"""
from __future__ import annotations
import logging
from core import feconf
from core import utils
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import config_domain
from core.domain import fs_services
from core.domain import image_validation_services
from core.domain import question_services
from core.domain import role_services
from core.domain import skill_domain
from core.domain import skill_fetchers
from core.domain import skill_services
from core.domain import state_domain
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
class TopicsAndSkillsDashboardPage(base.BaseHandler):
"""Page showing the topics and skills dashboard."""
@acl_decorators.can_access_topics_and_skills_dashboard
def get(self):
self.render_template(
'topics-and-skills-dashboard-page.mainpage.html')
class TopicsAndSkillsDashboardPageDataHandler(base.BaseHandler):
"""Provides data for the user's topics and skills dashboard page."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {
'GET': {}
}
@acl_decorators.can_access_topics_and_skills_dashboard
def get(self):
"""Handles GET requests."""
topic_summaries = topic_fetchers.get_all_topic_summaries()
topic_summary_dicts = [
summary.to_dict() for summary in topic_summaries]
skill_summaries = skill_services.get_all_skill_summaries()
skill_summary_dicts = [
summary.to_dict() for summary in skill_summaries]
skill_ids_assigned_to_some_topic = (
topic_fetchers.get_all_skill_ids_assigned_to_some_topic())
merged_skill_ids = (
skill_services.get_merged_skill_ids())
topic_rights_dict = topic_fetchers.get_all_topic_rights()
for topic_summary in topic_summary_dicts:
            topic_rights = topic_rights_dict[topic_summary['id']]
            if topic_rights:
                topic_summary['is_published'] = (
                    topic_rights.topic_is_published)
                topic_summary['can_edit_topic'] = (
                    topic_services.check_can_edit_topic(
                        self.user, topic_rights)
                )
all_classrooms_dict = config_domain.CLASSROOM_PAGES_DATA.value
all_classroom_names = [
classroom['name'] for classroom in all_classrooms_dict]
topic_classroom_dict = {}
for classroom in all_classrooms_dict:
for topic_id in classroom['topic_ids']:
topic_classroom_dict[topic_id] = classroom['name']
for topic_summary_dict in topic_summary_dicts:
topic_summary_dict['classroom'] = topic_classroom_dict.get(
topic_summary_dict['id'], None)
untriaged_skill_summary_dicts = []
mergeable_skill_summary_dicts = []
categorized_skills_dict = {}
topics = topic_fetchers.get_all_topics()
for topic in topics:
subtopics = topic.subtopics
categorized_skills_dict[topic.name] = {}
uncategorized_skills = (
skill_services.get_descriptions_of_skills(
topic.uncategorized_skill_ids)[0])
skills_list = []
for skill_id in topic.uncategorized_skill_ids:
skill_dict = {
'skill_id': skill_id,
'skill_description': uncategorized_skills[skill_id]
}
skills_list.append(skill_dict)
categorized_skills_dict[topic.name]['uncategorized'] = (
skills_list)
for subtopic in subtopics:
skills = (skill_services.get_descriptions_of_skills(
subtopic.skill_ids))[0]
skills_list = []
for skill_id in subtopic.skill_ids:
skill_dict = {
'skill_id': skill_id,
'skill_description': skills[skill_id]
}
skills_list.append(skill_dict)
categorized_skills_dict[topic.name][
subtopic.title] = skills_list
for skill_summary_dict in skill_summary_dicts:
skill_id = skill_summary_dict['id']
if (skill_id not in skill_ids_assigned_to_some_topic) and (
skill_id not in merged_skill_ids):
untriaged_skill_summary_dicts.append(skill_summary_dict)
if (skill_id in skill_ids_assigned_to_some_topic) and (
skill_id not in merged_skill_ids):
mergeable_skill_summary_dicts.append(skill_summary_dict)
can_delete_topic = (
role_services.ACTION_DELETE_TOPIC in self.user.actions)
can_create_topic = (
role_services.ACTION_CREATE_NEW_TOPIC in self.user.actions)
can_delete_skill = (
role_services.ACTION_DELETE_ANY_SKILL in self.user.actions)
can_create_skill = (
role_services.ACTION_CREATE_NEW_SKILL in self.user.actions)
self.values.update({
'untriaged_skill_summary_dicts': untriaged_skill_summary_dicts,
'mergeable_skill_summary_dicts': mergeable_skill_summary_dicts,
'topic_summary_dicts': topic_summary_dicts,
'total_skill_count': len(skill_summary_dicts),
'all_classroom_names': all_classroom_names,
'can_delete_topic': can_delete_topic,
'can_create_topic': can_create_topic,
'can_delete_skill': can_delete_skill,
'can_create_skill': can_create_skill,
'categorized_skills_dict': categorized_skills_dict
})
self.render_json(self.values)
class TopicAssignmentsHandler(base.BaseHandler):
"""Provides information about which topics contain the given skill."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {
'skill_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'GET': {}
}
@acl_decorators.can_access_topics_and_skills_dashboard
def get(self, skill_id):
"""Handles GET requests."""
topic_assignments = skill_services.get_all_topic_assignments_for_skill(
skill_id)
topic_assignment_dicts = [
topic_assignment.to_dict()
for topic_assignment in topic_assignments]
self.render_json({
'topic_assignment_dicts': topic_assignment_dicts
})
class SkillsDashboardPageDataHandler(base.BaseHandler):
"""Provides data for the user's skills dashboard page."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'classroom_name': {
'schema': {
'type': 'basestring'
}
},
'next_cursor': {
'schema': {
'type': 'basestring'
},
'default_value': None
},
'keywords': {
'schema': {
'type': 'list',
'items': {
'type': 'basestring'
}
}
},
'num_skills_to_fetch': {
'schema': {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 1
}]
}
},
'sort': {
'schema': {
'type': 'basestring'
},
'choices': constants.TOPIC_SKILL_DASHBOARD_SORT_OPTIONS
},
'status': {
'schema': {
'type': 'basestring'
},
'choices': constants.SKILL_STATUS_OPTIONS
}
}
}
@acl_decorators.can_access_topics_and_skills_dashboard
def post(self):
"""Handles POST requests."""
classroom_name = self.normalized_payload.get('classroom_name')
urlsafe_start_cursor = self.normalized_payload.get('next_cursor')
keywords = self.normalized_payload.get('keywords')
num_skills_to_fetch = self.normalized_payload.get('num_skills_to_fetch')
sort_by = self.normalized_payload.get('sort')
status = self.normalized_payload.get('status')
skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
num_skills_to_fetch, status, classroom_name,
keywords, sort_by, urlsafe_start_cursor))
skill_summary_dicts = [summary.to_dict() for summary in skill_summaries]
self.render_json({
'skill_summary_dicts': skill_summary_dicts,
'next_cursor': next_cursor,
'more': more,
})
class NewTopicHandler(base.BaseHandler):
"""Creates a new topic."""
@acl_decorators.can_create_topic
def post(self):
"""Handles POST requests."""
name = self.payload.get('name')
url_fragment = self.payload.get('url_fragment')
description = self.payload.get('description')
thumbnail_filename = self.payload.get('filename')
thumbnail_bg_color = self.payload.get('thumbnailBgColor')
raw_image = self.request.get('image')
try:
topic_domain.Topic.require_valid_name(name)
except Exception as e:
raise self.InvalidInputException(
'Invalid topic name, received %s.' % name) from e
new_topic_id = topic_fetchers.get_new_topic_id()
topic = topic_domain.Topic.create_default_topic(
new_topic_id, name, url_fragment, description)
topic_services.save_new_topic(self.user_id, topic)
try:
file_format = image_validation_services.validate_image_and_filename(
raw_image, thumbnail_filename)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
entity_id = new_topic_id
filename_prefix = 'thumbnail'
image_is_compressible = (
file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
fs_services.save_original_and_compressed_versions_of_image(
thumbnail_filename, feconf.ENTITY_TYPE_TOPIC, entity_id, raw_image,
filename_prefix, image_is_compressible)
topic_services.update_topic_and_subtopic_pages(
self.user_id, new_topic_id, [topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'thumbnail_filename',
'old_value': None,
'new_value': thumbnail_filename
}), topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'thumbnail_bg_color',
'old_value': None,
'new_value': thumbnail_bg_color
}), ], 'Add topic thumbnail.')
self.render_json({
'topicId': new_topic_id
})
class NewSkillHandler(base.BaseHandler):
"""Creates a new skill."""
@acl_decorators.can_create_skill
def post(self):
description = self.payload.get('description')
linked_topic_ids = self.payload.get('linked_topic_ids')
explanation_dict = self.payload.get('explanation_dict')
rubrics = self.payload.get('rubrics')
if not isinstance(rubrics, list):
raise self.InvalidInputException('Rubrics should be a list.')
if not isinstance(explanation_dict, dict):
raise self.InvalidInputException(
'Explanation should be a dict.')
try:
subtitled_html = (
state_domain.SubtitledHtml.from_dict(explanation_dict))
subtitled_html.validate()
except Exception as e:
raise self.InvalidInputException(
'Explanation should be a valid SubtitledHtml dict.'
) from e
rubrics = [skill_domain.Rubric.from_dict(rubric) for rubric in rubrics]
new_skill_id = skill_services.get_new_skill_id()
if linked_topic_ids is not None:
topics = topic_fetchers.get_topics_by_ids(linked_topic_ids)
for topic in topics:
if topic is None:
raise self.InvalidInputException
topic_services.add_uncategorized_skill(
self.user_id, topic.id, new_skill_id)
skill_domain.Skill.require_valid_description(description)
if skill_services.does_skill_with_description_exist(description):
raise self.InvalidInputException(
'Skill description should not be a duplicate.')
skill = skill_domain.Skill.create_default_skill(
new_skill_id, description, rubrics)
skill.update_explanation(
state_domain.SubtitledHtml.from_dict(explanation_dict))
image_filenames = skill_services.get_image_filenames_from_skill(skill)
skill_services.save_new_skill(self.user_id, skill)
image_validation_error_message_suffix = (
'Please go to oppia.org/skill_editor/%s to edit '
'the image.' % skill.id)
for filename in image_filenames:
image = self.request.get(filename)
if not image:
                logging.error(
'Image not provided for file with name %s when the skill '
'with id %s was created.' % (filename, skill.id))
raise self.InvalidInputException(
'No image data provided for file with name %s. %s'
% (filename, image_validation_error_message_suffix))
try:
file_format = (
image_validation_services.validate_image_and_filename(
image, filename))
except utils.ValidationError as e:
e = '%s %s' % (e, image_validation_error_message_suffix)
raise self.InvalidInputException(e)
image_is_compressible = (
file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
fs_services.save_original_and_compressed_versions_of_image(
filename, feconf.ENTITY_TYPE_SKILL, skill.id, image,
'image', image_is_compressible)
self.render_json({
'skillId': new_skill_id
})
class MergeSkillHandler(base.BaseHandler):
"""Handles merging of the skills."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_topics_and_skills_dashboard
def post(self):
"""Handles the POST request."""
old_skill_id = self.payload.get('old_skill_id')
new_skill_id = self.payload.get('new_skill_id')
new_skill = skill_fetchers.get_skill_by_id(new_skill_id, strict=False)
if new_skill is None:
raise self.PageNotFoundException(
Exception('The new skill with the given id doesn\'t exist.'))
old_skill = skill_fetchers.get_skill_by_id(old_skill_id, strict=False)
if old_skill is None:
raise self.PageNotFoundException(
Exception('The old skill with the given id doesn\'t exist.'))
skill_services.replace_skill_id_in_all_topics(
self.user_id, old_skill_id, new_skill_id)
question_services.replace_skill_id_for_all_questions(
old_skill_id, old_skill.description, new_skill_id)
changelist = [
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': (
skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID),
'old_value': old_skill.superseding_skill_id,
'new_value': new_skill_id
})
]
skill_services.update_skill(
self.user_id, old_skill_id, changelist,
            'Marking the skill as having been merged successfully.')
skill_services.delete_skill(self.user_id, old_skill_id)
self.render_json({
'merged_into_skill': new_skill_id
})
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PValue, PCollection: one node of a dataflow graph.
A node of a dataflow processing graph is a PValue. Currently, there is only
one type: PCollection (a potentially very large set of arbitrary values).
Once created, a PValue belongs to a pipeline and has an associated
transform (of type PTransform), which describes how the value will be
produced when the pipeline gets executed.
"""
from __future__ import absolute_import
import collections
import itertools
from builtins import hex
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Generic
from typing import Iterator
from typing import Optional
from typing import Sequence
from typing import TypeVar
from typing import Union
from past.builtins import unicode
from apache_beam import typehints
from apache_beam.internal import pickler
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
if TYPE_CHECKING:
from apache_beam.transforms import sideinputs
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import Windowing
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import Pipeline
from apache_beam.runners.pipeline_context import PipelineContext
__all__ = [
'PCollection',
'TaggedOutput',
'AsSingleton',
'AsIter',
'AsList',
'AsDict',
'EmptySideInput',
]
T = TypeVar('T')
class PValue(object):
"""Base class for PCollection.
Dataflow users should not construct PValue objects directly in their
pipelines.
A PValue has the following main characteristics:
(1) Belongs to a pipeline. Added during object initialization.
(2) Has a transform that can compute the value if executed.
(3) Has a value which is meaningful if the transform was executed.
"""
def __init__(self,
pipeline, # type: Pipeline
tag=None, # type: Optional[str]
element_type=None, # type: Optional[type]
windowing=None, # type: Optional[Windowing]
is_bounded=True,
):
"""Initializes a PValue with all arguments hidden behind keyword arguments.
Args:
pipeline: Pipeline object for this PValue.
tag: Tag of this PValue.
element_type: The type of this PValue.
"""
self.pipeline = pipeline
self.tag = tag
self.element_type = element_type
# The AppliedPTransform instance for the application of the PTransform
# generating this PValue. The field gets initialized when a transform
# gets applied.
self.producer = None # type: Optional[AppliedPTransform]
self.is_bounded = is_bounded
if windowing:
self._windowing = windowing
def __str__(self):
return self._str_internal()
def __repr__(self):
return '<%s at %s>' % (self._str_internal(), hex(id(self)))
def _str_internal(self):
return "%s[%s.%s]" % (self.__class__.__name__,
self.producer.full_label if self.producer else None,
self.tag)
def apply(self, *args, **kwargs):
"""Applies a transform or callable to a PValue.
Args:
*args: positional arguments.
**kwargs: keyword arguments.
The method will insert the pvalue as the next argument following an
optional first label and a transform/callable object. It will call the
pipeline.apply() method with this modified argument list.
"""
arglist = list(args)
arglist.insert(1, self)
return self.pipeline.apply(*arglist, **kwargs)
def __or__(self, ptransform):
return self.pipeline.apply(ptransform, self)
class PCollection(PValue, Generic[T]):
"""A multiple values (potentially huge) container.
Dataflow users should not construct PCollection objects directly in their
pipelines.
"""
def __eq__(self, other):
if isinstance(other, PCollection):
return self.tag == other.tag and self.producer == other.producer
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash((self.tag, self.producer))
@property
def windowing(self):
# type: () -> Windowing
if not hasattr(self, '_windowing'):
self._windowing = self.producer.transform.get_windowing(
self.producer.inputs)
return self._windowing
def __reduce_ex__(self, unused_version):
# Pickling a PCollection is almost always the wrong thing to do, but we
# can't prohibit it as it often gets implicitly picked up (e.g. as part
# of a closure).
return _InvalidUnpickledPCollection, ()
@staticmethod
def from_(pcoll):
# type: (PValue) -> PCollection
"""Create a PCollection, using another PCollection as a starting point.
Transfers relevant attributes.
"""
return PCollection(pcoll.pipeline, is_bounded=pcoll.is_bounded)
def to_runner_api(self, context):
# type: (PipelineContext) -> beam_runner_api_pb2.PCollection
return beam_runner_api_pb2.PCollection(
unique_name=self._unique_name(),
coder_id=context.coder_id_from_element_type(self.element_type),
is_bounded=beam_runner_api_pb2.IsBounded.BOUNDED
if self.is_bounded
else beam_runner_api_pb2.IsBounded.UNBOUNDED,
windowing_strategy_id=context.windowing_strategies.get_id(
self.windowing))
def _unique_name(self):
# type: () -> str
if self.producer:
return '%d%s.%s' % (
len(self.producer.full_label), self.producer.full_label, self.tag)
else:
return 'PCollection%s' % id(self)
@staticmethod
def from_runner_api(proto, context):
# type: (beam_runner_api_pb2.PCollection, PipelineContext) -> PCollection
# Producer and tag will be filled in later, the key point is that the
# same object is returned for the same pcollection id.
return PCollection(
None,
element_type=context.element_type_from_coder_id(proto.coder_id),
windowing=context.windowing_strategies.get_by_id(
proto.windowing_strategy_id),
is_bounded=proto.is_bounded == beam_runner_api_pb2.IsBounded.BOUNDED)
class _InvalidUnpickledPCollection(object):
pass
class PBegin(PValue):
"""A pipeline begin marker used as input to create/read transforms.
The class is used internally to represent inputs to Create and Read
transforms. This allows us to have transforms that uniformly take PValue(s)
as inputs.
"""
pass
class PDone(PValue):
"""PDone is the output of a transform that has a trivial result such as Write.
"""
pass
class DoOutputsTuple(object):
"""An object grouping the multiple outputs of a ParDo or FlatMap transform."""
def __init__(self,
pipeline, # type: Pipeline
transform, # type: ParDo
tags, # type: Sequence[str]
main_tag # type: Optional[str]
):
self._pipeline = pipeline
self._tags = tags
self._main_tag = main_tag
self._transform = transform
    # The AppliedPTransform instance for the application of the multi FlatMap
# generating this value. The field gets initialized when a transform
# gets applied.
self.producer = None # type: Optional[AppliedPTransform]
# Dictionary of PCollections already associated with tags.
self._pcolls = {} # type: Dict[Optional[str], PValue]
def __str__(self):
return '<%s>' % self._str_internal()
def __repr__(self):
return '<%s at %s>' % (self._str_internal(), hex(id(self)))
def _str_internal(self):
return '%s main_tag=%s tags=%s transform=%s' % (
self.__class__.__name__, self._main_tag, self._tags, self._transform)
def __iter__(self):
# type: () -> Iterator[PValue]
"""Iterates over tags returning for each call a (tag, pvalue) pair."""
if self._main_tag is not None:
yield self[self._main_tag]
for tag in self._tags:
yield self[tag]
def __getattr__(self, tag):
# type: (str) -> PValue
# Special methods which may be accessed before the object is
# fully constructed (e.g. in unpickling).
if tag[:2] == tag[-2:] == '__':
return object.__getattr__(self, tag) # type: ignore
return self[tag]
def __getitem__(self, tag):
# type: (Union[int, str, None]) -> PValue
# Accept int tags so that we can look at Partition tags with the
# same ints that we used in the partition function.
# TODO(gildea): Consider requiring string-based tags everywhere.
# This will require a partition function that does not return ints.
if isinstance(tag, int):
tag = str(tag)
if tag == self._main_tag:
tag = None
elif self._tags and tag not in self._tags:
raise ValueError(
"Tag '%s' is neither the main tag '%s' "
"nor any of the tags %s" % (
tag, self._main_tag, self._tags))
# Check if we accessed this tag before.
if tag in self._pcolls:
return self._pcolls[tag]
assert self.producer is not None
if tag is not None:
self._transform.output_tags.add(tag)
pcoll = PCollection(self._pipeline, tag=tag, element_type=typehints.Any) # type: PValue
# Transfer the producer from the DoOutputsTuple to the resulting
# PCollection.
pcoll.producer = self.producer.parts[0]
# Add this as an output to both the inner ParDo and the outer _MultiParDo
# PTransforms.
if tag not in self.producer.parts[0].outputs:
self.producer.parts[0].add_output(pcoll, tag)
self.producer.add_output(pcoll, tag)
else:
# Main output is output of inner ParDo.
pcoll = self.producer.parts[0].outputs[None]
self._pcolls[tag] = pcoll
return pcoll
class TaggedOutput(object):
"""An object representing a tagged value.
ParDo, Map, and FlatMap transforms can emit values on multiple outputs which
are distinguished by string tags. The DoFn will return plain values
if it wants to emit on the main output and TaggedOutput objects
if it wants to emit a value on a specific tagged output.
"""
def __init__(self, tag, value):
# type: (str, Any) -> None
if not isinstance(tag, (str, unicode)):
raise TypeError(
'Attempting to create a TaggedOutput with non-string tag %s' % (tag,))
self.tag = tag
self.value = value
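# A minimal sketch of how TaggedOutput is typically consumed (the DoFn,
# pipeline, and the `numbers` PCollection below are hypothetical):
#
#   import apache_beam as beam
#   from apache_beam import pvalue
#
#   class SplitEvenOdd(beam.DoFn):
#     def process(self, element):
#       if element % 2 == 0:
#         yield element                              # main output
#       else:
#         yield pvalue.TaggedOutput('odd', element)  # tagged output
#
#   results = numbers | beam.ParDo(SplitEvenOdd()).with_outputs(
#       'odd', main='even')
#   evens, odds = results.even, results.odd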
class AsSideInput(object):
"""Marker specifying that a PCollection will be used as a side input.
When a PCollection is supplied as a side input to a PTransform, it is
necessary to indicate how the PCollection should be made available
as a PTransform side argument (e.g. in the form of an iterable, mapping,
or single value). This class is the superclass of all the various
options, and should not be instantiated directly. (See instead AsSingleton,
AsIter, etc.)
"""
def __init__(self, pcoll):
# type: (PCollection) -> None
from apache_beam.transforms import sideinputs
self.pvalue = pcoll
self._window_mapping_fn = sideinputs.default_window_mapping_fn(
pcoll.windowing.windowfn)
def _view_options(self):
"""Internal options corresponding to specific view.
Intended for internal use by runner implementations.
Returns:
Tuple of options for the given view.
"""
return {'window_mapping_fn': self._window_mapping_fn}
@property
def element_type(self):
return typehints.Any
# TODO(robertwb): Get rid of _from_runtime_iterable and _view_options
# in favor of _side_input_data().
def _side_input_data(self):
# type: () -> SideInputData
view_options = self._view_options()
from_runtime_iterable = type(self)._from_runtime_iterable
return SideInputData(
common_urns.side_inputs.ITERABLE.urn,
self._window_mapping_fn,
lambda iterable: from_runtime_iterable(iterable, view_options))
def to_runner_api(self, context):
# type: (PipelineContext) -> beam_runner_api_pb2.SideInput
return self._side_input_data().to_runner_api(context)
@staticmethod
def from_runner_api(proto, # type: beam_runner_api_pb2.SideInput
context # type: PipelineContext
):
# type: (...) -> _UnpickledSideInput
return _UnpickledSideInput(
SideInputData.from_runner_api(proto, context))
@staticmethod
def _from_runtime_iterable(it, options):
raise NotImplementedError
def requires_keyed_input(self):
return False
class _UnpickledSideInput(AsSideInput):
def __init__(self, side_input_data):
# type: (SideInputData) -> None
self._data = side_input_data
self._window_mapping_fn = side_input_data.window_mapping_fn
@staticmethod
def _from_runtime_iterable(it, options):
return options['data'].view_fn(it)
def _view_options(self):
return {
'data': self._data,
# For non-fn-api runners.
'window_mapping_fn': self._data.window_mapping_fn,
}
def _side_input_data(self):
return self._data
class SideInputData(object):
"""All of the data about a side input except for the bound PCollection."""
def __init__(self,
access_pattern, # type: str
window_mapping_fn, # type: sideinputs.WindowMappingFn
view_fn
):
self.access_pattern = access_pattern
self.window_mapping_fn = window_mapping_fn
self.view_fn = view_fn
def to_runner_api(self, context):
# type: (PipelineContext) -> beam_runner_api_pb2.SideInput
return beam_runner_api_pb2.SideInput(
access_pattern=beam_runner_api_pb2.FunctionSpec(
urn=self.access_pattern),
view_fn=beam_runner_api_pb2.SdkFunctionSpec(
environment_id=context.default_environment_id(),
spec=beam_runner_api_pb2.FunctionSpec(
urn=python_urns.PICKLED_VIEWFN,
payload=pickler.dumps(self.view_fn))),
window_mapping_fn=beam_runner_api_pb2.SdkFunctionSpec(
environment_id=context.default_environment_id(),
spec=beam_runner_api_pb2.FunctionSpec(
urn=python_urns.PICKLED_WINDOW_MAPPING_FN,
payload=pickler.dumps(self.window_mapping_fn))))
@staticmethod
def from_runner_api(proto, unused_context):
# type: (beam_runner_api_pb2.SideInput, PipelineContext) -> SideInputData
assert proto.view_fn.spec.urn == python_urns.PICKLED_VIEWFN
assert (proto.window_mapping_fn.spec.urn ==
python_urns.PICKLED_WINDOW_MAPPING_FN)
return SideInputData(
proto.access_pattern.urn,
pickler.loads(proto.window_mapping_fn.spec.payload),
pickler.loads(proto.view_fn.spec.payload))
class AsSingleton(AsSideInput):
"""Marker specifying that an entire PCollection is to be used as a side input.
When a PCollection is supplied as a side input to a PTransform, it is
necessary to indicate whether the entire PCollection should be made available
as a PTransform side argument (in the form of an iterable), or whether just
one value should be pulled from the PCollection and supplied as the side
argument (as an ordinary value).
Wrapping a PCollection side input argument to a PTransform in this container
  (e.g., data.apply('label', MyPTransform(), AsSingleton(my_side_input)))
selects the latter behavior.
The input PCollection must contain exactly one value per window, unless a
default is given, in which case it may be empty.
"""
_NO_DEFAULT = object()
def __init__(self, pcoll, default_value=_NO_DEFAULT):
# type: (PCollection, Any) -> None
super(AsSingleton, self).__init__(pcoll)
self.default_value = default_value
def __repr__(self):
return 'AsSingleton(%s)' % self.pvalue
def _view_options(self):
base = super(AsSingleton, self)._view_options()
if self.default_value != AsSingleton._NO_DEFAULT:
return dict(base, default=self.default_value)
return base
@staticmethod
def _from_runtime_iterable(it, options):
head = list(itertools.islice(it, 2))
if not head:
return options.get('default', EmptySideInput())
elif len(head) == 1:
return head[0]
raise ValueError(
'PCollection of size %d with more than one element accessed as a '
'singleton view. First two elements encountered are "%s", "%s".' % (
len(head), str(head[0]), str(head[1])))
@property
def element_type(self):
return self.pvalue.element_type
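# A minimal sketch of AsSingleton in use (pipeline contents are hypothetical):
#
#   import apache_beam as beam
#   from apache_beam import pvalue
#
#   with beam.Pipeline() as p:
#     main = p | 'Main' >> beam.Create([1, 2, 3])
#     offset = p | 'Offset' >> beam.Create([10])
#     shifted = main | beam.Map(
#         lambda x, off: x + off, off=pvalue.AsSingleton(offset))
#     # shifted contains 11, 12, 13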
class AsIter(AsSideInput):
"""Marker specifying that an entire PCollection is to be used as a side input.
When a PCollection is supplied as a side input to a PTransform, it is
necessary to indicate whether the entire PCollection should be made available
as a PTransform side argument (in the form of an iterable), or whether just
one value should be pulled from the PCollection and supplied as the side
argument (as an ordinary value).
Wrapping a PCollection side input argument to a PTransform in this container
  (e.g., data.apply('label', MyPTransform(), AsIter(my_side_input))) selects
  the former behavior.
"""
def __repr__(self):
return 'AsIter(%s)' % self.pvalue
@staticmethod
def _from_runtime_iterable(it, options):
return it
def _side_input_data(self):
# type: () -> SideInputData
return SideInputData(
common_urns.side_inputs.ITERABLE.urn,
self._window_mapping_fn,
lambda iterable: iterable)
@property
def element_type(self):
return typehints.Iterable[self.pvalue.element_type]
class AsList(AsSideInput):
"""Marker specifying that an entire PCollection is to be used as a side input.
Intended for use in side-argument specification---the same places where
AsSingleton and AsIter are used, but forces materialization of this
PCollection as a list.
Args:
pcoll: Input pcollection.
Returns:
An AsList-wrapper around a PCollection whose one element is a list
containing all elements in pcoll.
"""
@staticmethod
def _from_runtime_iterable(it, options):
return list(it)
def _side_input_data(self):
# type: () -> SideInputData
return SideInputData(
common_urns.side_inputs.ITERABLE.urn,
self._window_mapping_fn,
list)
class AsDict(AsSideInput):
"""Marker specifying a PCollection to be used as an indexable side input.
Intended for use in side-argument specification---the same places where
AsSingleton and AsIter are used, but returns an interface that allows
key lookup.
Args:
pcoll: Input pcollection. All elements should be key-value pairs (i.e.
2-tuples) with unique keys.
Returns:
An AsDict-wrapper around a PCollection whose one element is a dict with
entries for uniquely-keyed pairs in pcoll.
"""
@staticmethod
def _from_runtime_iterable(it, options):
return dict(it)
def _side_input_data(self):
# type: () -> SideInputData
return SideInputData(
common_urns.side_inputs.ITERABLE.urn,
self._window_mapping_fn,
dict)
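# A minimal sketch of AsDict in use (pipeline and element values below are
# hypothetical); the side PCollection must consist of uniquely-keyed 2-tuples:
#
#   import apache_beam as beam
#   from apache_beam import pvalue
#
#   with beam.Pipeline() as p:
#     prices = p | 'Prices' >> beam.Create([('apple', 1.0), ('pear', 2.5)])
#     names = p | 'Names' >> beam.Create(['apple', 'pear'])
#     priced = names | beam.Map(
#         lambda name, table: (name, table[name]),
#         table=pvalue.AsDict(prices))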
class AsMultiMap(AsSideInput):
"""Marker specifying a PCollection to be used as an indexable side input.
Similar to AsDict, but multiple values may be associated per key, and
the keys are fetched lazily rather than all having to fit in memory.
Intended for use in side-argument specification---the same places where
AsSingleton and AsIter are used, but returns an interface that allows
key lookup.
"""
@staticmethod
def _from_runtime_iterable(it, options):
# Legacy implementation.
result = collections.defaultdict(list)
for k, v in it:
result[k].append(v)
return result
def _side_input_data(self):
# type: () -> SideInputData
return SideInputData(
common_urns.side_inputs.MULTIMAP.urn,
self._window_mapping_fn,
lambda x: x)
def requires_keyed_input(self):
return True
class EmptySideInput(object):
"""Value indicating when a singleton side input was empty.
If a PCollection was furnished as a singleton side input to a PTransform, and
that PCollection was empty, then this value is supplied to the DoFn in the
place where a value from a non-empty PCollection would have gone. This alerts
the DoFn that the side input PCollection was empty. Users may want to check
whether side input values are EmptySideInput, but they will very likely never
want to create new instances of this class themselves.
"""
pass
|
|
"""Models for SQLAlchemy."""
import json
import logging
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Index,
Integer,
String,
Text,
distinct,
)
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm.session import Session
from homeassistant.const import MAX_LENGTH_EVENT_TYPE
from homeassistant.core import Context, Event, EventOrigin, State, split_entity_id
from homeassistant.helpers.json import JSONEncoder
import homeassistant.util.dt as dt_util
# SQLAlchemy Schema
# pylint: disable=invalid-name
Base = declarative_base()
SCHEMA_VERSION = 14
_LOGGER = logging.getLogger(__name__)
DB_TIMEZONE = "+00:00"
TABLE_EVENTS = "events"
TABLE_STATES = "states"
TABLE_RECORDER_RUNS = "recorder_runs"
TABLE_SCHEMA_CHANGES = "schema_changes"
ALL_TABLES = [TABLE_STATES, TABLE_EVENTS, TABLE_RECORDER_RUNS, TABLE_SCHEMA_CHANGES]
DATETIME_TYPE = DateTime(timezone=True).with_variant(
mysql.DATETIME(timezone=True, fsp=6), "mysql"
)
class Events(Base): # type: ignore
"""Event history data."""
__tablename__ = TABLE_EVENTS
event_id = Column(Integer, primary_key=True)
event_type = Column(String(MAX_LENGTH_EVENT_TYPE))
event_data = Column(Text().with_variant(mysql.LONGTEXT, "mysql"))
origin = Column(String(32))
time_fired = Column(DATETIME_TYPE, index=True)
created = Column(DATETIME_TYPE, default=dt_util.utcnow)
context_id = Column(String(36), index=True)
context_user_id = Column(String(36), index=True)
context_parent_id = Column(String(36), index=True)
    __table_args__ = (
        # Used for fetching events at a specific time
        # see logbook
        Index("ix_events_event_type_time_fired", "event_type", "time_fired"),
        {"mysql_default_charset": "utf8mb4", "mysql_collate": "utf8mb4_unicode_ci"},
    )
def __repr__(self) -> str:
"""Return string representation of instance for debugging."""
return (
f"<recorder.Events("
f"id={self.event_id}, type='{self.event_type}', data='{self.event_data}', "
f"origin='{self.origin}', time_fired='{self.time_fired}'"
f")>"
)
@staticmethod
def from_event(event, event_data=None):
"""Create an event database object from a native event."""
return Events(
event_type=event.event_type,
event_data=event_data or json.dumps(event.data, cls=JSONEncoder),
origin=str(event.origin.value),
time_fired=event.time_fired,
context_id=event.context.id,
context_user_id=event.context.user_id,
context_parent_id=event.context.parent_id,
)
def to_native(self, validate_entity_id=True):
"""Convert to a natve HA Event."""
context = Context(
id=self.context_id,
user_id=self.context_user_id,
parent_id=self.context_parent_id,
)
try:
return Event(
self.event_type,
json.loads(self.event_data),
EventOrigin(self.origin),
process_timestamp(self.time_fired),
context=context,
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting to event: %s", self)
return None
class States(Base): # type: ignore
"""State change history."""
__tablename__ = TABLE_STATES
state_id = Column(Integer, primary_key=True)
domain = Column(String(64))
entity_id = Column(String(255))
state = Column(String(255))
attributes = Column(Text().with_variant(mysql.LONGTEXT, "mysql"))
event_id = Column(
Integer, ForeignKey("events.event_id", ondelete="CASCADE"), index=True
)
last_changed = Column(DATETIME_TYPE, default=dt_util.utcnow)
last_updated = Column(DATETIME_TYPE, default=dt_util.utcnow, index=True)
created = Column(DATETIME_TYPE, default=dt_util.utcnow)
old_state_id = Column(
Integer, ForeignKey("states.state_id", ondelete="NO ACTION"), index=True
)
event = relationship("Events", uselist=False)
old_state = relationship("States", remote_side=[state_id])
    __table_args__ = (
        # Used for fetching the state of entities at a specific time
        # (get_states in history.py)
        Index("ix_states_entity_id_last_updated", "entity_id", "last_updated"),
        {"mysql_default_charset": "utf8mb4", "mysql_collate": "utf8mb4_unicode_ci"},
    )
def __repr__(self) -> str:
"""Return string representation of instance for debugging."""
return (
f"<recorder.States("
f"id={self.state_id}, domain='{self.domain}', entity_id='{self.entity_id}', "
f"state='{self.state}', event_id='{self.event_id}', "
f"last_updated='{self.last_updated.isoformat(sep=' ', timespec='seconds')}', "
f"old_state_id={self.old_state_id}"
f")>"
)
@staticmethod
def from_event(event):
"""Create object from a state_changed event."""
entity_id = event.data["entity_id"]
state = event.data.get("new_state")
dbstate = States(entity_id=entity_id)
# State got deleted
if state is None:
dbstate.state = ""
dbstate.domain = split_entity_id(entity_id)[0]
dbstate.attributes = "{}"
dbstate.last_changed = event.time_fired
dbstate.last_updated = event.time_fired
else:
dbstate.domain = state.domain
dbstate.state = state.state
dbstate.attributes = json.dumps(dict(state.attributes), cls=JSONEncoder)
dbstate.last_changed = state.last_changed
dbstate.last_updated = state.last_updated
return dbstate
def to_native(self, validate_entity_id=True):
"""Convert to an HA state object."""
try:
return State(
self.entity_id,
self.state,
json.loads(self.attributes),
process_timestamp(self.last_changed),
process_timestamp(self.last_updated),
# Join the events table on event_id to get the context instead
# as it will always be there for state_changed events
context=Context(id=None),
validate_entity_id=validate_entity_id,
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting row to state: %s", self)
return None
class RecorderRuns(Base): # type: ignore
"""Representation of recorder run."""
__tablename__ = TABLE_RECORDER_RUNS
run_id = Column(Integer, primary_key=True)
start = Column(DateTime(timezone=True), default=dt_util.utcnow)
end = Column(DateTime(timezone=True))
closed_incorrect = Column(Boolean, default=False)
created = Column(DateTime(timezone=True), default=dt_util.utcnow)
__table_args__ = (Index("ix_recorder_runs_start_end", "start", "end"),)
def __repr__(self) -> str:
"""Return string representation of instance for debugging."""
end = (
f"'{self.end.isoformat(sep=' ', timespec='seconds')}'" if self.end else None
)
return (
f"<recorder.RecorderRuns("
f"id={self.run_id}, start='{self.start.isoformat(sep=' ', timespec='seconds')}', "
f"end={end}, closed_incorrect={self.closed_incorrect}, "
f"created='{self.created.isoformat(sep=' ', timespec='seconds')}'"
f")>"
)
def entity_ids(self, point_in_time=None):
"""Return the entity ids that existed in this run.
Specify point_in_time if you want to know which existed at that point
in time inside the run.
"""
session = Session.object_session(self)
assert session is not None, "RecorderRuns need to be persisted"
query = session.query(distinct(States.entity_id)).filter(
States.last_updated >= self.start
)
if point_in_time is not None:
query = query.filter(States.last_updated < point_in_time)
elif self.end is not None:
query = query.filter(States.last_updated < self.end)
return [row[0] for row in query]
def to_native(self, validate_entity_id=True):
"""Return self, native format is this model."""
return self
class SchemaChanges(Base): # type: ignore
"""Representation of schema version changes."""
__tablename__ = TABLE_SCHEMA_CHANGES
change_id = Column(Integer, primary_key=True)
schema_version = Column(Integer)
changed = Column(DateTime(timezone=True), default=dt_util.utcnow)
def __repr__(self) -> str:
"""Return string representation of instance for debugging."""
return (
f"<recorder.SchemaChanges("
f"id={self.change_id}, schema_version={self.schema_version}, "
f"changed='{self.changed.isoformat(sep=' ', timespec='seconds')}'"
f")>"
)
def process_timestamp(ts):
"""Process a timestamp into datetime object."""
if ts is None:
return None
if ts.tzinfo is None:
return ts.replace(tzinfo=dt_util.UTC)
return dt_util.as_utc(ts)
def process_timestamp_to_utc_isoformat(ts):
"""Process a timestamp into UTC isotime."""
if ts is None:
return None
if ts.tzinfo == dt_util.UTC:
return ts.isoformat()
if ts.tzinfo is None:
return f"{ts.isoformat()}{DB_TIMEZONE}"
return ts.astimezone(dt_util.UTC).isoformat()
|
|
# This module contains the analysis options
# DEBUG options: these options cause SimuVEX to set breakpoints in various
# places or raise exceptions when checks fail.
BREAK_SIRSB_START = "BREAK_SIRSB_START"
BREAK_SIRSB_END = "BREAK_SIRSB_END"
BREAK_SIRSTMT_START = "BREAK_SIRSTMT_START"
BREAK_SIRSTMT_END = "BREAK_SIRSTMT_END"
VALIDATE_APPROXIMATIONS = "VALIDATE_APPROXIMATIONS"
# This makes SimIRSBs do a fastpath analysis, only recovering direct jumps.
SIMIRSB_FASTPATH = "SIMIRSB_FASTPATH"
# This makes all exits report themselves as "reachable" (to get a more complete CFG)
IGNORE_EXIT_GUARDS = "IGNORE_EXIT_GUARDS"
# This option controls whether register puts are carried out by the analysis.
# Without this, put statements are still analyzed, but the state is not updated.
DO_PUTS = "DO_PUTS"
# This option controls whether register gets (reads) are carried out by the analysis.
# Without this, get statements are still analyzed, but the state is not updated.
DO_GETS = "DO_GETS"
# This option controls whether memory stores are carried out by the analysis
# Without this, store statements are still analyzed, but the state is not updated.
DO_STORES = "DO_STORES"
# This option controls whether memory loads are carried out by the analysis
# Without this, load statements are still analyzed, but the state is not updated.
DO_LOADS = "DO_LOADS"
# This option controls whether or not constraints are tracked in the analysis.
TRACK_CONSTRAINTS = "TRACK_CONSTRAINTS"
# This option causes constraints to be flushed at the beginning of every instruction.
INSTRUCTION_SCOPE_CONSTRAINTS = "INSTRUCTION_SCOPE_CONSTRAINTS"
BLOCK_SCOPE_CONSTRAINTS = "BLOCK_SCOPE_CONSTRAINTS"
# This option controls whether or not various entities (IRExpr constants, reads, writes, etc) get simplified automatically
SIMPLIFY_EXPRS = "SIMPLIFY_EXPRS"
SIMPLIFY_MEMORY_READS = "SIMPLIFY_MEMORY_READS"
SIMPLIFY_MEMORY_WRITES = "SIMPLIFY_MEMORY_WRITES"
SIMPLIFY_REGISTER_READS = "SIMPLIFY_REGISTER_READS"
SIMPLIFY_REGISTER_WRITES = "SIMPLIFY_REGISTER_WRITES"
SIMPLIFY_RETS = "SIMPLIFY_RETS"
SIMPLIFY_EXIT_STATE = "SIMPLIFY_EXIT_STATE"
SIMPLIFY_EXIT_TARGET = "SIMPLIFY_EXIT_TARGET"
SIMPLIFY_EXIT_GUARD = "SIMPLIFY_EXIT_GUARD"
SIMPLIFY_CONSTRAINTS = "SIMPLIFY_CONSTRAINTS"
# This option controls whether Unop, BinOp, TriOp, and QOp expressions are executed by the analysis.
# Without this, the statements are still analyzed, but the result remains a purely symbolic value.
DO_OPS = "DO_OPS"
# This option controls whether the helper functions are actually executed for CCALL expressions.
# Without this, the arguments are parsed, but the calls aren't executed, and an unconstrained symbolic
# variable is returned, instead.
DO_CCALLS = "DO_CCALLS"
# Whether we should use the simplified ccalls or not.
USE_SIMPLIFIED_CCALLS = "USE_SIMPLIFIED_CCALLS"
# This option controls whether or not emulated exits and coderefs are added from a call instruction to its ret site.
DO_RET_EMULATION = "DO_RET_EMULATION"
# If this option is present, the guards to emulated ret exits are True instead of False
TRUE_RET_EMULATION_GUARD = "TRUE_RET_EMULATION_GUARD"
# This option causes the analysis to immediately concretize any symbol that it comes across
CONCRETIZE = "CONCRETIZE"
# This option causes the analysis to stop executing a basic block when the first taken exit is encountered.
SINGLE_EXIT = "SINGLE_EXIT"
# This option prevents simuvex from doing hundreds of constraint solves to resolve symbolic jump targets
NO_SYMBOLIC_JUMP_RESOLUTION = "NO_SYMBOLIC_JUMP_RESOLUTION"
# This option prevents simuvex from doing hundreds of constraint solves when it hits a symbolic syscall
NO_SYMBOLIC_SYSCALL_RESOLUTION = "NO_SYMBOLIC_SYSCALL_RESOLUTION"
# The absence of this option causes the analysis to avoid reasoning about most symbolic values.
SYMBOLIC = "SYMBOLIC"
# Generate symbolic values for non-existent values. The absence of this option causes Unconstrained() to return default concrete values (like 0)
SYMBOLIC_INITIAL_VALUES = "SYMBOLIC_INITIAL_VALUES"
# this causes SimuVEX to use SimAbstractMemory for the memory region
ABSTRACT_MEMORY = "ABSTRACT_MEMORY"
# This causes symbolic memory to avoid performing symbolic reads and writes. Unconstrained results
# are returned instead, if these options are present.
AVOID_MULTIVALUED_READS = "AVOID_SYMBOLIC_READS"
AVOID_MULTIVALUED_WRITES = "AVOID_SYMBOLIC_WRITES"
# This option concretizes symbolically sized writes
CONCRETIZE_SYMBOLIC_WRITE_SIZES = "CONCRETIZE_SYMBOLIC_WRITE_SIZES"
# This option concretizes the read size if it's symbolic from the file
CONCRETIZE_SYMBOLIC_FILE_READ_SIZES = "CONCRETIZE_SYMBOLIC_FILE_READ_SIZES"
# This causes angr to support fully symbolic writes. It is very likely that speed will suffer.
SYMBOLIC_WRITE_ADDRESSES = "SYMBOLIC_WRITE_ADDRESSES"
# This causes symbolic memory to avoid concretizing memory address to a single value when the
# range check fails.
CONSERVATIVE_WRITE_STRATEGY = "CONSERVATIVE_WRITE_STRATEGY"
CONSERVATIVE_READ_STRATEGY = "CONSERVATIVE_READ_STRATEGY"
# This enables dependency tracking for all Claripy ASTs.
AST_DEPS = "AST_DEPS"
# This controls whether the temps are treated as symbolic values (for easier debugging) or just written as the z3 values
SYMBOLIC_TEMPS = "SYMBOLIC_TEMPS"
# These are options for tracking various types of actions
TRACK_MEMORY_ACTIONS = "TRACK_MEMORY_ACTIONS"
TRACK_REGISTER_ACTIONS = "TRACK_REGISTER_ACTIONS"
TRACK_TMP_ACTIONS = "TRACK_TMP_ACTIONS"
TRACK_JMP_ACTIONS = "TRACK_JMP_ACTIONS"
TRACK_CONSTRAINT_ACTIONS = "TRACK_CONSTRAINT_ACTIONS"
# Note that TRACK_OP_ACTIONS is not enabled in symbolic mode by default, since Yan is worried about its performance
# impact. Someone should measure it and make a final decision.
TRACK_OP_ACTIONS = "TRACK_OP_ACTIONS"
# track the history of actions through a path (multiple states). This option affects things at the angr level
TRACK_ACTION_HISTORY = "TRACK_ACTION_HISTORY"
# track memory mapping and permissions
TRACK_MEMORY_MAPPING = "TRACK_MEMORY_MAPPING"
# this is an internal option to automatically track dependencies in SimProcedures
AUTO_REFS = "AUTO_REFS"
# Whether we should track dependencies in SimActions
# If none of the ref options above exist, this option does nothing
ACTION_DEPS = "ACTION_DEPS"
# This enables the tracking of reverse mappings (name->addr and hash->addr) in SimSymbolicMemory
REVERSE_MEMORY_NAME_MAP = "REVERSE_MEMORY_NAME_MAP"
REVERSE_MEMORY_HASH_MAP = "REVERSE_MEMORY_HASH_MAP"
# This enables tracking of which bytes in the state are symbolic
MEMORY_SYMBOLIC_BYTES_MAP = "MEMORY_SYMBOLIC_BYTES_MAP"
# this makes s_run() copy states
COW_STATES = "COW_STATES"
# this replaces calls with an unconstraining of the return register
CALLLESS = "CALLLESS"
# these enable independent constraint set optimizations. The first is a master toggle, and the second controls
# splitting constraint sets during simplification
COMPOSITE_SOLVER = "COMPOSITE_SOLVER"
ABSTRACT_SOLVER = "ABSTRACT_SOLVER"
PARALLEL_SOLVES = "PARALLEL_SOLVES"
# this stops SimRun from checking the satisfiability of successor states
LAZY_SOLVES = "LAZY_SOLVES"
# This controls whether state executes in native or python mode
NATIVE_EXECUTION = "NATIVE_EXECUTION"
# This makes simuvex downsize solvers wherever reasonable.
DOWNSIZE_Z3 = "DOWNSIZE_Z3"
# Concretize certain registers if they're unique
CONCRETIZE_UNIQUE_REGS = "CONCRETIZE_UNIQUE_REGS"
# initialize all registers to 0 when creating the state
INITIALIZE_ZERO_REGISTERS = "INITIALIZE_ZERO_REGISTERS"
# Turn on super-fastpath mode
SUPER_FASTPATH = "SUPER_FASTPATH"
# use FastMemory for memory
FAST_MEMORY = "FAST_MEMORY"
# use FastMemory for registers
FAST_REGISTERS = "FAST_REGISTERS"
# Under-constrained symbolic execution
UNDER_CONSTRAINED_SYMEXEC = "UNDER_CONSTRAINED_SYMEXEC"
# enable unicorn engine
UNICORN = "UNICORN"
UNICORN_ZEROPAGE_GUARD = "UNICORN_ZEROPAGE_GUARD"
UNICORN_SYM_REGS_SUPPORT = "UNICORN_SYM_REGS_SUPPORT"
# concretize symbolic data when we see it "too often"
UNICORN_THRESHOLD_CONCRETIZATION = "UNICORN_THRESHOLD_CONCRETIZATION"
# aggressively concretize symbolic data when we see it in unicorn
UNICORN_AGGRESSIVE_CONCRETIZATION = "UNICORN_AGGRESSIVE_CONCRETIZATION"
# floating point support
SUPPORT_FLOATING_POINT = "SUPPORT_FLOATING_POINT"
# Resilience options
BYPASS_UNSUPPORTED_IROP = "BYPASS_UNSUPPORTED_IROP"
BYPASS_ERRORED_IROP = "BYPASS_ERRORED_IROP"
BYPASS_UNSUPPORTED_IREXPR = "BYPASS_UNSUPPORTED_IREXPR"
BYPASS_UNSUPPORTED_IRSTMT = "BYPASS_UNSUPPORTED_IRSTMT"
BYPASS_UNSUPPORTED_IRDIRTY = "BYPASS_UNSUPPORTED_IRDIRTY"
BYPASS_UNSUPPORTED_IRCCALL = "BYPASS_UNSUPPORTED_IRCCALL"
BYPASS_ERRORED_IRCCALL = "BYPASS_ERRORED_IRCCALL"
BYPASS_UNSUPPORTED_SYSCALL = "BYPASS_UNSUPPORTED_SYSCALL"
UNSUPPORTED_BYPASS_ZERO_DEFAULT = "UNSUPPORTED_BYPASS_ZERO_DEFAULT"
FRESHNESS_ANALYSIS = 'FRESHNESS_ANALYSIS'
UNINITIALIZED_ACCESS_AWARENESS = 'UNINITIALIZED_ACCESS_AWARENESS'
BEST_EFFORT_MEMORY_STORING = 'BEST_EFFORT_MEMORY_STORING'
# Approximation options (to optimize symbolic execution)
APPROXIMATE_GUARDS = "APPROXIMATE_GUARDS"
APPROXIMATE_SATISFIABILITY = "APPROXIMATE_SATISFIABILITY" # does GUARDS and the rest of the constraints
APPROXIMATE_MEMORY_SIZES = "APPROXIMATE_MEMORY_SIZES"
APPROXIMATE_MEMORY_INDICES = "APPROXIMATE_MEMORY_INDICES"
# use an experimental replacement solver
REPLACEMENT_SOLVER = "REPLACEMENT_SOLVER"
# use a cache-less solver in claripy
CACHELESS_SOLVER = "CACHELESS_SOLVER"
# IR optimization
OPTIMIZE_IR = "OPTIMIZE_IR"
SPECIAL_MEMORY_FILL = "SPECIAL_MEMORY_FILL"
# with this option, the value inside the register ip is kept symbolic
KEEP_IP_SYMBOLIC = "KEEP_IP_SYMBOLIC"
# Do not union values from different locations when reading from memory, to reduce the loss in precision.
# It is only applied to SimAbstractMemory
KEEP_MEMORY_READS_DISCRETE = "KEEP_MEMORY_READS_DISCRETE"
# Raise a SigSegfaultError on illegal memory accesses
STRICT_PAGE_ACCESS = "STRICT_PAGE_ACCESS"
#
# CGC specific state options
#
# Return 0 instead of a symbolic byte for any unconstrained bytes in memory region
CGC_ZERO_FILL_UNCONSTRAINED_MEMORY = 'CGC_ZERO_FILL_UNCONSTRAINED_MEMORY'
# Make sure the receive syscall always reads as many bytes as the program wants
CGC_NO_SYMBOLIC_RECEIVE_LENGTH = 'CGC_NO_SYMBOLIC_RECEIVE_LENGTH'
BYPASS_VERITESTING_EXCEPTIONS = 'BYPASS_VERITESTING_EXCEPTIONS'
# Make sure file descriptors on transmit and receive are always 1 and 0
CGC_ENFORCE_FD = 'CGC_ENFORCE_FD'
# useful sets of options
resilience_options = { BYPASS_UNSUPPORTED_IROP, BYPASS_UNSUPPORTED_IREXPR, BYPASS_UNSUPPORTED_IRSTMT, BYPASS_UNSUPPORTED_IRDIRTY, BYPASS_UNSUPPORTED_IRCCALL, BYPASS_ERRORED_IRCCALL, BYPASS_UNSUPPORTED_SYSCALL, BYPASS_ERRORED_IROP, BYPASS_VERITESTING_EXCEPTIONS }
refs = { TRACK_REGISTER_ACTIONS, TRACK_MEMORY_ACTIONS, TRACK_TMP_ACTIONS, TRACK_JMP_ACTIONS, ACTION_DEPS, TRACK_CONSTRAINT_ACTIONS }
approximation = { APPROXIMATE_SATISFIABILITY, APPROXIMATE_MEMORY_SIZES, APPROXIMATE_MEMORY_INDICES }
symbolic = { DO_CCALLS, SYMBOLIC, TRACK_CONSTRAINTS, LAZY_SOLVES, SYMBOLIC_INITIAL_VALUES, COMPOSITE_SOLVER }
simplification = { SIMPLIFY_MEMORY_WRITES, SIMPLIFY_REGISTER_WRITES }
common_options_without_simplification = { DO_GETS, DO_PUTS, DO_LOADS, DO_OPS, COW_STATES, DO_STORES, OPTIMIZE_IR, TRACK_MEMORY_MAPPING, SUPPORT_FLOATING_POINT }
common_options = common_options_without_simplification | simplification
unicorn = { UNICORN, UNICORN_SYM_REGS_SUPPORT, INITIALIZE_ZERO_REGISTERS }
modes = { }
modes['symbolic'] = common_options | symbolic | refs #| approximation | { VALIDATE_APPROXIMATIONS }
modes['symbolic_approximating'] = common_options | symbolic | refs | approximation
modes['static'] = common_options_without_simplification | refs | { BEST_EFFORT_MEMORY_STORING, UNINITIALIZED_ACCESS_AWARENESS, SYMBOLIC_INITIAL_VALUES, DO_CCALLS, DO_RET_EMULATION, TRUE_RET_EMULATION_GUARD, BLOCK_SCOPE_CONSTRAINTS, TRACK_CONSTRAINTS, ABSTRACT_MEMORY, ABSTRACT_SOLVER, USE_SIMPLIFIED_CCALLS, REVERSE_MEMORY_NAME_MAP }
modes['fastpath'] = ((modes['symbolic'] | { TRACK_OP_ACTIONS, BEST_EFFORT_MEMORY_STORING, AVOID_MULTIVALUED_READS, AVOID_MULTIVALUED_WRITES, IGNORE_EXIT_GUARDS, SYMBOLIC_INITIAL_VALUES, DO_RET_EMULATION, NO_SYMBOLIC_JUMP_RESOLUTION, FAST_REGISTERS } | resilience_options) - simplification - approximation) - { SYMBOLIC, DO_CCALLS }
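# A minimal sketch (the name below is illustrative only) of how these option
# sets are meant to be combined: start from a predefined mode and toggle
# individual options with ordinary set operations.
_example_custom_mode = (modes['symbolic'] | approximation) - {LAZY_SOLVES}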
|
|
"""
Module that contains many useful utilities
for validating data or function arguments
"""
from typing import Iterable, Union
import warnings
import numpy as np
from pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
    Checks whether 'args' has a length of at most that of 'compat_args'. Raises
    a TypeError if that is not the case, similar to the error raised by Python
    when a function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = "argument" if max_arg_count == 1 else "arguments"
raise TypeError(
f"{fname}() takes at most {max_arg_count} {argument} "
f"({actual_arg_count} given)"
)
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
# try checking equality directly with '=' operator,
# as comparison may have been overridden for the left
# hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or (v1 is None and v2 is not None):
match = False
else:
match = v1 == v2
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
except ValueError:
match = arg_val_dict[key] is compat_args[key]
if not match:
raise ValueError(
f"the '{key}' parameter is not supported in "
f"the pandas implementation of {fname}()"
)
def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.
Parameters
----------
fname : str
The name of the function being passed the `*args` parameter
args : tuple
The `*args` parameter passed into a function
max_fname_arg_count : int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args : dict
A dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, a dict ensures that the original
order of the keyword arguments is enforced.
Raises
------
TypeError
If `args` contains more values than there are `compat_args`
ValueError
If `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args)
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'")
def validate_kwargs(fname, kwargs, compat_args):
"""
Checks whether parameters passed to the **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname : str
The name of the function being passed the `**kwargs` parameter
kwargs : dict
The `**kwargs` parameter passed into `fname`
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
Raises
------
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
_check_for_default_values(fname, kwds, compat_args)
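# A minimal usage sketch of validate_kwargs (the helper name below is
# hypothetical and used only for illustration).
def _example_validate_kwargs():
    compat_args = {"out": None, "where": None}
    # Only default values are accepted for compatibility-only keywords.
    validate_kwargs("sum", {"out": None}, compat_args)
    try:
        validate_kwargs("sum", {"out": []}, compat_args)
    except ValueError as err:
        print(err)  # the 'out' parameter is not supported ...
    try:
        validate_kwargs("sum", {"unknown": 1}, compat_args)
    except TypeError as err:
        print(err)  # sum() got an unexpected keyword argument 'unknown'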
def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args):
"""
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
        The maximum number of arguments that the function `fname`
        can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: dict
A dictionary of keys that `kwargs` is allowed to
have and their associated default values.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
    ValueError if `args` contains values not at the default value (`None`), or if
    `kwargs` contains keys in `compat_args` that do not map to the default
    value as specified in `compat_args`
See Also
--------
validate_args : Purely args validation.
validate_kwargs : Purely kwargs validation.
"""
# Check that the total number of arguments passed in (i.e.
# args and kwargs) does not exceed the length of compat_args
_check_arg_length(
fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args
)
# Check there is no overlap with the positional and keyword
# arguments, similar to what is done in actual Python functions
args_dict = dict(zip(compat_args, args))
for key in args_dict:
if key in kwargs:
raise TypeError(
f"{fname}() got multiple values for keyword argument '{key}'"
)
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError(
f'For argument "{arg_name}" expected type bool, received '
f"type {type(value).__name__}."
)
return value
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"""
Argument handler for mixed index, columns / axis functions
In an attempt to handle both `.method(index, columns)`, and
`.method(arg, axis=.)`, we have to do some bad things to argument
parsing. This translates all arguments to `{index=., columns=.}` style.
Parameters
----------
data : DataFrame
args : tuple
All positional arguments from the user
kwargs : dict
All keyword arguments from the user
arg_name, method_name : str
Used for better error messages
Returns
-------
kwargs : dict
A dictionary of keyword arguments. Doesn't modify ``kwargs``
inplace, so update them with the return value here.
Examples
--------
>>> df._validate_axis_style_args((str.upper,), {'columns': id},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
This emits a warning
>>> df._validate_axis_style_args((str.upper, id), {},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
"""
# TODO: Change to keyword-only args and remove all this
out = {}
# Goal: fill 'out' with index/columns-style arguments
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
if "axis" in kwargs and any(x in kwargs for x in data._AXIS_TO_AXIS_NUMBER):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
msg = f"{method_name} got multiple values for argument '{arg_name}'"
raise TypeError(msg)
axis = data._get_axis_name(kwargs.get("axis", 0))
out[axis] = kwargs[arg_name]
# More user-provided arguments, now from kwargs
for k, v in kwargs.items():
try:
ax = data._get_axis_name(k)
except ValueError:
pass
else:
out[ax] = v
# All user-provided kwargs have been handled now.
# Now we supplement with positional arguments, emitting warnings
# when there's ambiguity and raising when there's conflicts
if len(args) == 0:
pass # It's up to the function to decide if this is valid
elif len(args) == 1:
axis = data._get_axis_name(kwargs.get("axis", 0))
out[axis] = args[0]
elif len(args) == 2:
if "axis" in kwargs:
# Unambiguously wrong
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
raise TypeError(msg)
msg = (
f"Interpreting call\n\t'.{method_name}(a, b)' as "
f"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
"positional arguments for 'index' or 'columns' will raise "
"a 'TypeError'."
)
warnings.warn(msg, FutureWarning, stacklevel=4)
out[data._get_axis_name(0)] = args[0]
out[data._get_axis_name(1)] = args[1]
else:
msg = f"Cannot specify all of '{arg_name}', 'index', 'columns'."
raise TypeError(msg)
return out
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
"""
Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
elif value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
f'you passed a "{type(value).__name__}"'
)
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method
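# A minimal usage sketch of validate_fillna_kwargs (the helper name below is
# hypothetical and used only for illustration).
def _example_validate_fillna_kwargs():
    # Exactly one of 'value' and 'method' must be supplied.
    assert validate_fillna_kwargs(0, None) == (0, None)
    try:
        validate_fillna_kwargs(None, None)
    except ValueError as err:
        print(err)  # Must specify a fill 'value' or 'method'.
    try:
        validate_fillna_kwargs([1, 2], None)
    except TypeError as err:
        print(err)  # "value" parameter must be a scalar or dict ...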
def validate_percentile(q: Union[float, Iterable[float]]) -> np.ndarray:
"""
Validate percentiles (used by describe and quantile).
This function checks if the given float or iterable of floats is a valid percentile
otherwise raises a ValueError.
Parameters
----------
q: float or iterable of floats
A single percentile or an iterable of percentiles.
Returns
-------
ndarray
An ndarray of the percentiles if valid.
Raises
------
    ValueError if percentiles are not in the interval [0, 1].
"""
q_arr = np.asarray(q)
# Don't change this to an f-string. The string formatting
# is too expensive for cases where we don't need it.
msg = "percentiles should all be in the interval [0, 1]. Try {} instead."
if q_arr.ndim == 0:
if not 0 <= q_arr <= 1:
raise ValueError(msg.format(q_arr / 100.0))
else:
if not all(0 <= qs <= 1 for qs in q_arr):
raise ValueError(msg.format(q_arr / 100.0))
return q_arr
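# A minimal usage sketch of validate_percentile (the helper name below is
# hypothetical and used only for illustration).
def _example_validate_percentile():
    assert validate_percentile(0.5) == 0.5
    assert list(validate_percentile([0.25, 0.75])) == [0.25, 0.75]
    try:
        validate_percentile(50)  # percentiles must be given in [0, 1]
    except ValueError as err:
        print(err)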
|
|
# Lint as: python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Turn Python docstrings into Markdown for TensorFlow documentation."""
import dataclasses
import enum
import inspect
import os
import pathlib
import posixpath
import pprint
import re
import textwrap
import typing
from typing import Any, Dict, List, Tuple, Iterable, Optional, Union
from tensorflow_docs.api_generator import config
from tensorflow_docs.api_generator import doc_generator_visitor
from tensorflow_docs.api_generator import obj_type as obj_type_lib
@dataclasses.dataclass
class FileLocation(object):
"""This class indicates that the object is defined in a regular file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
base_url: Optional[str] = None
start_line: Optional[int] = None
end_line: Optional[int] = None
@property
def url(self) -> Optional[str]:
if self.start_line and self.end_line:
if 'github.com' in self.base_url:
return f'{self.base_url}#L{self.start_line}-L{self.end_line}'
return self.base_url
def is_class_attr(full_name, index):
"""Check if the object's parent is a class.
Args:
full_name: The full name of the object, like `tf.module.symbol`.
index: The {full_name:py_object} dictionary for the public API.
Returns:
True if the object is a class attribute.
"""
parent_name = full_name.rsplit('.', 1)[0]
if inspect.isclass(index[parent_name]):
return True
return False
def documentation_path(full_name, is_fragment=False):
"""Returns the file path for the documentation for the given API symbol.
Given the fully qualified name of a library symbol, compute the path to which
to write the documentation for that symbol (relative to a base directory).
Documentation files are organized into directories that mirror the python
module/class structure.
Args:
full_name: Fully qualified name of a library symbol.
is_fragment: If `False` produce a page link (`tf.a.b.c` -->
`tf/a/b/c.md`). If `True` produce fragment link, `tf.a.b.c` -->
`tf/a/b.md#c`
Returns:
The file path to which to write the documentation for `full_name`.
"""
parts = full_name.split('.')
if is_fragment:
parts, fragment = parts[:-1], parts[-1]
result = posixpath.join(*parts) + '.md'
if is_fragment:
result = result + '#' + fragment
return result
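# A minimal sketch of the two addressing modes described above (the helper
# name is hypothetical and used only for illustration).
def _example_documentation_path():
  assert documentation_path('tf.a.b.c') == 'tf/a/b/c.md'
  assert documentation_path('tf.a.b.c', is_fragment=True) == 'tf/a/b.md#c'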
def _get_raw_docstring(py_object):
"""Get the docs for a given python object.
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
Returns:
The docstring, or the empty string if no docstring was found.
"""
if obj_type_lib.ObjType.get(py_object) is obj_type_lib.ObjType.TYPE_ALIAS:
if inspect.getdoc(py_object) != inspect.getdoc(py_object.__origin__):
result = inspect.getdoc(py_object)
else:
result = ''
elif obj_type_lib.ObjType.get(py_object) is not obj_type_lib.ObjType.OTHER:
result = inspect.getdoc(py_object) or ''
else:
result = ''
if result is None:
result = ''
result = _StripTODOs()(result)
result = _StripPylintAndPyformat()(result)
result = _AddDoctestFences()(result + '\n')
result = _DowngradeH1Keywords()(result)
return result
class _AddDoctestFences(object):
"""Adds ``` fences around doctest caret blocks >>> that don't have them."""
CARET_BLOCK_RE = re.compile(
r"""
\n # After a blank line.
(?P<indent>\ *)(?P<content>\>\>\>.*?) # Whitespace and a triple caret.
\n\s*?(?=\n|$) # Followed by a blank line""",
re.VERBOSE | re.DOTALL)
def _sub(self, match):
groups = match.groupdict()
fence = f"\n{groups['indent']}```\n"
content = groups['indent'] + groups['content']
return ''.join([fence, content, fence])
def __call__(self, content):
return self.CARET_BLOCK_RE.sub(self._sub, content)
class _StripTODOs(object):
TODO_RE = re.compile('#? *TODO.*')
def __call__(self, content: str) -> str:
return self.TODO_RE.sub('', content)
class _StripPylintAndPyformat(object):
STRIP_RE = re.compile('# *?(pylint|pyformat):.*', re.I)
def __call__(self, content: str) -> str:
return self.STRIP_RE.sub('', content)
class _DowngradeH1Keywords():
"""Convert keras docstring keyword format to google format."""
KEYWORD_H1_RE = re.compile(
r"""
^ # Start of line
(?P<indent>\s*) # Capture leading whitespace as <indent
\#\s* # A literal "#" and more spaces
# Capture any of these keywords as <keyword>
(?P<keyword>Args|Arguments|Returns|Raises|Yields|Examples?|Notes?)
\s*:? # Optional whitespace and optional ":"
""", re.VERBOSE)
def __call__(self, docstring):
lines = docstring.splitlines()
new_lines = []
is_code = False
for line in lines:
if line.strip().startswith('```'):
is_code = not is_code
elif not is_code:
line = self.KEYWORD_H1_RE.sub(r'\g<indent>\g<keyword>:', line)
new_lines.append(line)
docstring = '\n'.join(new_lines)
return docstring
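# A minimal sketch of what _DowngradeH1Keywords does to a keras-style heading
# (the helper name is hypothetical and used only for illustration).
def _example_downgrade_h1_keywords():
  doc = "Do things.\n\n# Args:\n  x: an input.\n"
  downgraded = _DowngradeH1Keywords()(doc)
  # The "# Args:" heading becomes a google-style "Args:" section header.
  assert "\nArgs:\n" in downgraded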
def _handle_compatibility(doc) -> Tuple[str, Dict[str, str]]:
"""Parse and remove compatibility blocks from the main docstring.
Args:
doc: The docstring that contains compatibility notes.
Returns:
A tuple of the modified doc string and a hash that maps from compatibility
note type to the text of the note.
"""
compatibility_notes = {}
match_compatibility = re.compile(
r'[ \t]*@compatibility\(([^\n]+?)\)\s*\n'
r'(.*?)'
r'[ \t]*@end_compatibility', re.DOTALL)
for f in match_compatibility.finditer(doc):
compatibility_notes[f.group(1)] = f.group(2)
return match_compatibility.subn(r'', doc)[0], compatibility_notes
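# A minimal sketch of how compatibility blocks are pulled out of a docstring
# (the helper name is hypothetical and used only for illustration).
def _example_handle_compatibility():
  doc = ("Does a thing.\n"
         "@compatibility(TF2)\n"
         "Behaves differently in TF2.\n"
         "@end_compatibility\n")
  text, notes = _handle_compatibility(doc)
  assert notes == {'TF2': 'Behaves differently in TF2.\n'}
  assert '@compatibility' not in text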
def _pairs(items):
"""Given an list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...].
Args:
items: A list of items (length must be even)
Returns:
A list of pairs.
"""
assert len(items) % 2 == 0
return list(zip(items[::2], items[1::2]))
# Don't change the width="214px" without consulting with the devsite-team.
TABLE_TEMPLATE = textwrap.dedent("""
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2">{title}</th></tr>
{text}
{items}
</table>
""")
ITEMS_TEMPLATE = textwrap.dedent("""\
<tr>
<td>
{name}{anchor}
</td>
<td>
{description}
</td>
</tr>""")
TEXT_TEMPLATE = textwrap.dedent("""\
<tr class="alt">
<td colspan="2">
{text}
</td>
</tr>""")
@dataclasses.dataclass
class TitleBlock(object):
"""A class to parse title blocks (like `Args:`) and convert them to markdown.
This handles the "Args/Returns/Raises" blocks and anything similar.
  These are used to extract metadata (argument descriptions, etc.), and to
  convert this `TitleBlock` to markdown.
These blocks are delimited by indentation. There must be a blank line before
the first `TitleBlock` in a series.
The expected format is:
```
Title:
Freeform text
arg1: value1
    arg2: value2
```
These are represented as:
```
TitleBlock(
title = "Arguments",
text = "Freeform text",
items=[('arg1', 'value1'), ('arg2', 'value2')])
```
The "text" and "items" fields may be empty. When both are empty the generated
markdown only serves to upgrade the title to a <h4>.
Attributes:
title: The title line, without the colon.
text: Freeform text. Anything between the `title`, and the `items`.
items: A list of (name, value) string pairs. All items must have the same
indentation.
"""
_INDENTATION_REMOVAL_RE = re.compile(r'( *)(.+)')
title: Optional[str]
text: str
items: Iterable[Tuple[str, str]]
def _dedent_after_first_line(self, text):
if '\n' not in text:
return text
first, remainder = text.split('\n', 1)
remainder = textwrap.dedent(remainder)
result = '\n'.join([first, remainder])
return result
def table_view(self, title_template: Optional[str] = None) -> str:
"""Returns a tabular markdown version of the TitleBlock.
Tabular view is only for `Args`, `Returns`, `Raises` and `Attributes`. If
anything else is encountered, redirect to list view.
Args:
title_template: Template for title detailing how to display it.
Returns:
Table containing the content to display.
"""
if title_template is not None:
title = title_template.format(title=self.title)
else:
title = self.title
text = self.text.strip()
if text:
text = self._dedent_after_first_line(text)
text = TEXT_TEMPLATE.format(text=text)
items = []
for name, description in self.items:
if not description:
description = ''
else:
description = description.strip('\n')
description = self._dedent_after_first_line(description)
item_table = ITEMS_TEMPLATE.format(
name=f'`{name}`', anchor='', description=description)
items.append(item_table)
return '\n' + TABLE_TEMPLATE.format(
title=title, text=text, items=''.join(items)) + '\n'
def __str__(self) -> str:
"""Returns a non-tempated version of the TitleBlock."""
sub = []
sub.append(f'\n\n#### {self.title}:\n')
sub.append(textwrap.dedent(self.text))
sub.append('\n')
for name, description in self.items:
description = description.strip()
if not description:
sub.append(f'* <b>`{name}`</b>\n')
else:
sub.append(f'* <b>`{name}`</b>: {description}\n')
return ''.join(sub)
# This regex matches an entire title-block.
BLOCK_RE = re.compile(
r"""
(?:^|^\n|\n\n) # After a blank line (non-capturing):
(?P<title>[A-Z][\s\w]{0,20}) # Find a sentence case title, followed by
\s*:\s*?(?=\n) # whitespace, a colon and a new line.
(?P<content>.*?) # Then take everything until
(?=\n\S|$) # look ahead finds a non-indented line
# (a new-line followed by non-whitespace)
""", re.VERBOSE | re.DOTALL)
ITEM_RE = re.compile(
r"""
^(\*?\*?'?"? # Capture optional *s to allow *args, **kwargs and quotes
\w[\w.'"]*? # Capture a word character followed by word characters
# or "."s or ending quotes.
)\s*:\s # Allow any whitespace around the colon.""",
re.MULTILINE | re.VERBOSE)
@classmethod
def split_string(cls, docstring: str):
r"""Given a docstring split it into a list of `str` or `TitleBlock` chunks.
For example the docstring of `tf.nn.relu`:
'''
    Computes rectified linear: `max(features, 0)`.
Args:
features: A `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.
name: A name for the operation (optional).
More freeform markdown text.
'''
This is parsed, and returned as:
```
[
"Computes rectified linear: `max(features, 0)`.",
TitleBlock(
title='Args',
text='',
items=[
('features', ' A `Tensor`. Must be...'),
('name', ' A name for the operation (optional).\n\n')]
),
"More freeform markdown text."
]
```
Args:
docstring: The docstring to parse
Returns:
The docstring split into chunks. Each chunk produces valid markdown when
`str` is called on it (each chunk is a python `str`, or a `TitleBlock`).
"""
parts = []
while docstring:
split = re.split(cls.BLOCK_RE, docstring, maxsplit=1)
# The first chunk in split is all text before the TitleBlock.
before = split.pop(0)
parts.append(before)
# If `split` is empty, there were no other matches, and we're done.
if not split:
break
# If there was a match, split contains three items. The two capturing
# groups in the RE, and the remainder.
title, content, docstring = split
# Now `content` contains the text and the name-value item pairs.
# separate these two parts.
content = textwrap.dedent(content)
split = cls.ITEM_RE.split(content)
text = split.pop(0)
items = _pairs(split)
title_block = cls(title=title, text=text, items=items)
parts.append(title_block)
return parts
class DocstringInfo(typing.NamedTuple):
brief: str
docstring_parts: List[Union[TitleBlock, str]]
compatibility: Dict[str, str]
def _get_other_member_doc(
obj: Any,
parser_config: config.ParserConfig,
extra_docs: Optional[Dict[int, str]],
) -> str:
"""Returns the docs for other members of a module."""
# An object's __doc__ attribute will mask the class'.
my_doc = inspect.getdoc(obj)
class_doc = inspect.getdoc(type(obj))
description = None
if my_doc != class_doc:
# If they're different it's because __doc__ is set on the instance.
if my_doc is not None:
description = my_doc
if description is None and extra_docs is not None:
description = extra_docs.get(id(obj), None)
info = None
if isinstance(obj, dict):
# pprint.pformat (next block) doesn't sort dicts until python 3.8
items = [
f' {name!r}: {value!r}'
for name, value in sorted(obj.items(), key=repr)
]
items = ',\n'.join(items)
info = f'```\n{{\n{items}\n}}\n```'
elif isinstance(obj, (set, frozenset)):
    # Sort manually (as with dicts above) so the rendered set is deterministic.
items = [f' {value!r}' for value in sorted(obj, key=repr)]
items = ',\n'.join(items)
info = f'```\n{{\n{items}\n}}\n```'
elif (doc_generator_visitor.maybe_singleton(obj) or
isinstance(obj, (list, tuple, enum.Enum))):
# * Use pformat instead of repr so dicts and sets are sorted (deterministic)
# * Escape ` so it doesn't break code formatting. You can't use "`"
    # here since it will display as a literal. I'm not sure why <pre></pre>
# breaks on the site.
info = pprint.pformat(obj).replace('`', r'\`')
info = f'`{info}`'
elif obj_type_lib.ObjType.get(obj) is obj_type_lib.ObjType.PROPERTY:
info = None
else:
class_full_name = parser_config.reverse_index.get(id(type(obj)), None)
if class_full_name is None:
module = getattr(type(obj), '__module__', None)
class_name = type(obj).__name__
if module is None or module == 'builtins':
class_full_name = class_name
else:
class_full_name = f'{module}.{class_name}'
info = f'Instance of `{class_full_name}`'
parts = [info, description]
parts = [item for item in parts if item is not None]
return '\n\n'.join(parts)
def parse_md_docstring(
py_object: Any,
full_name: str,
parser_config: config.ParserConfig,
extra_docs: Optional[Dict[int, str]] = None,
) -> DocstringInfo:
"""Parse the object's docstring and return a `DocstringInfo`.
This function clears @@'s from the docstring, and replaces `` references
with links.
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
full_name: (optional) The api path to the current object, so replacements
can depend on context.
parser_config: An instance of `config.ParserConfig`.
extra_docs: Extra docs for symbols like public constants(list, tuple, etc)
that need to be added to the markdown pages created.
Returns:
A DocstringInfo object, all fields will be empty if no docstring was found.
"""
if obj_type_lib.ObjType.get(py_object) is obj_type_lib.ObjType.OTHER:
raw_docstring = _get_other_member_doc(
obj=py_object, parser_config=parser_config, extra_docs=extra_docs)
else:
raw_docstring = _get_raw_docstring(py_object)
raw_docstring = parser_config.reference_resolver.replace_references(
raw_docstring, full_name)
atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$')
raw_docstring = '\n'.join(
line for line in raw_docstring.split('\n') if not atat_re.match(line))
docstring, compatibility = _handle_compatibility(raw_docstring)
if 'Generated by: tensorflow/tools/api/generator' in docstring:
docstring = ''
# Remove the first-line "brief" docstring.
lines = docstring.split('\n')
brief = lines.pop(0)
docstring = '\n'.join(lines)
docstring_parts = TitleBlock.split_string(docstring)
return DocstringInfo(brief, docstring_parts, compatibility)
def get_defining_class(py_class, name):
for cls in inspect.getmro(py_class):
if name in cls.__dict__:
return cls
return None
def _unwrap_obj(obj):
while True:
unwrapped_obj = getattr(obj, '__wrapped__', None)
if unwrapped_obj is None:
break
obj = unwrapped_obj
return obj
def get_defined_in(
py_object: Any,
parser_config: config.ParserConfig) -> Optional[FileLocation]:
"""Returns a description of where the passed in python object was defined.
Args:
py_object: The Python object.
parser_config: A config.ParserConfig object.
Returns:
A `FileLocation`
"""
# Every page gets a note about where this object is defined
base_dirs_and_prefixes = zip(parser_config.base_dir,
parser_config.code_url_prefix)
try:
obj_path = pathlib.Path(inspect.getfile(_unwrap_obj(py_object)))
except TypeError: # getfile throws TypeError if py_object is a builtin.
return None
if obj_path.suffix not in ('.py', '.pyc'):
return None
code_url_prefix = None
for base_dir, temp_prefix in base_dirs_and_prefixes:
try:
rel_path = obj_path.relative_to(base_dir)
except ValueError:
continue
code_url_prefix = temp_prefix
# rel_path is currently a platform-specific path, so we need to convert
# it to a posix path (for lack of a URL path).
posix_rel_path_str = str(pathlib.PurePosixPath(rel_path))
break
# No link if the file was not found in a `base_dir`, or the prefix is None.
if code_url_prefix is None:
return None
try:
lines, start_line = inspect.getsourcelines(py_object)
end_line = start_line + len(lines) - 1
if 'MACHINE GENERATED' in lines[0]:
# don't link to files generated by tf_export
return None
except (IOError, TypeError, IndexError):
start_line = None
end_line = None
# In case this is compiled, point to the original
if posix_rel_path_str.endswith('.pyc'):
# If a PY3 __pycache__/ subdir is being used, omit it.
posix_rel_path_str = posix_rel_path_str.replace('__pycache__/', '')
# Strip everything after the first . so that variants such as .pyc and
# .cpython-3x.pyc or similar are all handled.
posix_rel_path_str = posix_rel_path_str.partition('.')[0] + '.py'
if re.search(r'<[\w\s]+>', posix_rel_path_str):
# Built-ins emit paths like <embedded stdlib>, <string>, etc.
return None
if '<attrs generated' in posix_rel_path_str:
return None
if re.match(r'.*/gen_[^/]*\.py$', posix_rel_path_str):
return FileLocation()
if 'genfiles' in posix_rel_path_str:
return FileLocation()
elif posix_rel_path_str.endswith('_pb2.py'):
# The _pb2.py files all appear right next to their defining .proto file.
posix_rel_path_str = posix_rel_path_str[:-7] + '.proto'
return FileLocation(
base_url=posixpath.join(code_url_prefix, posix_rel_path_str))
else:
return FileLocation(
base_url=posixpath.join(code_url_prefix, posix_rel_path_str),
start_line=start_line,
end_line=end_line)
# TODO(markdaoust): This should just parse, pretty_docs should generate the md.
def generate_global_index(library_name, index, reference_resolver):
"""Given a dict of full names to python objects, generate an index page.
The index page generated contains a list of links for all symbols in `index`
that have their own documentation page.
Args:
library_name: The name for the documented library to use in the title.
index: A dict mapping full names to python objects.
reference_resolver: An instance of ReferenceResolver.
Returns:
A string containing an index page as Markdown.
"""
symbol_links = []
for full_name, py_object in index.items():
obj_type = obj_type_lib.ObjType.get(py_object)
if obj_type in (obj_type_lib.ObjType.OTHER, obj_type_lib.ObjType.PROPERTY):
continue
# In Python 3, unbound methods are functions, so eliminate those.
if obj_type is obj_type_lib.ObjType.CALLABLE:
if is_class_attr(full_name, index):
continue
with reference_resolver.temp_prefix('..'):
symbol_links.append(
(full_name, reference_resolver.python_link(full_name, full_name)))
lines = [f'# All symbols in {library_name}', '']
lines.append('<!-- Insert buttons and diff -->\n')
  # Sort all the symbols once, so that the ordering is preserved when it's broken
# up into main symbols and compat symbols and sorting the sublists is not
# required.
symbol_links = sorted(symbol_links, key=lambda x: x[0])
compat_v1_symbol_links = []
compat_v2_symbol_links = []
primary_symbol_links = []
for symbol, link in symbol_links:
if symbol.startswith('tf.compat.v1'):
if 'raw_ops' not in symbol:
compat_v1_symbol_links.append(link)
elif symbol.startswith('tf.compat.v2'):
compat_v2_symbol_links.append(link)
else:
primary_symbol_links.append(link)
lines.append('## Primary symbols')
for link in primary_symbol_links:
lines.append(f'* {link}')
if compat_v2_symbol_links:
lines.append('\n## Compat v2 symbols\n')
for link in compat_v2_symbol_links:
lines.append(f'* {link}')
if compat_v1_symbol_links:
lines.append('\n## Compat v1 symbols\n')
for link in compat_v1_symbol_links:
lines.append(f'* {link}')
  # TODO(markdaoust): use a _ModulePageInfo -> pretty_docs.build_md_page()
return '\n'.join(lines)
class Metadata(object):
"""A class for building a page's Metadata block.
Attributes:
name: The name of the page being described by the Metadata block.
version: The source version.
"""
def __init__(self, name, version=None, content=None):
"""Creates a Metadata builder.
Args:
name: The name of the page being described by the Metadata block.
version: The source version.
content: Content to create the metadata from.
"""
self.name = name
self.version = version
if self.version is None:
self.version = 'Stable'
self._content = content
if self._content is None:
self._content = []
def append(self, item):
"""Adds an item from the page to the Metadata block.
Args:
item: The parsed page section to add.
"""
self._content.append(item.short_name)
def build_html(self):
"""Returns the Metadata block as an Html string."""
# Note: A schema is not a URL. It is defined with http: but doesn't resolve.
schema = 'http://developers.google.com/ReferenceObject'
parts = [f'<div itemscope itemtype="{schema}">']
parts.append(f'<meta itemprop="name" content="{self.name}" />')
parts.append(f'<meta itemprop="path" content="{self.version}" />')
for item in self._content:
parts.append(f'<meta itemprop="property" content="{item}"/>')
parts.extend(['</div>', ''])
return '\n'.join(parts)
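# A minimal usage sketch of the Metadata builder above (the helper name, page
# name, and content values are made up for illustration).
def _example_metadata_block():
  meta = Metadata('tf.example.Symbol', content=['method_one', 'method_two'])
  html = meta.build_html()
  assert '<meta itemprop="name" content="tf.example.Symbol" />' in html
  assert '<meta itemprop="path" content="Stable" />' in html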
|
|
from . import color_utils
from . import filepath_utils
from . import string_utils
from . import object_utils
from ..rman_constants import RMAN_STYLIZED_FILTERS, RMAN_STYLIZED_PATTERNS, RMAN_UTILITY_PATTERN_NAMES, RFB_FLOAT3
import math
import bpy
def is_renderman_nodetree(material):
return find_node(material, 'RendermanOutputNode')
def is_mesh_light(ob):
'''Checks to see if ob is a RenderMan mesh light
Args:
ob (bpy.types.Object) - Object caller wants to check.
Returns:
(bpy.types.Node) - the PxrMeshLight node if this is a mesh light. Else, returns None.
'''
#mat = getattr(ob, 'active_material', None)
mat = object_utils.get_active_material(ob)
if not mat:
return None
output = is_renderman_nodetree(mat)
if not output:
return None
if len(output.inputs) > 1:
socket = output.inputs[1]
if socket.is_linked:
node = socket.links[0].from_node
if node.bl_label == 'PxrMeshLight':
return node
return None
def is_rman_light(ob, include_light_filters=True):
'''Checks to see if ob is a RenderMan light
Args:
ob (bpy.types.Object) - Object caller wants to check.
include_light_filters (bool) - whether or not light filters should be included
Returns:
(bpy.types.Node) - the shading node, else returns None.
'''
return get_light_node(ob, include_light_filters=include_light_filters)
def get_rman_light_properties_group(ob):
'''Return the RendermanLightSettings properties
for this object.
Args:
ob (bpy.types.Object) - Object caller wants to get the RendermanLightSettings for.
Returns:
(RendermanLightSettings) - RendermanLightSettings object
'''
if ob.type == 'LIGHT':
return ob.data.renderman
else:
#mat = ob.active_material
mat = object_utils.get_active_material(ob)
if mat:
return mat.renderman_light
return None
def get_light_node(ob, include_light_filters=True):
'''Return the shading node for this light object.
Args:
ob (bpy.types.Object) - Object caller is interested in.
include_light_filters (bool) - whether or not light filters should be included
Returns:
(bpy.types.Node) - The associated shading node for ob
'''
if ob.type == 'LIGHT':
if hasattr(ob.data, 'renderman'):
if include_light_filters:
return ob.data.renderman.get_light_node()
elif ob.data.renderman.renderman_light_role == 'RMAN_LIGHT':
return ob.data.renderman.get_light_node()
else:
return is_mesh_light(ob)
def socket_node_input(nt, socket):
return next((l.from_node for l in nt.links if l.to_socket == socket), None)
def socket_socket_input(nt, socket):
return next((l.from_socket for l in nt.links if l.to_socket == socket and socket.is_linked),
None)
def get_socket_name(node, socket):
if type(socket) == dict:
return socket['name'].replace(' ', '')
# if this is a renderman node we can just use the socket name,
else:
        if not hasattr(node, 'plugin_name'):
from .. import rman_bl_nodes
# cycles node?
mapping, node_desc = rman_bl_nodes.get_cycles_node_desc(node)
if node_desc:
idx = -1
is_output = socket.is_output
if is_output:
for i, output in enumerate(node.outputs):
if socket.name == output.name:
idx = i
break
else:
for i, input in enumerate(node.inputs):
if socket.name == input.name:
idx = i
break
if idx == -1:
return socket.identifier.replace(' ', '')
if is_output:
node_desc_param = node_desc.outputs[idx]
else:
node_desc_param = node_desc.params[idx]
return node_desc_param.name
else:
if socket.name in node.inputs and socket.name in node.outputs:
suffix = 'Out' if socket.is_output else 'In'
return socket.name.replace(' ', '') + suffix
return socket.identifier.replace(' ', '')
def get_socket_type(node, socket):
sock_type = socket.type.lower()
if sock_type == 'rgba':
return 'color'
elif sock_type == 'value':
return 'float'
elif sock_type == 'vector':
return 'point'
else:
return sock_type
def get_node_name(node, mat_name):
node_name = string_utils.sanitize_node_name('%s_%s' % (mat_name, node.name))
return node_name
def linked_sockets(sockets):
if sockets is None:
return []
return [i for i in sockets if i.is_linked]
def is_socket_same_type(socket1, socket2):
'''Compare two NodeSockets to see if they are of the same type. Types that
are float3 like are considered the same.
Arguments:
socket1 (bpy.types.NodeSocket) - first socket to compare
socket2 (bpy.types.NodeSocket) - second socket to compare
Returns:
(bool) - return True if both sockets are the same type
'''
return (type(socket1) == type(socket2)) or (is_socket_float_type(socket1) and is_socket_float_type(socket2)) or \
(is_socket_float3_type(socket1) and is_socket_float3_type(socket2))
def is_socket_float_type(socket):
'''Check if socket is of float type
Arguments:
socket (bpy.types.NodeSocket) - socket to check
Returns:
(bool) - return True if socket are float type
'''
renderman_type = getattr(socket, 'renderman_type', None)
if renderman_type:
return renderman_type in ['int', 'float']
else:
return socket.type in ['INT', 'VALUE']
def is_socket_float3_type(socket):
'''Check if socket is of float3 type
Arguments:
socket (bpy.types.NodeSocket) - socket to check
Returns:
(bool) - return True if socket is float3 type
'''
renderman_type = getattr(socket, 'renderman_type', None)
if renderman_type:
return renderman_type in RFB_FLOAT3
else:
return socket.type in ['RGBA', 'VECTOR']
# do we need to convert this socket?
def do_convert_socket(from_socket, to_socket):
if not to_socket:
return False
return (is_socket_float_type(from_socket) and is_socket_float3_type(to_socket)) or \
(is_socket_float3_type(from_socket) and is_socket_float_type(to_socket))
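# A minimal sketch of the float/float3 conversion check above. It does not need
# real Blender sockets because the helpers only look at the 'renderman_type'
# attribute (falling back to socket.type); it assumes 'color' is one of the
# RFB_FLOAT3 type names. The helper name is only for illustration.
def _example_do_convert_socket():
    from types import SimpleNamespace
    float_socket = SimpleNamespace(renderman_type='float')
    color_socket = SimpleNamespace(renderman_type='color')
    assert do_convert_socket(float_socket, color_socket)   # needs a PxrToFloat3
    assert not do_convert_socket(float_socket, float_socket)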
def find_node_input(node, name):
for input in node.inputs:
if input.name == name:
return input
return None
def find_node(material, nodetype):
if material and material.node_tree:
ntree = material.node_tree
active_output_node = None
for node in ntree.nodes:
if getattr(node, "bl_idname", None) == nodetype:
if getattr(node, "is_active_output", True):
return node
if not active_output_node:
active_output_node = node
return active_output_node
return None
def find_node_from_nodetree(ntree, nodetype):
active_output_node = None
for node in ntree.nodes:
if getattr(node, "bl_idname", None) == nodetype:
if getattr(node, "is_active_output", True):
return node
if not active_output_node:
active_output_node = node
return active_output_node
return None
def find_material_from_nodetree(ntree):
mat = None
for m in bpy.data.materials:
if m.node_tree == ntree.id_data:
mat = m
break
return mat
def is_soloable_node(node):
is_soloable = False
node_type = getattr(node, 'renderman_node_type', '')
if node_type in ('pattern', 'bxdf'):
if node.bl_label in ['PxrLayer', 'PxrLayerMixer']:
is_soloable = False
else:
is_soloable = True
return is_soloable
def find_soloable_node(ntree):
selected_node = None
for n in ntree.nodes:
node_type = getattr(n, 'renderman_node_type', '')
if n.select and node_type in ('pattern', 'bxdf'):
if n.bl_label in ['PxrLayer', 'PxrLayerMixer']:
continue
selected_node = n
break
return selected_node
def find_selected_pattern_node(ntree):
selected_node = None
for n in ntree.nodes:
node_type = getattr(n, 'renderman_node_type', '')
if n.select and node_type == 'pattern':
if n.bl_label in ['PxrLayer', 'PxrLayerMixer']:
continue
selected_node = n
break
return selected_node
# walk the tree for nodes to export
def gather_nodes(node):
nodes = []
for socket in node.inputs:
if socket.is_linked:
link = socket.links[0]
for sub_node in gather_nodes(socket.links[0].from_node):
if sub_node not in nodes:
nodes.append(sub_node)
            # if this is a float -> color link, insert a PxrToFloat3 conversion node
if is_socket_float_type(link.from_socket) and is_socket_float3_type(socket):
convert_node = ('PxrToFloat3', link.from_node,
link.from_socket)
if convert_node not in nodes:
nodes.append(convert_node)
elif is_socket_float3_type(link.from_socket) and is_socket_float_type(socket):
convert_node = ('PxrToFloat', link.from_node, link.from_socket)
if convert_node not in nodes:
nodes.append(convert_node)
if hasattr(node, 'renderman_node_type') and node.renderman_node_type != 'output':
nodes.append(node)
elif not hasattr(node, 'renderman_node_type') and node.bl_idname not in ['ShaderNodeOutputMaterial', 'NodeGroupInput', 'NodeGroupOutput']:
nodes.append(node)
return nodes
def get_rerouted_node(node):
'''Find and return the rerouted node and socket, given
a NodeReroute node
Arguments:
node (bpy.types.Node) - A shader node of type NodeReroute
Returns:
(bpy.types.Node) - the rerouted node
(bpy.types.NodeSocket) - the socket that should be connected from the rerouted node
'''
if not node.inputs[0].is_linked:
return (None, None)
from_node = node.inputs[0].links[0].from_node
if from_node.bl_idname == 'NodeReroute':
return get_rerouted_node(from_node)
socket = node.inputs[0].links[0].from_socket
return (from_node, socket)
def find_integrator_node(world):
'''Find and return the integrator node from the world nodetree
Arguments:
world (bpy.types.World) - Blender world object
Returns:
(RendermanIntegratorNode) - the integrator ShadingNode
'''
rm = world.renderman
if not world.renderman.use_renderman_node:
return None
output = find_node(world, 'RendermanIntegratorsOutputNode')
if output:
socket = output.inputs[0]
if socket.is_linked:
return socket.links[0].from_node
return None
def find_displayfilter_nodes(world):
'''Find and return all display filter nodes from the world nodetree
Arguments:
world (bpy.types.World) - Blender world object
Returns:
(list) - list of display filter nodes
'''
df_nodes = []
if not world.renderman.use_renderman_node:
return df_nodes
output = find_node(world, 'RendermanDisplayfiltersOutputNode')
if output:
for i, socket in enumerate(output.inputs):
if socket.is_linked:
bl_df_node = socket.links[0].from_node
df_nodes.append(bl_df_node)
return df_nodes
def find_samplefilter_nodes(world):
'''Find and return all sample filter nodes from the world nodetree
Arguments:
world (bpy.types.World) - Blender world object
Returns:
(list) - list of sample filter nodes
'''
sf_nodes = []
if not world.renderman.use_renderman_node:
return sf_nodes
output = find_node(world, 'RendermanSamplefiltersOutputNode')
if output:
for i, socket in enumerate(output.inputs):
if socket.is_linked:
bl_sf_node = socket.links[0].from_node
sf_nodes.append(bl_sf_node)
return sf_nodes
def find_projection_node(camera):
'''Find the projection node, if any
Arguments:
camera (bpy.types.Camera) - Camera object
Returns:
(bpy.types.ShaderNode) - projection node
'''
projection_node = None
nt = camera.data.renderman.rman_nodetree
if nt:
output = find_node_from_nodetree(nt, 'RendermanProjectionsOutputNode')
socket = output.inputs[0]
if socket.is_linked:
projection_node = socket.links[0].from_node
return projection_node
def find_all_stylized_filters(world):
    '''Find and return all stylized display filter nodes (RMAN_STYLIZED_FILTERS)
    from the world nodetree
    Arguments:
        world (bpy.types.World) - Blender world object
    Returns:
        (list) - list of stylized filter nodes
    '''
nodes = list()
output = find_node(world, 'RendermanDisplayfiltersOutputNode')
if not output:
return nodes
for i, socket in enumerate(output.inputs):
if socket.is_linked:
link = socket.links[0]
node = link.from_node
if node.bl_label in RMAN_STYLIZED_FILTERS:
nodes.append(node)
return nodes
def has_stylized_pattern_node(ob, node=None):
    '''Check whether the object's active material (or the given node) has one of
    the RMAN_STYLIZED_PATTERNS connected to a utility pattern input
    (RMAN_UTILITY_PATTERN_NAMES). Returns the pattern node if found, else False.
    '''
prop_name = ''
if not node:
mat = object_utils.get_active_material(ob)
if not mat:
return False
nt = mat.node_tree
output = is_renderman_nodetree(mat)
if not output:
return False
socket = output.inputs[0]
if not socket.is_linked:
return False
link = socket.links[0]
node = link.from_node
for nm in RMAN_UTILITY_PATTERN_NAMES:
if hasattr(node, nm):
prop_name = nm
prop_meta = node.prop_meta[prop_name]
if prop_meta['renderman_type'] == 'array':
array_len = getattr(node, '%s_arraylen' % prop_name)
for i in range(0, array_len):
nm = '%s[%d]' % (prop_name, i)
sub_prop = getattr(node, nm)
if hasattr(node, 'inputs') and nm in node.inputs and \
node.inputs[nm].is_linked:
to_socket = node.inputs[nm]
from_node = to_socket.links[0].from_node
if from_node.bl_label in RMAN_STYLIZED_PATTERNS:
return from_node
elif node.inputs[prop_name].is_linked:
to_socket = node.inputs[prop_name]
from_node = to_socket.links[0].from_node
if from_node.bl_label in RMAN_STYLIZED_PATTERNS:
return from_node
return False
def create_pxrlayer_nodes(nt, bxdf):
from .. import rman_bl_nodes
mixer = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__["PxrLayerMixer"])
layer1 = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__["PxrLayer"])
layer2 = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__["PxrLayer"])
mixer.location = bxdf.location
mixer.location[0] -= 300
layer1.location = mixer.location
layer1.location[0] -= 300
layer1.location[1] += 300
layer2.location = mixer.location
layer2.location[0] -= 300
layer2.location[1] -= 300
nt.links.new(mixer.outputs[0], bxdf.inputs[0])
nt.links.new(layer1.outputs[0], mixer.inputs['baselayer'])
nt.links.new(layer2.outputs[0], mixer.inputs['layer1'])
def _convert_grease_pencil_stroke_texture(mat, nt, output):
from .. import rman_bl_nodes
gp_mat = mat.grease_pencil
col = gp_mat.color[:3]
# col = color_utils.linearizeSRGB(col)
alpha = gp_mat.color[3]
bl_image = gp_mat.stroke_image
bxdf = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
bxdf.location = output.location
bxdf.location[0] -= 300
bxdf.emitColor = col
bxdf.presence = alpha
nt.links.new(bxdf.outputs[0], output.inputs[0])
if not bl_image:
bxdf.emitColor = [0.0, 0.0, 0.0, 1.0]
else:
real_file = filepath_utils.get_real_path(bl_image.filepath)
manifold = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrManifold2D'])
manifold.angle = -math.degrees(gp_mat.pattern_angle)
manifold.scaleS = gp_mat.pattern_scale[0]
manifold.scaleT = gp_mat.pattern_scale[1]
manifold.offsetS = gp_mat.texture_offset[0]
manifold.offsetT = gp_mat.texture_offset[1]
manifold.invertT = 1
texture = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrTexture'])
texture.filename = real_file
texture.linearize = 1
nt.links.new(manifold.outputs[0], texture.inputs[3])
mix = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrMix'])
mix.color2 = col
mix.mix = gp_mat.mix_stroke_factor
nt.links.new(texture.outputs[0], mix.inputs[0])
nt.links.new(mix.outputs[0], bxdf.inputs[0])
nt.links.new(texture.outputs[4], bxdf.inputs[1])
def _convert_grease_pencil_fill_texture(mat, nt, output):
from .. import rman_bl_nodes
gp_mat = mat.grease_pencil
col = gp_mat.fill_color[:3]
# col = color_utils.linearizeSRGB(col)
alpha = gp_mat.fill_color[3]
mix_color = gp_mat.mix_color[:3]
mix_alpha = gp_mat.mix_color[3]
bxdf = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
bxdf.location = output.location
bxdf.location[0] -= 300
bxdf.emitColor = col
bxdf.presence = alpha
nt.links.new(bxdf.outputs[0], output.inputs[0])
bl_image = gp_mat.fill_image
if not bl_image:
bxdf.emitColor = [0.0, 0.0, 0.0, 1.0]
else:
real_file = filepath_utils.get_real_path(bl_image.filepath)
manifold = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrManifold2D'])
manifold.angle = -math.degrees(gp_mat.texture_angle)
manifold.scaleS = gp_mat.texture_scale[0]
manifold.scaleT = gp_mat.texture_scale[1]
manifold.offsetS = gp_mat.texture_offset[0]
manifold.offsetT = gp_mat.texture_offset[1]
manifold.invertT = 1
texture = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrTexture'])
texture.filename = real_file
texture.linearize = 1
nt.links.new(manifold.outputs[0], texture.inputs[3])
mix = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrMix'])
mix.color2 = col
mix.mix = gp_mat.mix_factor
nt.links.new(texture.outputs[0], mix.inputs[0])
nt.links.new(mix.outputs[0], bxdf.inputs[0])
nt.links.new(texture.outputs[4], bxdf.inputs[1])
def _convert_grease_pencil_fill_checker(mat, nt, output):
from .. import rman_bl_nodes
gp_mat = mat.grease_pencil
col = gp_mat.fill_color[:3]
# col = color_utils.linearizeSRGB(col)
alpha = gp_mat.fill_color[3]
mix_color = gp_mat.mix_color[:3]
mix_alpha = gp_mat.mix_color[3]
bxdf = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
bxdf.location = output.location
bxdf.location[0] -= 300
bxdf.emitColor = col
bxdf.presence = alpha
nt.links.new(bxdf.outputs[0], output.inputs[0])
manifold = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrManifold2D'])
manifold.angle = -math.degrees(gp_mat.pattern_angle)
manifold.scaleS = (1/gp_mat.pattern_gridsize) * gp_mat.pattern_scale[0]
manifold.scaleT = (1/gp_mat.pattern_gridsize) * gp_mat.pattern_scale[1]
checker = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrChecker'])
checker.colorA = col
checker.colorB = mix_color
nt.links.new(manifold.outputs[0], checker.inputs[2])
checker2 = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrChecker'])
checker2.colorA = col
checker2.colorB = mix_color
nt.links.new(manifold.outputs[0], checker2.inputs[2])
float3_1 = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrToFloat3'])
float3_1.input = alpha
float3_2 = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrToFloat3'])
float3_2.input = mix_alpha
mix = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrMix'])
nt.links.new(float3_1.outputs[0], mix.inputs[0])
nt.links.new(float3_2.outputs[0], mix.inputs[1])
nt.links.new(checker2.outputs[1], mix.inputs[2])
nt.links.new(checker.outputs[0], bxdf.inputs[0])
nt.links.new(mix.outputs[0], bxdf.inputs[1])
def convert_grease_pencil_mat(mat, nt, output):
    '''Convert a Grease Pencil material into an equivalent RenderMan shading
    network (PxrConstant based) and connect it to the given output node.
    '''
from .. import rman_bl_nodes
gp_mat = mat.grease_pencil
if gp_mat.show_stroke:
stroke_style = gp_mat.stroke_style
if stroke_style == 'TEXTURE':
_convert_grease_pencil_stroke_texture(mat, nt, output)
else:
col = gp_mat.color[:3]
# col = color_utils.linearizeSRGB(col)
alpha = gp_mat.color[3]
bxdf = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
bxdf.location = output.location
bxdf.location[0] -= 300
bxdf.emitColor = col
bxdf.presence = alpha
nt.links.new(bxdf.outputs[0], output.inputs[0])
elif gp_mat.show_fill:
fill_style = gp_mat.fill_style
if fill_style == 'CHECKER':
_convert_grease_pencil_fill_checker(mat, nt, output)
elif fill_style == 'TEXTURE':
_convert_grease_pencil_fill_texture(mat, nt, output)
else:
col = gp_mat.fill_color[:3]
# col = color_utils.linearizeSRGB(col)
alpha = gp_mat.fill_color[3]
mix_color = gp_mat.mix_color[:3]
mix_alpha = gp_mat.mix_color[3]
bxdf = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
bxdf.location = output.location
bxdf.location[0] -= 300
bxdf.emitColor = col
bxdf.presence = alpha
nt.links.new(bxdf.outputs[0], output.inputs[0])
|
|
"""Generic configuration system using unrepr.
Configuration data may be supplied as a Python dictionary, as a filename,
or as an open file object. When you supply a filename or file, Python's
builtin ConfigParser is used (with some extensions).
Namespaces
----------
Configuration keys are separated into namespaces by the first "." in the key.
The only key that cannot exist in a namespace is the "environment" entry.
This special entry 'imports' other config entries from a template stored in
the Config.environments dict.
You can define your own namespaces to be called when new config is merged
by adding a named handler to Config.namespaces. The name can be any string,
and the handler must be either a callable or a context manager.
"""
try:
# Python 3.0+
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
try:
set
except NameError:
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
# Python 3
import builtins
except ImportError:
# Python 2
import __builtin__ as builtins
import operator as _operator
import sys
def as_dict(config):
"""Return a dict from 'config' whether it is a dict, file, or filename."""
if isinstance(config, basestring):
config = Parser().dict_from_file(config)
elif hasattr(config, 'read'):
config = Parser().dict_from_file(config)
return config
class NamespaceSet(dict):
"""A dict of config namespace names and handlers.
Each config entry should begin with a namespace name; the corresponding
namespace handler will be called once for each config entry in that
namespace, and will be passed two arguments: the config key (with the
namespace removed) and the config value.
Namespace handlers may be any Python callable; they may also be
Python 2.5-style 'context managers', in which case their __enter__
method should return a callable to be used as the handler.
See cherrypy.tools (the Toolbox class) for an example.
"""
def __call__(self, config):
"""Iterate through config and pass it to each namespace handler.
config
A flat dict, where keys use dots to separate
namespaces, and values are arbitrary.
The first name in each config key is used to look up the corresponding
namespace handler. For example, a config entry of {'tools.gzip.on': v}
will call the 'tools' namespace handler with the args: ('gzip.on', v)
"""
# Separate the given config into namespaces
ns_confs = {}
for k in config:
if "." in k:
ns, name = k.split(".", 1)
bucket = ns_confs.setdefault(ns, {})
bucket[name] = config[k]
# I chose __enter__ and __exit__ so someday this could be
# rewritten using Python 2.5's 'with' statement:
# for ns, handler in self.iteritems():
# with handler as callable:
# for k, v in ns_confs.get(ns, {}).iteritems():
# callable(k, v)
for ns, handler in self.items():
exit = getattr(handler, "__exit__", None)
if exit:
callable = handler.__enter__()
no_exc = True
try:
try:
for k, v in ns_confs.get(ns, {}).items():
callable(k, v)
except:
# The exceptional case is handled here
no_exc = False
if exit is None:
raise
if not exit(*sys.exc_info()):
raise
# The exception is swallowed if exit() returns true
finally:
# The normal and non-local-goto cases are handled here
if no_exc and exit:
exit(None, None, None)
else:
for k, v in ns_confs.get(ns, {}).items():
handler(k, v)
def __repr__(self):
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,
dict.__repr__(self))
def __copy__(self):
newobj = self.__class__()
newobj.update(self)
return newobj
copy = __copy__
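# A minimal sketch (hypothetical 'log' namespace) of how NamespaceSet dispatches
# config entries: every key under a registered namespace is passed to its
# handler as (remaining_key, value); keys for unregistered namespaces are ignored.
def _example_namespace_handler():
    seen = {}
    ns = NamespaceSet()
    ns['log'] = lambda k, v: seen.__setitem__(k, v)
    ns({'log.level': 'DEBUG', 'log.file': '/tmp/app.log', 'other.key': 1})
    assert seen == {'level': 'DEBUG', 'file': '/tmp/app.log'}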
class Config(dict):
"""A dict-like set of configuration data, with defaults and namespaces.
May take a file, filename, or dict.
"""
defaults = {}
environments = {}
namespaces = NamespaceSet()
def __init__(self, file=None, **kwargs):
self.reset()
if file is not None:
self.update(file)
if kwargs:
self.update(kwargs)
def reset(self):
"""Reset self to default values."""
self.clear()
dict.update(self, self.defaults)
def update(self, config):
"""Update self from a dict, file or filename."""
if isinstance(config, basestring):
# Filename
config = Parser().dict_from_file(config)
elif hasattr(config, 'read'):
# Open file object
config = Parser().dict_from_file(config)
else:
config = config.copy()
self._apply(config)
def _apply(self, config):
"""Update self from a dict."""
which_env = config.get('environment')
if which_env:
env = self.environments[which_env]
for k in env:
if k not in config:
config[k] = env[k]
dict.update(self, config)
self.namespaces(config)
def __setitem__(self, k, v):
dict.__setitem__(self, k, v)
self.namespaces({k: v})
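# A minimal sketch (hypothetical keys) of Config with an 'environment' template:
# entries from Config.environments[name] are merged in only when the incoming
# config does not already define them.
def _example_config_environment():
    class MyConfig(Config):
        defaults = {'engine.autoreload.on': True}
        environments = {'production': {'engine.autoreload.on': False}}
    cfg = MyConfig()
    cfg.update({'environment': 'production', 'server.socket_port': 8080})
    assert cfg['engine.autoreload.on'] is False
    assert cfg['server.socket_port'] == 8080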
class Parser(ConfigParser):
"""Sub-class of ConfigParser that keeps the case of options and that
raises an exception if the file cannot be read.
"""
def optionxform(self, optionstr):
return optionstr
def read(self, filenames):
if isinstance(filenames, basestring):
filenames = [filenames]
for filename in filenames:
# try:
# fp = open(filename)
# except IOError:
# continue
fp = open(filename)
try:
self._read(fp, filename)
finally:
fp.close()
def as_dict(self, raw=False, vars=None):
"""Convert an INI file to a dictionary"""
# Load INI file into a dict
result = {}
for section in self.sections():
if section not in result:
result[section] = {}
for option in self.options(section):
value = self.get(section, option, raw=raw, vars=vars)
try:
value = unrepr(value)
except Exception:
x = sys.exc_info()[1]
msg = ("Config error in section: %r, option: %r, "
"value: %r. Config values must be valid Python." %
(section, option, value))
raise ValueError(msg, x.__class__.__name__, x.args)
result[section][option] = value
return result
def dict_from_file(self, file):
if hasattr(file, 'read'):
self.readfp(file)
else:
self.read(file)
return self.as_dict()
# public domain "unrepr" implementation, found on the web and then improved.
class _Builder2:
def build(self, o):
m = getattr(self, 'build_' + o.__class__.__name__, None)
if m is None:
raise TypeError("unrepr does not recognize %s" %
repr(o.__class__.__name__))
return m(o)
def astnode(self, s):
"""Return a Python2 ast Node compiled from a string."""
try:
import compiler
except ImportError:
# Fallback to eval when compiler package is not available,
# e.g. IronPython 1.0.
return eval(s)
p = compiler.parse("__tempvalue__ = " + s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
def build_Subscript(self, o):
expr, flags, subs = o.getChildren()
expr = self.build(expr)
subs = self.build(subs)
return expr[subs]
def build_CallFunc(self, o):
children = map(self.build, o.getChildren())
callee = children.pop(0)
kwargs = children.pop() or {}
starargs = children.pop() or ()
args = tuple(children) + tuple(starargs)
return callee(*args, **kwargs)
def build_List(self, o):
return map(self.build, o.getChildren())
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = i.next()
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
name = o.name
if name == 'None':
return None
if name == 'True':
return True
if name == 'False':
return False
# See if the Name is a package or module. If it is, import it.
try:
return modules(name)
except ImportError:
pass
# See if the Name is in builtins.
try:
return getattr(builtins, name)
except AttributeError:
pass
raise TypeError("unrepr could not resolve the name %s" % repr(name))
def build_Add(self, o):
left, right = map(self.build, o.getChildren())
return left + right
def build_Mul(self, o):
left, right = map(self.build, o.getChildren())
return left * right
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_NoneType(self, o):
return None
def build_UnarySub(self, o):
return -self.build(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build(o.getChildren()[0])
class _Builder3:
def build(self, o):
m = getattr(self, 'build_' + o.__class__.__name__, None)
if m is None:
raise TypeError("unrepr does not recognize %s" %
repr(o.__class__.__name__))
return m(o)
def astnode(self, s):
"""Return a Python3 ast Node compiled from a string."""
try:
import ast
except ImportError:
# Fallback to eval when ast package is not available,
# e.g. IronPython 1.0.
return eval(s)
p = ast.parse("__tempvalue__ = " + s)
return p.body[0].value
def build_Subscript(self, o):
return self.build(o.value)[self.build(o.slice)]
def build_Index(self, o):
return self.build(o.value)
def build_Call(self, o):
callee = self.build(o.func)
if o.args is None:
args = ()
else:
args = tuple([self.build(a) for a in o.args])
if o.starargs is None:
starargs = ()
else:
starargs = self.build(o.starargs)
if o.kwargs is None:
kwargs = {}
else:
kwargs = self.build(o.kwargs)
return callee(*(args + starargs), **kwargs)
def build_List(self, o):
return list(map(self.build, o.elts))
def build_Str(self, o):
return o.s
def build_Num(self, o):
return o.n
def build_Dict(self, o):
return dict([(self.build(k), self.build(v))
for k, v in zip(o.keys, o.values)])
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
name = o.id
if name == 'None':
return None
if name == 'True':
return True
if name == 'False':
return False
# See if the Name is a package or module. If it is, import it.
try:
return modules(name)
except ImportError:
pass
# See if the Name is in builtins.
try:
import builtins
return getattr(builtins, name)
except AttributeError:
pass
raise TypeError("unrepr could not resolve the name %s" % repr(name))
def build_UnaryOp(self, o):
op, operand = map(self.build, [o.op, o.operand])
return op(operand)
def build_BinOp(self, o):
left, op, right = map(self.build, [o.left, o.op, o.right])
return op(left, right)
def build_Add(self, o):
return _operator.add
def build_Mult(self, o):
return _operator.mul
def build_USub(self, o):
return _operator.neg
def build_Attribute(self, o):
parent = self.build(o.value)
return getattr(parent, o.attr)
def build_NoneType(self, o):
return None
def unrepr(s):
"""Return a Python object compiled from a string."""
if not s:
return s
if sys.version_info < (3, 0):
b = _Builder2()
else:
b = _Builder3()
obj = b.astnode(s)
return b.build(obj)
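# A minimal sketch of unrepr() on typical config values. Assumes a Python
# version whose ast module still produces Num/Str nodes (i.e. pre-3.8), which
# is what _Builder3 above was written against.
def _example_unrepr():
    assert unrepr("123") == 123
    assert unrepr("[1, 'two', 3.0]") == [1, 'two', 3.0]
    assert unrepr("{'a': 1, 'b': 2}") == {'a': 1, 'b': 2}
    assert unrepr("") == ""  # falsy strings are returned unchanged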
def modules(modulePath):
"""Load a module and retrieve a reference to that module."""
try:
mod = sys.modules[modulePath]
if mod is None:
raise KeyError()
except KeyError:
__import__(modulePath)
mod = sys.modules[modulePath]
return mod
def attributes(full_attribute_name):
"""Load a module and retrieve an attribute of that module."""
# Parse out the path, module, and attribute
last_dot = full_attribute_name.rfind(".")
attr_name = full_attribute_name[last_dot + 1:]
mod_path = full_attribute_name[:last_dot]
mod = modules(mod_path)
# Let an AttributeError propagate outward.
try:
attr = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
# Return a reference to the attribute.
return attr
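# A minimal sketch: attributes() resolves a dotted path to an attribute of an
# importable module.
def _example_attributes():
    import os
    assert attributes("os.path.join") is os.path.join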
|
|
"""Support for Todoist task management (https://todoist.com)."""
from datetime import datetime, timedelta
import logging
from todoist.api import TodoistAPI
import voluptuous as vol
from homeassistant.components.calendar import PLATFORM_SCHEMA, CalendarEventDevice
from homeassistant.const import CONF_ID, CONF_NAME, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.util import Throttle, dt
from .const import (
ALL_DAY,
ALL_TASKS,
CHECKED,
COMPLETED,
CONF_PROJECT_DUE_DATE,
CONF_EXTRA_PROJECTS,
CONF_PROJECT_LABEL_WHITELIST,
CONF_PROJECT_WHITELIST,
CONTENT,
DATETIME,
DESCRIPTION,
DOMAIN,
DUE,
DUE_DATE,
DUE_DATE_LANG,
DUE_DATE_STRING,
DUE_DATE_VALID_LANGS,
DUE_TODAY,
END,
ID,
LABELS,
NAME,
OVERDUE,
PRIORITY,
PROJECT_ID,
PROJECT_NAME,
PROJECTS,
SERVICE_NEW_TASK,
START,
SUMMARY,
TASKS,
)
_LOGGER = logging.getLogger(__name__)
NEW_TASK_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(CONTENT): cv.string,
vol.Optional(PROJECT_NAME, default="inbox"): vol.All(cv.string, vol.Lower),
vol.Optional(LABELS): cv.ensure_list_csv,
vol.Optional(PRIORITY): vol.All(vol.Coerce(int), vol.Range(min=1, max=4)),
vol.Exclusive(DUE_DATE_STRING, "due_date"): cv.string,
vol.Optional(DUE_DATE_LANG): vol.All(cv.string, vol.In(DUE_DATE_VALID_LANGS)),
vol.Exclusive(DUE_DATE, "due_date"): cv.string,
}
)
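# A minimal sketch (hypothetical values) of a service payload accepted by
# NEW_TASK_SERVICE_SCHEMA: only CONTENT is required, LABELS may be a CSV string,
# and DUE_DATE_STRING/DUE_DATE are mutually exclusive.
def _example_new_task_payload():
    return NEW_TASK_SERVICE_SCHEMA({
        CONTENT: "Pick up the mail",
        PROJECT_NAME: "inbox",
        LABELS: "errands,home",
        PRIORITY: 4,
        DUE_DATE_STRING: "tomorrow at noon",
        DUE_DATE_LANG: "en",
    })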
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_EXTRA_PROJECTS, default=[]): vol.All(
cv.ensure_list,
vol.Schema(
[
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_PROJECT_DUE_DATE): vol.Coerce(int),
vol.Optional(CONF_PROJECT_WHITELIST, default=[]): vol.All(
cv.ensure_list, [vol.All(cv.string, vol.Lower)]
),
vol.Optional(
CONF_PROJECT_LABEL_WHITELIST, default=[]
): vol.All(cv.ensure_list, [vol.All(cv.string, vol.Lower)]),
}
)
]
),
),
}
)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Todoist platform."""
token = config.get(CONF_TOKEN)
# Look up IDs based on (lowercase) names.
project_id_lookup = {}
label_id_lookup = {}
api = TodoistAPI(token)
api.sync()
# Setup devices:
# Grab all projects.
projects = api.state[PROJECTS]
# Grab all labels
labels = api.state[LABELS]
# Add all Todoist-defined projects.
project_devices = []
for project in projects:
# Project is an object, not a dict!
# Because of that, we convert what we need to a dict.
project_data = {CONF_NAME: project[NAME], CONF_ID: project[ID]}
project_devices.append(TodoistProjectDevice(hass, project_data, labels, api))
# Cache the names so we can easily look up name->ID.
project_id_lookup[project[NAME].lower()] = project[ID]
# Cache all label names
for label in labels:
label_id_lookup[label[NAME].lower()] = label[ID]
# Check config for more projects.
extra_projects = config[CONF_EXTRA_PROJECTS]
for project in extra_projects:
# Special filter: By date
project_due_date = project.get(CONF_PROJECT_DUE_DATE)
# Special filter: By label
project_label_filter = project[CONF_PROJECT_LABEL_WHITELIST]
# Special filter: By name
# Names must be converted into IDs.
project_name_filter = project[CONF_PROJECT_WHITELIST]
project_id_filter = [
project_id_lookup[project_name.lower()]
for project_name in project_name_filter
]
# Create the custom project and add it to the devices array.
project_devices.append(
TodoistProjectDevice(
hass,
project,
labels,
api,
project_due_date,
project_label_filter,
project_id_filter,
)
)
add_entities(project_devices)
def handle_new_task(call):
"""Call when a user creates a new Todoist Task from HASS."""
project_name = call.data[PROJECT_NAME]
project_id = project_id_lookup[project_name]
# Create the task
item = api.items.add(call.data[CONTENT], project_id=project_id)
if LABELS in call.data:
task_labels = call.data[LABELS]
label_ids = [label_id_lookup[label.lower()] for label in task_labels]
item.update(labels=label_ids)
if PRIORITY in call.data:
item.update(priority=call.data[PRIORITY])
_due: dict = {}
if DUE_DATE_STRING in call.data:
_due["string"] = call.data[DUE_DATE_STRING]
if DUE_DATE_LANG in call.data:
_due["lang"] = call.data[DUE_DATE_LANG]
if DUE_DATE in call.data:
due_date = dt.parse_datetime(call.data[DUE_DATE])
if due_date is None:
due = dt.parse_date(call.data[DUE_DATE])
due_date = datetime(due.year, due.month, due.day)
# Format it in the manner Todoist expects
due_date = dt.as_utc(due_date)
date_format = "%Y-%m-%dT%H:%M"
due_date = datetime.strftime(due_date, date_format)
_due["date"] = due_date
if _due:
item.update(due=_due)
# Commit changes
api.commit()
_LOGGER.debug("Created Todoist task: %s", call.data[CONTENT])
hass.services.register(
DOMAIN, SERVICE_NEW_TASK, handle_new_task, schema=NEW_TASK_SERVICE_SCHEMA
)
def _parse_due_date(data: dict) -> datetime:
"""Parse the due date dict into a datetime object."""
# Add time information to date only strings.
if len(data["date"]) == 10:
data["date"] += "T00:00:00"
# If there is no timezone provided, use UTC.
if data["timezone"] is None:
data["date"] += "Z"
return dt.parse_datetime(data["date"])
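# A minimal sketch (hypothetical due dicts) of what _parse_due_date() accepts:
# date-only entries without a timezone get "T00:00:00" and "Z" appended before
# parsing; full datetimes with a timezone are parsed as-is.
def _example_parse_due_date():
    due = _parse_due_date({"date": "2021-06-01", "timezone": None})
    assert (due.year, due.month, due.day) == (2021, 6, 1)
    due = _parse_due_date({"date": "2021-06-01T09:00:00Z", "timezone": "UTC"})
    assert due.hour == 9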
class TodoistProjectDevice(CalendarEventDevice):
"""A device for getting the next Task from a Todoist Project."""
def __init__(
self,
hass,
data,
labels,
token,
latest_task_due_date=None,
whitelisted_labels=None,
whitelisted_projects=None,
):
"""Create the Todoist Calendar Event Device."""
self.data = TodoistProjectData(
data,
labels,
token,
latest_task_due_date,
whitelisted_labels,
whitelisted_projects,
)
self._cal_data = {}
self._name = data[CONF_NAME]
@property
def event(self):
"""Return the next upcoming event."""
return self.data.event
@property
def name(self):
"""Return the name of the entity."""
return self._name
def update(self):
"""Update all Todoist Calendars."""
self.data.update()
# Set Todoist-specific data that can't easily be grabbed
self._cal_data[ALL_TASKS] = [
task[SUMMARY] for task in self.data.all_project_tasks
]
async def async_get_events(self, hass, start_date, end_date):
"""Get all events in a specific time frame."""
return await self.data.async_get_events(hass, start_date, end_date)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
if self.data.event is None:
# No tasks, we don't REALLY need to show anything.
return None
attributes = {}
attributes[DUE_TODAY] = self.data.event[DUE_TODAY]
attributes[OVERDUE] = self.data.event[OVERDUE]
attributes[ALL_TASKS] = self._cal_data[ALL_TASKS]
attributes[PRIORITY] = self.data.event[PRIORITY]
attributes[LABELS] = self.data.event[LABELS]
return attributes
class TodoistProjectData:
"""
Class used by the Task Device service object to hold all Todoist Tasks.
This is analogous to the GoogleCalendarData found in the Google Calendar
component.
Takes an object with a 'name' field and optionally an 'id' field (either
user-defined or from the Todoist API), a Todoist API token, and an optional
integer specifying the latest number of days from now a task can be due (7
means everything due in the next week, 0 means today, etc.).
This object has an exposed 'event' property (used by the Calendar platform
to determine the next calendar event) and an exposed 'update' method (used
by the Calendar platform to poll for new calendar events).
The 'event' is a representation of a Todoist Task, with defined parameters
of 'due_today' (is the task due today?), 'all_day' (does the task have a
due date?), 'task_labels' (all labels assigned to the task), 'message'
(the content of the task, e.g. 'Fetch Mail'), 'description' (a URL pointing
to the task on the Todoist website), 'end_time' (what time the event is
due), 'start_time' (what time this event was last updated), 'overdue' (is
the task past its due date?), 'priority' (1-4, how important the task is,
with 4 being the most important), and 'all_tasks' (all tasks in this
project, sorted by how important they are).
'offset_reached', 'location', and 'friendly_name' are defined by the
platform itself, but are not used by this component at all.
The 'update' method polls the Todoist API for new projects/tasks, as well
as any updates to current projects/tasks. This is throttled to every
MIN_TIME_BETWEEN_UPDATES minutes.
"""
def __init__(
self,
project_data,
labels,
api,
latest_task_due_date=None,
whitelisted_labels=None,
whitelisted_projects=None,
):
"""Initialize a Todoist Project."""
self.event = None
self._api = api
self._name = project_data[CONF_NAME]
# If no ID is defined, fetch all tasks.
self._id = project_data.get(CONF_ID)
# All labels the user has defined, for easy lookup.
self._labels = labels
# Not tracked: order, indent, comment_count.
self.all_project_tasks = []
# The latest date a task can be due (for making lists of everything
# due today, or everything due in the next week, for example).
if latest_task_due_date is not None:
self._latest_due_date = dt.utcnow() + timedelta(days=latest_task_due_date)
else:
self._latest_due_date = None
# Only tasks with one of these labels will be included.
if whitelisted_labels is not None:
self._label_whitelist = whitelisted_labels
else:
self._label_whitelist = []
# This project includes only projects with these names.
if whitelisted_projects is not None:
self._project_id_whitelist = whitelisted_projects
else:
self._project_id_whitelist = []
def create_todoist_task(self, data):
"""
Create a dictionary based on a Task passed from the Todoist API.
Will return 'None' if the task is to be filtered out.
"""
task = {}
# Fields are required to be in all returned task objects.
task[SUMMARY] = data[CONTENT]
task[COMPLETED] = data[CHECKED] == 1
task[PRIORITY] = data[PRIORITY]
task[DESCRIPTION] = "https://todoist.com/showTask?id={}".format(data[ID])
# All task Labels (optional parameter).
task[LABELS] = [
label[NAME].lower() for label in self._labels if label[ID] in data[LABELS]
]
if self._label_whitelist and (
not any(label in task[LABELS] for label in self._label_whitelist)
):
# We're not on the whitelist, return invalid task.
return None
# Due dates (optional parameter).
# The due date is the END date -- the task cannot be completed
# past this time.
# That means that the START date is the earliest time one can
# complete the task.
# Generally speaking, that means right now.
task[START] = dt.utcnow()
if data[DUE] is not None:
task[END] = _parse_due_date(data[DUE])
if self._latest_due_date is not None and (
task[END] > self._latest_due_date
):
# This task is out of range of our due date;
# it shouldn't be counted.
return None
task[DUE_TODAY] = task[END].date() == datetime.today().date()
# Special case: Task is overdue.
if task[END] <= task[START]:
task[OVERDUE] = True
# Set end time to the current time plus 1 hour.
# We're pretty much guaranteed to update within that 1 hour,
# so it should be fine.
task[END] = task[START] + timedelta(hours=1)
else:
task[OVERDUE] = False
else:
# If we ask for everything due before a certain date, don't count
# things which have no due dates.
if self._latest_due_date is not None:
return None
# Define values for tasks without due dates
task[END] = None
task[ALL_DAY] = True
task[DUE_TODAY] = False
task[OVERDUE] = False
        # Not tracked: id, comments, project_id, order, indent, recurring.
return task
@staticmethod
def select_best_task(project_tasks):
"""
Search through a list of events for the "best" event to select.
The "best" event is determined by the following criteria:
* A proposed event must not be completed
* A proposed event must have an end date (otherwise we go with
the event at index 0, selected above)
* A proposed event must be on the same day or earlier as our
current event
* If a proposed event is an earlier day than what we have so
far, select it
* If a proposed event is on the same day as our current event
and the proposed event has a higher priority than our current
event, select it
* If a proposed event is on the same day as our current event,
has the same priority as our current event, but is due earlier
in the day, select it
"""
# Start at the end of the list, so if tasks don't have a due date
# the newest ones are the most important.
event = project_tasks[-1]
for proposed_event in project_tasks:
if event == proposed_event:
continue
if proposed_event[COMPLETED]:
# Event is complete!
continue
if proposed_event[END] is None:
# No end time:
if event[END] is None and (proposed_event[PRIORITY] < event[PRIORITY]):
# They also have no end time,
# but we have a higher priority.
event = proposed_event
continue
if event[END] is None:
# We have an end time, they do not.
event = proposed_event
continue
if proposed_event[END].date() > event[END].date():
# Event is too late.
continue
if proposed_event[END].date() < event[END].date():
# Event is earlier than current, select it.
event = proposed_event
continue
if proposed_event[PRIORITY] > event[PRIORITY]:
# Proposed event has a higher priority.
event = proposed_event
continue
if proposed_event[PRIORITY] == event[PRIORITY] and (
proposed_event[END] < event[END]
):
event = proposed_event
continue
return event
async def async_get_events(self, hass, start_date, end_date):
"""Get all tasks in a specific time frame."""
if self._id is None:
project_task_data = [
task
for task in self._api.state[TASKS]
if not self._project_id_whitelist
or task[PROJECT_ID] in self._project_id_whitelist
]
else:
project_data = await hass.async_add_executor_job(
self._api.projects.get_data, self._id
)
project_task_data = project_data[TASKS]
events = []
for task in project_task_data:
due_date = _parse_due_date(task["due"])
if start_date < due_date < end_date:
event = {
"uid": task["id"],
"title": task["content"],
"start": due_date.isoformat(),
"end": due_date.isoformat(),
"allDay": True,
}
events.append(event)
return events
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data."""
if self._id is None:
self._api.reset_state()
self._api.sync()
project_task_data = [
task
for task in self._api.state[TASKS]
if not self._project_id_whitelist
or task[PROJECT_ID] in self._project_id_whitelist
]
else:
project_task_data = self._api.projects.get_data(self._id)[TASKS]
# If we have no data, we can just return right away.
if not project_task_data:
_LOGGER.debug("No data for %s", self._name)
self.event = None
return
# Keep an updated list of all tasks in this project.
project_tasks = []
for task in project_task_data:
todoist_task = self.create_todoist_task(task)
if todoist_task is not None:
# A None task means it is invalid for this project
project_tasks.append(todoist_task)
if not project_tasks:
# We had no valid tasks
_LOGGER.debug("No valid tasks for %s", self._name)
self.event = None
return
# Make sure the task collection is reset to prevent an
# infinite collection repeating the same tasks
self.all_project_tasks.clear()
# Organize the best tasks (so users can see all the tasks
# they have, organized)
while project_tasks:
best_task = self.select_best_task(project_tasks)
_LOGGER.debug("Found Todoist Task: %s", best_task[SUMMARY])
project_tasks.remove(best_task)
self.all_project_tasks.append(best_task)
self.event = self.all_project_tasks[0]
# Convert datetime to a string again
if self.event is not None:
if self.event[START] is not None:
self.event[START] = {
DATETIME: self.event[START].strftime(DATE_STR_FORMAT)
}
if self.event[END] is not None:
self.event[END] = {DATETIME: self.event[END].strftime(DATE_STR_FORMAT)}
else:
# HASS gets cranky if a calendar event never ends
# Let's set our "due date" to tomorrow
self.event[END] = {
DATETIME: (datetime.utcnow() + timedelta(days=1)).strftime(
DATE_STR_FORMAT
)
}
_LOGGER.debug("Updated %s", self._name)
|
|
"""Rackspace provider implementation"""
import json
import logging
import time
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["rackspacecloud.com"]
def _async_request_completed(payload):
"""Looks into an async response payload to see if the requested job has finished."""
if payload["status"] == "COMPLETED":
return True
if payload["status"] == "ERROR":
return True
return False
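# A minimal sketch of the async job payloads _async_request_completed() expects:
# the Rackspace job status endpoint returns a "status" field that stays at an
# in-progress value (e.g. "RUNNING") until the job reaches COMPLETED or ERROR.
def _example_async_payloads():
    assert _async_request_completed({"status": "COMPLETED"}) is True
    assert _async_request_completed({"status": "ERROR"}) is True
    assert _async_request_completed({"status": "RUNNING"}) is False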
def provider_parser(subparser):
"""Configure provider parser for Rackspace"""
subparser.add_argument(
"--auth-account", help="specify account number for authentication"
)
subparser.add_argument(
"--auth-username",
help="specify username for authentication. Only used if --auth-token is empty.",
)
subparser.add_argument(
"--auth-api-key",
help="specify api key for authentication. Only used if --auth-token is empty.",
)
subparser.add_argument(
"--auth-token",
help=(
"specify token for authentication. "
"If empty, the username and api key will be used to create a token."
),
)
subparser.add_argument(
"--sleep-time",
type=float,
default=1,
help="number of seconds to wait between update requests.",
)
class Provider(BaseProvider):
"""Provider class for Rackspace"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self.api_endpoint = "https://dns.api.rackspacecloud.com/v1.0"
self.auth_api_endpoint = "https://identity.api.rackspacecloud.com/v2.0"
self._auth_token = None
self._auth_account = None
def _get_rackspace_option(self, key):
private_key = "_" + key
result = None
if hasattr(self, private_key):
result = getattr(self, private_key)
if result is None:
result = self._get_provider_option(key)
return result
def _authenticate(self):
self._auth_token = self._get_provider_option("auth_token")
if not self._auth_token:
auth_response = self._auth_request(
"POST",
"/tokens",
{
"auth": {
"RAX-KSKEY:apiKeyCredentials": {
"username": self._get_provider_option("auth_username"),
"apiKey": self._get_provider_option("auth_api_key"),
}
}
},
)
self._auth_token = auth_response["access"]["token"]["id"]
self._auth_account = auth_response["access"]["token"]["tenant"]["id"]
payload = self._get("/domains", {"name": self.domain})
if not payload["domains"]:
raise AuthenticationError("No domain found")
if len(payload["domains"]) > 1:
raise AuthenticationError("Too many domains found. This should not happen")
self.domain_id = payload["domains"][0]["id"]
    # Create record. If record already exists with the same content, do nothing
def _create_record(self, rtype, name, content):
data = {
"records": [{"type": rtype, "name": self._full_name(name), "data": content}]
}
if self._get_lexicon_option("ttl"):
data["records"][0]["ttl"] = self._get_lexicon_option("ttl")
try:
payload = self._post_and_wait(f"/domains/{self.domain_id}/records", data)
except Exception as error:
if str(error).startswith("Record is a duplicate of another record"):
return self._update_record(None, rtype, name, content)
raise error
success = len(payload["records"]) > 0
LOGGER.debug("create_record: %s", success)
return success
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def _list_records(self, rtype=None, name=None, content=None):
params = {"per_page": 100}
if rtype:
params["type"] = rtype
if name:
params["name"] = self._full_name(name)
# Sending the data filter to the Rackspace DNS API results in a 503 error
# if content:
# params['data'] = content
payload = self._get(f"/domains/{self.domain_id}/records", params)
records = list(payload["records"])
if content:
records = [record for record in records if record["data"] == content]
records = [
{
"type": record["type"],
"name": record["name"],
"ttl": record["ttl"],
"content": record["data"],
"id": record["id"],
}
for record in records
]
LOGGER.debug("list_records: %s", records)
return records
# Create or update a record.
def _update_record(self, identifier, rtype=None, name=None, content=None):
data = {}
if rtype:
data["type"] = rtype
if name:
data["name"] = self._full_name(name)
if content:
data["data"] = content
if self._get_lexicon_option("ttl"):
data["ttl"] = self._get_lexicon_option("ttl")
if identifier is None:
records = self._list_records(rtype, name)
if not records:
raise Exception("Unable to find record to modify: " + name)
identifier = records[0]["id"]
self._put_and_wait(f"/domains/{self.domain_id}/records/{identifier}", data)
# If it didn't raise from the http status code, then we're good
LOGGER.debug("update_record: %s", identifier)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
delete_record_id = []
if not identifier:
records = self._list_records(rtype, name, content)
delete_record_id = [record["id"] for record in records]
else:
delete_record_id.append(identifier)
LOGGER.debug("delete_records: %s", delete_record_id)
for record_id in delete_record_id:
self._delete_and_wait(f"/domains/{self.domain_id}/records/{record_id}")
# If it didn't raise from the http status code, then we're good
success = True
LOGGER.debug("delete_record: %s", success)
return success
# Helpers
def _request(self, action="GET", url="/", data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
LOGGER.debug(
"request tenant ID: %s", self._get_rackspace_option("auth_account")
)
full_url = (
f"{self.api_endpoint}/{self._get_rackspace_option('auth_account')}{url}"
)
# For data= argument, use None value for GET requests to comply with Rackspace API
response = requests.request(
action,
full_url,
params=query_params,
data=json.dumps(data) if action != "GET" else None,
headers={
"X-Auth-Token": self._get_rackspace_option("auth_token"),
"Content-Type": "application/json",
},
)
# if the request fails for any reason, throw an error.
response.raise_for_status()
return response.json()
# Non-GET requests to the Rackspace CloudDNS API are asynchronous
def _request_and_wait(self, action="POST", url="/", data=None, query_params=None):
result = self._request(action, url, data, query_params)
sleep_time = self._get_rackspace_option("sleep_time") or "1"
sleep_time = float(sleep_time)
while not _async_request_completed(result):
if sleep_time:
time.sleep(sleep_time)
result = self._update_response(result)
if result["status"] == "ERROR":
raise Exception(result["error"]["details"])
if "response" in result:
return result["response"]
return None
def _post_and_wait(self, url="/", data=None, query_params=None):
return self._request_and_wait("POST", url, data, query_params)
def _put_and_wait(self, url="/", data=None, query_params=None):
return self._request_and_wait("PUT", url, data, query_params)
def _delete_and_wait(self, url="/", data=None, query_params=None):
return self._request_and_wait("DELETE", url, data, query_params)
def _update_response(self, payload):
response = requests.request(
"GET",
payload["callbackUrl"],
params={"showDetails": "true"},
data={},
headers={
"X-Auth-Token": self._get_rackspace_option("auth_token"),
"Content-Type": "application/json",
},
)
# if the request fails for any reason, throw an error.
response.raise_for_status()
return response.json()
def _auth_request(self, action="GET", url="/", data=None, query_params=None):
if data is None:
data = {}
response = requests.request(
action,
self.auth_api_endpoint + url,
params=query_params,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
)
# if the request fails for any reason, throw an error.
response.raise_for_status()
return response.json()
|
|
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from distutils.util import strtobool
import json
import os
import sys
import time
import yaml
from kubernetes import client
from kfserving import constants
from kfserving import KFServingClient
from kfserving import V1beta1InferenceService
from kfserving import V1beta1InferenceServiceSpec
from kfserving import V1beta1LightGBMSpec
from kfserving import V1beta1ONNXRuntimeSpec
from kfserving import V1beta1PMMLSpec
from kfserving import V1beta1PredictorSpec
from kfserving import V1beta1SKLearnSpec
from kfserving import V1beta1TFServingSpec
from kfserving import V1beta1TorchServeSpec
from kfserving import V1beta1TritonSpec
from kfserving import V1beta1XGBoostSpec
from kfserving.api.kf_serving_watch import isvc_watch
AVAILABLE_FRAMEWORKS = {
'tensorflow': V1beta1TFServingSpec,
'pytorch': V1beta1TorchServeSpec,
'sklearn': V1beta1SKLearnSpec,
'xgboost': V1beta1XGBoostSpec,
'onnx': V1beta1ONNXRuntimeSpec,
'triton': V1beta1TritonSpec,
'pmml': V1beta1PMMLSpec,
'lightgbm': V1beta1LightGBMSpec
}
def create_predictor_spec(framework, storage_uri, canary_traffic_percent,
service_account, min_replicas, max_replicas, containers, request_timeout):
"""
Create and return V1beta1PredictorSpec to be used in a V1beta1InferenceServiceSpec
object.
"""
predictor_spec = V1beta1PredictorSpec(
service_account_name=service_account,
min_replicas=(min_replicas
if min_replicas >= 0
else None
),
max_replicas=(max_replicas
if max_replicas > 0 and max_replicas >= min_replicas
else None
),
containers=(containers or None),
canary_traffic_percent=canary_traffic_percent,
timeout=request_timeout
)
# If the containers field was set, then this is custom model serving.
if containers:
return predictor_spec
if framework not in AVAILABLE_FRAMEWORKS:
raise ValueError("Error: No matching framework: " + framework)
setattr(
predictor_spec,
framework,
AVAILABLE_FRAMEWORKS[framework](storage_uri=storage_uri)
)
return predictor_spec
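# A minimal sketch (hypothetical values): a predictor spec for a sklearn model
# with one to two replicas, no custom containers, and the default traffic split.
def _example_sklearn_predictor_spec():
    return create_predictor_spec(
        framework='sklearn',
        storage_uri='gs://my-bucket/sklearn/model',
        canary_traffic_percent=100,
        service_account='',
        min_replicas=1,
        max_replicas=2,
        containers=[],
        request_timeout=60,
    )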
def create_custom_container_spec(custom_model_spec):
"""
Given a JSON container spec, return a V1Container object
representing the container. This is used for passing in
custom server images. The expected format for the input is:
{ "image": "test/containerimage",
"port":5000,
"name": "custom-container" }
"""
env = (
[
client.V1EnvVar(name=i["name"], value=i["value"])
for i in custom_model_spec["env"]
]
if custom_model_spec.get("env", "")
else None
)
ports = (
[client.V1ContainerPort(container_port=int(custom_model_spec.get("port", "")), protocol="TCP")]
if custom_model_spec.get("port", "")
else None
)
resources = (
client.V1ResourceRequirements(
requests=(custom_model_spec["resources"]["requests"]
if custom_model_spec.get('resources', {}).get('requests')
else None
),
limits=(custom_model_spec["resources"]["limits"]
if custom_model_spec.get('resources', {}).get('limits')
else None
),
)
if custom_model_spec.get("resources", {})
else None
)
return client.V1Container(
name=custom_model_spec.get("name", "custom-container"),
image=custom_model_spec["image"],
env=env,
ports=ports,
command=custom_model_spec.get("command", None),
args=custom_model_spec.get("args", None),
image_pull_policy=custom_model_spec.get("image_pull_policy", None),
working_dir=custom_model_spec.get("working_dir", None),
resources=resources
)
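# A minimal sketch of building a V1Container from the JSON format described in
# the docstring above (image/port/name taken from that example; the env entry
# is hypothetical).
def _example_custom_container():
    spec = {
        "image": "test/containerimage",
        "port": 5000,
        "name": "custom-container",
        "env": [{"name": "STORAGE_URI", "value": "gs://my-bucket/model"}],
    }
    container = create_custom_container_spec(spec)
    assert container.image == "test/containerimage"
    assert container.ports[0].container_port == 5000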
def create_inference_service(metadata, predictor_spec):
"""
Build and return V1beta1InferenceService object.
"""
return V1beta1InferenceService(
api_version=constants.KFSERVING_V1BETA1,
kind=constants.KFSERVING_KIND,
metadata=metadata,
spec=V1beta1InferenceServiceSpec(
predictor=predictor_spec
),
)
def submit_api_request(kfs_client, action, name, isvc, namespace=None,
watch=False, timeout_seconds=300):
"""
Creates or updates a Kubernetes custom object. This code is borrowed from the
KFServingClient.create/patch methods as using those directly doesn't allow for
sending in dicts as the InferenceService object which is needed for supporting passing
in raw InferenceService serialized YAML.
"""
custom_obj_api = kfs_client.api_instance
args = [constants.KFSERVING_GROUP,constants.KFSERVING_V1BETA1_VERSION,
namespace, constants.KFSERVING_PLURAL]
if action == 'update':
outputs = custom_obj_api.patch_namespaced_custom_object(*args, name, isvc)
else:
outputs = custom_obj_api.create_namespaced_custom_object(*args, isvc)
if watch:
        # Sleep 3 seconds to avoid reading a status that is still 'True' from before this request.
time.sleep(3)
isvc_watch(
name=outputs['metadata']['name'],
namespace=namespace,
timeout_seconds=timeout_seconds)
else:
return outputs
def perform_action(action, model_name, model_uri, canary_traffic_percent, namespace,
framework, custom_model_spec, service_account, inferenceservice_yaml,
request_timeout, autoscaling_target=0, enable_istio_sidecar=True,
watch_timeout=300, min_replicas=0, max_replicas=0):
"""
Perform the specified action. If the action is not 'delete' and `inferenceService_yaml`
was provided, the dict representation of the YAML will be sent directly to the
Kubernetes API. Otherwise, a V1beta1InferenceService object will be built using the
provided input and then sent for creation/update.
:return InferenceService JSON output
"""
kfs_client = KFServingClient()
if inferenceservice_yaml:
# Overwrite name and namespace if exists
if namespace:
inferenceservice_yaml['metadata']['namespace'] = namespace
if model_name:
inferenceservice_yaml['metadata']['name'] = model_name
else:
model_name = inferenceservice_yaml['metadata']['name']
kfsvc = inferenceservice_yaml
elif action != 'delete':
# Create annotations
annotations = {}
if int(autoscaling_target) != 0:
annotations["autoscaling.knative.dev/target"] = str(autoscaling_target)
if not enable_istio_sidecar:
annotations["sidecar.istio.io/inject"] = 'false'
if not annotations:
annotations = None
metadata = client.V1ObjectMeta(
name=model_name, namespace=namespace, annotations=annotations
)
# If a custom model container spec was provided, build the V1Container
# object using it.
containers = []
if custom_model_spec:
containers = [create_custom_container_spec(custom_model_spec)]
# Build the V1beta1PredictorSpec.
predictor_spec = create_predictor_spec(
framework, model_uri, canary_traffic_percent, service_account,
min_replicas, max_replicas, containers, request_timeout
)
kfsvc = create_inference_service(metadata, predictor_spec)
if action == "create":
submit_api_request(kfs_client, 'create', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "update":
submit_api_request(kfs_client, 'update', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "apply":
try:
submit_api_request(kfs_client, 'create', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
except Exception:
submit_api_request(kfs_client, 'update', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "delete":
kfs_client.delete(model_name, namespace=namespace)
else:
raise ("Error: No matching action: " + action)
model_status = kfs_client.get(model_name, namespace=namespace)
return model_status
def main():
"""
This parses arguments passed in from the CLI and performs the corresponding action.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--action", type=str, help="Action to execute on KFServing", default="create"
)
parser.add_argument(
"--model-name", type=str, help="Name to give to the deployed model"
)
parser.add_argument(
"--model-uri",
type=str,
help="Path of the S3, GCS or PVC directory containing the model",
)
parser.add_argument(
"--canary-traffic-percent",
type=str,
help="The traffic split percentage between the candidate model and the last ready model",
default="100",
)
parser.add_argument(
"--namespace",
type=str,
help="Kubernetes namespace where the KFServing service is deployed",
default="",
)
parser.add_argument(
"--framework",
type=str,
help="Model serving framework to use. Available frameworks: " +
str(list(AVAILABLE_FRAMEWORKS.keys())),
default=""
)
parser.add_argument(
"--custom-model-spec",
type=json.loads,
help="The container spec for a custom model runtime",
default="{}",
)
parser.add_argument(
"--autoscaling-target", type=str, help="Autoscaling target number", default="0"
)
parser.add_argument(
"--service-account",
type=str,
help="Service account containing s3 credentials",
default="",
)
parser.add_argument(
"--enable-istio-sidecar",
type=strtobool,
help="Whether to inject istio sidecar",
default="True"
)
parser.add_argument(
"--inferenceservice-yaml",
type=yaml.safe_load,
help="Raw InferenceService serialized YAML for deployment",
default="{}"
)
parser.add_argument("--output-path", type=str, help="Path to store URI output")
parser.add_argument("--watch-timeout",
type=str,
help="Timeout seconds for watching until InferenceService becomes ready.",
default="300")
parser.add_argument(
"--min-replicas", type=str, help="Minimum number of replicas", default="-1"
)
parser.add_argument(
"--max-replicas", type=str, help="Maximum number of replicas", default="-1"
)
parser.add_argument("--request-timeout",
type=str,
help="Specifies the number of seconds to wait before timing out a request to the component.",
default="60")
args = parser.parse_args()
action = args.action.lower()
model_name = args.model_name
model_uri = args.model_uri
canary_traffic_percent = int(args.canary_traffic_percent)
namespace = args.namespace
framework = args.framework.lower()
output_path = args.output_path
custom_model_spec = args.custom_model_spec
autoscaling_target = int(args.autoscaling_target)
service_account = args.service_account
enable_istio_sidecar = args.enable_istio_sidecar
inferenceservice_yaml = args.inferenceservice_yaml
watch_timeout = int(args.watch_timeout)
min_replicas = int(args.min_replicas)
max_replicas = int(args.max_replicas)
request_timeout = int(args.request_timeout)
# Default the namespace.
if not namespace:
namespace = 'anonymous'
# If no namespace was provided, but one is listed in the YAML, use that.
if inferenceservice_yaml and inferenceservice_yaml.get('metadata', {}).get('namespace'):
namespace = inferenceservice_yaml['metadata']['namespace']
# Only require model name when an Isvc YAML was not provided.
if not inferenceservice_yaml and not model_name:
parser.error('{} argument is required when performing "{}" action'.format(
'model_name', action
))
# If the action isn't a delete, require 'model-uri' and 'framework' only if an Isvc YAML
# or custom model container spec are not provided.
if action != 'delete':
if not inferenceservice_yaml and not custom_model_spec and not (model_uri and framework):
parser.error('Arguments for {} and {} are required when performing "{}" action'.format(
'model_uri', 'framework', action
))
model_status = perform_action(
action=action,
model_name=model_name,
model_uri=model_uri,
canary_traffic_percent=canary_traffic_percent,
namespace=namespace,
framework=framework,
custom_model_spec=custom_model_spec,
autoscaling_target=autoscaling_target,
service_account=service_account,
enable_istio_sidecar=enable_istio_sidecar,
inferenceservice_yaml=inferenceservice_yaml,
request_timeout=request_timeout,
watch_timeout=watch_timeout,
min_replicas=min_replicas,
max_replicas=max_replicas
)
print(model_status)
if action != 'delete':
# Check whether the model is ready
for condition in model_status["status"]["conditions"]:
if condition['type'] == 'Ready':
if condition['status'] == 'True':
print('Model is ready\n')
break
print('Model is timed out, please check the InferenceService events for more details.')
sys.exit(1)
try:
print( model_status["status"]["url"] + " is the Knative domain.")
print("Sample test commands: \n")
# model_status['status']['url'] is like http://flowers-sample.kubeflow.example.com/v1/models/flowers-sample
print("curl -v -X GET %s" % model_status["status"]["url"])
print("\nIf the above URL is not accessible, it's recommended to setup Knative with a configured DNS.\n"\
"https://knative.dev/docs/install/installing-istio/#configuring-dns")
except Exception:
print("Model is not ready, check the logs for the Knative URL status.")
sys.exit(1)
if output_path:
try:
# Remove some less needed fields to reduce output size.
del model_status['metadata']['managedFields']
del model_status['status']['conditions']
if sys.getsizeof(model_status) > 3000:
del model_status['components']['predictor']['address']['url']
del model_status['components']['predictor']['latestCreatedRevision']
del model_status['components']['predictor']['latestReadyRevision']
del model_status['components']['predictor']['latestRolledoutRevision']
del model_status['components']['predictor']['url']
del model_status['spec']
except KeyError:
pass
if not os.path.exists(os.path.dirname(output_path)):
os.makedirs(os.path.dirname(output_path))
with open(output_path, "w") as report:
report.write(json.dumps(model_status, indent=4))
if __name__ == "__main__":
main()
|
|
"""ToyBox API Server in Python 3.7."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import json
import logging
import mimetypes
import os
import traceback
import auth
import flask
from werkzeug.exceptions import HTTPException
import dao
# configure logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
# here we define user roles; roles are additive bundles of privileges
ROLE_USER = 'user' # id_token verified; whitelist rules passed
ROLE_MODERATOR = 'moderator' # valid user, who has moderator privileges
ROLE_ADMIN = 'admin' # valid user, who has admin privileges
ROLES_ALL = [ROLE_USER, ROLE_MODERATOR, ROLE_ADMIN]
# profile visibility
PROFILE_VISIBILITY_PRIVATE = 'private'
PROFILE_VISIBILITY_PUBLIC = 'public'
PROFILE_VISIBILITY = {
PROFILE_VISIBILITY_PRIVATE: 'private (hidden from other site users)',
PROFILE_VISIBILITY_PUBLIC: 'public (visible to other site users)',
}
# tags that user can assign to their profile
PROFILE_TAGS = {
'1': 'sports',
'2': 'dance',
'3': 'science',
'4': 'reading',
'5': 'politics',
'6': 'languages',
    '7': 'travel',
'8': 'art',
}
# limits
MAX_MEMBERS_IN_LIST = 500
MAX_POSTS_IN_LIST = 500
# application schema; this is delivered to the client as JSON
APP_SCHEMA = {
'version': 'V1',
'is_consent_required': True,
'user': {
'role': {
'keys': dict([(role, role) for role in ROLES_ALL]),
},
},
'profile': {
'visibility': {
'keys': dict([(key, key) for key in PROFILE_VISIBILITY.keys()]),
'values': PROFILE_VISIBILITY,
},
'tags': PROFILE_TAGS,
},
'post': {
'list': {
'max_length': MAX_POSTS_IN_LIST,
},
},
'member': {
'list': {
'max_length': MAX_MEMBERS_IN_LIST,
},
},
}
# admin email addresses
ADMIN_EMAILS = set([
'psimakov@google.com',
])
# whitelisted domains
WHITELISTED_DOMAINS = set([])
# whitelisted emails
WHITELISTED_EMAILS = set([
# App Script robot account to test OAuth auth
'toybox-app-script-drive-api@a120-toybox.iam.gserviceaccount.com',
])
# Google Cloud Platform project that manages your Firebase Authentication users
FIREBASE_CLOUD_PROJECT_ID = 'psimakov-pwa'
# Firebase/OAuth user id namespaces
FIREBASE_UID_NS = '1'
OAUTH_UID_NS = '2'
# JSON response details
API_RESPONSE_PREFIX = ')]}\'\n'
API_RESPONSE_CONTENT_TYPE = 'application/json; charset=utf-8'
# relative path to static assets
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static')
if not os.path.isdir(STATIC_DIR):
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static')
assert os.path.isdir(STATIC_DIR), 'Invalid static folder: %s' % STATIC_DIR
# we need an id_token verifier, which is bound to the Firebase project_id
firebase_utils = auth.FirebaseAppUtils(FIREBASE_CLOUD_PROJECT_ID)
oauth2_utils = auth.OAuth2AppUtils(FIREBASE_CLOUD_PROJECT_ID)
def parse_api_response(body):
assert body.startswith(API_RESPONSE_PREFIX)
unprefixed = body[len(API_RESPONSE_PREFIX):]
return json.loads(unprefixed)
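# Illustrative sketch (not part of the original server): the prefix defined in
# API_RESPONSE_PREFIX is an XSSI guard that format_api_response() below adds to
# every JSON body; parse_api_response() strips it again. The payload dict here
# is made up purely for the round trip.
def _example_api_response_roundtrip():
    body = API_RESPONSE_PREFIX + json.dumps({'server': {'lang': 'PY37'}})
    return parse_api_response(body)  # -> {'server': {'lang': 'PY37'}}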
def set_cors_headers(headers):
headers['Access-Control-Allow-Origin'] = '*'
headers['Access-Control-Allow-Headers'] = '*'
headers['Access-Control-Allow-Methods'] = '*'
def set_no_cache_headers(headers):
headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
headers['Expires'] = 'Mon, 01 Jan 1990 00:00:00 GMT'
headers['Pragma'] = 'no-cache'
def serve_static_file(filename):
"""Serves static file."""
# Remove drive letter (if we are on Windows).
filename = os.path.abspath(filename)
filename = os.path.join(os.sep, filename)
unused_drive, path_no_drive = os.path.splitdrive(filename)
# Remove leading path separator.
relfilename = path_no_drive[1:]
filename = os.path.join(STATIC_DIR, relfilename)
logging.info("Serving static file: %s" % filename)
if not os.path.isfile(filename):
flask.abort(404)
mime = mimetypes.guess_type(filename)[0]
if not mime:
mime = 'text/plain'
with open(filename, 'rb') as data:
return flask.Response(data.read(), 200, mimetype=mime)
def format_api_response(status_code, data):
response = flask.Response(
'%s%s' % (API_RESPONSE_PREFIX, json.dumps(data, sort_keys=True)),
status_code, {
'Content-Type': API_RESPONSE_CONTENT_TYPE,
})
set_no_cache_headers(response.headers)
return response
def get_server_info():
return {
'lang': 'PY37',
'time': str(datetime.datetime.now()),
'software': os.environ.get('GAE_RUNTIME', 'UNKNOWN_RUNTIME'),
'version': os.environ.get('GAE_VERSION', 'UNKNOWN_VERSION'),
}
def abort_invalid_attribute(name, message):
flask.abort(format_api_response(
400, dao.InvalidFieldValueError(name, message).to_json_serializable()))
def abort_user_error(message):
flask.abort(format_api_response(
400, dao.BusinessRuleError(message).to_json_serializable()))
app = flask.Flask(__name__)
@app.after_request
def set_cors_policy(response):
set_cors_headers(response.headers)
return response
@app.route('/', methods=['GET'])
def static_root():
return flask.redirect('/index.html', code=301)
@app.route('/<path:filename>', methods=['GET'])
def static_get(filename):
response = serve_static_file(filename)
set_no_cache_headers(response.headers)
return response
def api_v1_ping():
"""Handles ping()."""
return format_api_response(200, {
'server': get_server_info(),
})
class ETag(object):
"""ETag.
We use "etag" as a generic mechanism to manage concurrent from multiple
clients; client will receive "etag" from server and must send it back for any
mutattion operation; server can encode into "etag" enough data to properly
order mutations or detect what objects have changed; how storage issues
"etags" varies; sometimes you may choose not to use them and let mutations
override each other; sometimes you can use "version" attribute that you
monotonically increment; timestamp can also be used as long as server issues
it to prevent clock skews; most project will not use "etags"; we do provide
support and example here so you know how to do it if you have to
"""
ETAG_NAME_SETTINGS = 'settings_etag'
@classmethod
def from_request(cls, name):
data = flask.request.form
value = data.get(name, None)
if value is None:
abort_user_error('Missing required parameter "%s".' % name)
return value
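# Illustrative sketch (an assumption, not original code): the optimistic
# concurrency round trip the ETag docstring describes. The arguments are
# placeholders; dao.Members().update() raises dao.ETagError when the stored
# version no longer matches the one the client sent back as "settings_etag".
def _example_settings_etag_roundtrip(member_uid, new_settings_json):
    members = dao.Members()
    member = members.get_or_create_member(member_uid)
    try:
        members.update(member_uid, new_settings_json, version=member.version)
    except dao.ETagError:
        # a concurrent update won; re-read the member and retry, or surface the error
        raise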
def get_roles_for(user):
"""Checks if user can access the system, and in which specific roles."""
email = user.get('email', '')
good_status = 'Your account is in good standing.'
# admin users
if email in ADMIN_EMAILS:
return ROLES_ALL, good_status
# whitelisted users
if WHITELISTED_EMAILS and (email in WHITELISTED_EMAILS):
return [ROLE_USER], good_status
# whitelisted domains users
domain = email.split('@')[-1] # tolerate emails without an '@'
if WHITELISTED_DOMAINS and (domain in WHITELISTED_DOMAINS):
return [ROLE_USER], good_status
# if not limited to any domains
if not WHITELISTED_DOMAINS:
return [ROLE_USER], good_status
# otherwise access denied
bad_status = 'Your account doesn\'t have rights to access this system.'
return [], bad_status
def get_user_for_request(request):
"""Determines user for the request."""
user = None
if request.headers.get(oauth2_utils.HTTP_HEADER_NAME):
verified_claims = oauth2_utils.verify_access_token_from_request(request)
user = oauth2_utils.get_user_for_access_token(verified_claims)
scope = OAUTH_UID_NS
elif request.headers.get(firebase_utils.HTTP_HEADER_NAME):
verified_claims = firebase_utils.verify_id_token_from_request(request)
user = firebase_utils.get_user_for(verified_claims)
scope = FIREBASE_UID_NS
if not user:
return None
assert user['uid']
user['uid'] = '%s/%s' % (scope, user['uid'])
user['uid_scope'] = scope
return user
def get_uid_for(user):
uid = user['uid']
assert uid
return uid
def with_user(method):
"""Executed method with current user."""
user = get_user_for_request(flask.request)
if not user:
return flask.Response('Unauthorized.', 401)
roles, status = get_roles_for(user)
result = None
if method:
if not roles:
return flask.Response('Access denied.', 403)
try:
result = method(user, roles)
except HTTPException: # these are flask.abort; ok
raise
except dao.BusinessRuleError: # these are our dao.* exceptions; ok
raise
except Exception: # pylint: disable=broad-except
logging.error('Exception:\n%s', traceback.format_exc())
flask.abort(format_api_response(
500,
dao.BusinessRuleError(
'Internal server error. Please try again later.'
).to_json_serializable()))
member = dao.Members().get_or_create_member(get_uid_for(user))
user['roles'] = roles
user['settings'] = json.loads(member.data)
user['slug'] = member.slug
user['status'] = status
user[ETag.ETAG_NAME_SETTINGS] = member.version
response = {
'app': {
'schema': APP_SCHEMA,
},
'user': user,
'server': get_server_info(),
}
if result or result == []: # pylint: disable=g-explicit-bool-comparison
response['result'] = result
return format_api_response(200, response)
def validate_profile(profile):
# not strictly necessary, but we require a few profile attributes to be set
# just to show error handling end to end, from client to server
if not profile.get('title'):
abort_invalid_attribute('title', 'Title is required.')
if not profile.get('location'):
abort_invalid_attribute('location', 'Location is required.')
if not profile.get('about'):
abort_invalid_attribute('about', 'Information about you is required.')
def api_v1_whoami():
"""Queries capabilities of user specified by id_token in HTTP header."""
return with_user(None)
def api_v1_registration():
"""Register current user into the program."""
def action(user, unused_roles):
members = dao.Members()
# load member current settings
member_uid = get_uid_for(user)
member = members.get_or_create_member(member_uid)
version = ETag.from_request(ETag.ETAG_NAME_SETTINGS)
if not version:
version = member.version
# update registration portion of settings
settings = json.loads(member.data)
settings['registered'] = True
settings['registration'] = {
'displayName': user['displayName'],
'photoURL': user['photoURL'],
'email': user['email'],
'created_on': dao.datetime_to_str(dao.timezone_aware_now()),
}
# save to storage
try:
members.update(member_uid, json.dumps(settings), version=version)
except dao.ETagError as error:
flask.abort(format_api_response(400, error.to_json_serializable()))
return with_user(action)
def api_v1_profile():
"""Updates current user profile."""
# load and validate new settings from request
json_string = flask.request.form.get('profile', None)
if not json_string:
abort_user_error('Missing required parameter "profile".')
try:
profile = json.loads(json_string)
except: # pylint: disable=bare-except
abort_user_error('Provided "profile" is not a valid JSON.')
validate_profile(profile)
def action(user, unused_roles):
members = dao.Members()
# load member current settings
member_uid = get_uid_for(user)
member = members.get_or_create_member(member_uid)
version = ETag.from_request(ETag.ETAG_NAME_SETTINGS)
if not version:
version = member.version
# update profile portion of settings
settings = json.loads(member.data)
settings['profile'] = profile
# save to storage
try:
members.update(member_uid, json.dumps(settings), version=version)
except dao.ETagError as error:
flask.abort(format_api_response(400, error.to_json_serializable()))
return with_user(action)
def api_v1_members():
"""Lists members."""
def action(unused_user, roles):
members = dao.Members()
is_admin = ROLE_ADMIN in roles
results = []
# add registered members
for member in members.query_members():
if len(results) > MAX_MEMBERS_IN_LIST:
break
settings = json.loads(member.data)
profile = settings.get('profile', None)
registration = settings.get('registration', None)
# check rights
is_public = profile and (
profile.get('visibility') == PROFILE_VISIBILITY_PUBLIC)
if not(is_public or is_admin):
continue
# add projection to output
results.append({
'slug': member.slug,
'profile': profile,
'registration': registration,
})
return results
return with_user(action)
def api_v1_posts_get():
"""Lists all posts."""
def action(user, unused_roles):
posts = dao.Posts()
member_uid = get_uid_for(user)
return dao.posts_query_to_list(
member_uid, posts.query_posts(), client=posts.client)
return with_user(action)
def api_v1_member_posts():
"""Lists all posts of current user."""
def action(user, unused_roles):
posts = dao.Posts()
member_uid = get_uid_for(user)
return dao.posts_query_to_list(
member_uid, posts.query_member_posts(member_uid), client=posts.client)
return with_user(action)
def api_v1_posts_insert():
"""Records new user post."""
json_string = flask.request.form.get('post', None)
if not json_string:
abort_user_error('Missing required parameter "post".')
def action(user, unused_roles):
member_uid = get_uid_for(user)
dao.Posts().insert_post(member_uid, json_string)
return with_user(action)
def api_v1_posts_post():
"""Marks post deleted new user post."""
json_string = flask.request.form.get('post', None)
if not json_string:
abort_user_error('Missing required parameter "post".')
post = json.loads(json_string)
post_uid = post.get('uid')
if not post_uid:
abort_user_error('Missing required parameter "post.uid".')
def action(user, unused_roles):
member_uid = get_uid_for(user)
dao.Posts().mark_post_deleted(member_uid, post_uid)
return with_user(action)
def api_v1_votes_put():
"""Records user vote."""
# extract and validate post data
json_string = flask.request.form.get('vote', None)
if not json_string:
abort_user_error('Missing required parameter "vote".')
vote = json.loads(json_string)
post_uid = vote.get('uid')
if not post_uid:
abort_user_error('Missing required parameter "vote.uid".')
value = vote.get('value')
if not value:
abort_user_error('Missing required parameter "vote.value".')
def action(user, unused_roles):
votes = dao.Votes()
member_uid = get_uid_for(user)
# record vote
post, vote = votes.insert_vote(member_uid, post_uid, value)
result = dao.posts_query_to_list(member_uid, [post], fill_votes=False,
client=votes.client)[0]
# update my_vote_value directly; it may not get picked up
# due to indexed query being out of date
result['my_vote_value'] = vote.value
return result
return with_user(action)
# all HTTP routes are registered in one place here
ALL_ROUTES = [
('/api/rest/v1/ping', api_v1_ping, ['GET']),
('/api/rest/v1/whoami', api_v1_whoami, ['GET']),
('/api/rest/v1/registration', api_v1_registration, ['PUT']),
('/api/rest/v1/profile', api_v1_profile, ['POST']),
('/api/rest/v1/members', api_v1_members, ['GET']),
('/api/rest/v1/member/posts', api_v1_member_posts, ['GET']),
('/api/rest/v1/posts', api_v1_posts_get, ['GET']),
('/api/rest/v1/posts', api_v1_posts_insert, ['PUT']),
('/api/rest/v1/posts', api_v1_posts_post, ['POST']),
('/api/rest/v1/votes', api_v1_votes_put, ['PUT']),
]
# add routes to Flask
for path, view_func, methods in ALL_ROUTES:
app.add_url_rule(path, view_func=view_func, methods=methods)
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host='0.0.0.0', port=8080, debug=True)
|
|
'''
Copyright 2013 Douglas Gibbons
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from django.http import HttpResponse
from django.shortcuts import render,redirect
import dash.forms
from dash.services import requestToObjects,findObject,updateObject
import logging
from dash.models import Build,Product,Event,Testrun,Deploy,Testpack,Environment,Host
import json
import datetime
from django.views.decorators.csrf import csrf_exempt
from django.db.models import Max
from django.core import serializers
logger = logging.getLogger(__name__)
def index(request):
return render(request, 'index.html',{})
def help(request):
return render(request, 'help.html',{})
@csrf_exempt
def new_build(request):
''' Example request: /dash/new_build?Product.name=test%20product&Build.version=1.1.1-234&Build.revision=12
responds with the id of the newly created build (see the sketch after this view)
'''
objects = requestToObjects(dash.models,request)
product = findObject(objects,dash.models.Product)
product.save()
build = findObject(objects,dash.models.Build)
build.product = product
build.save()
return HttpResponse(build.id)
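# Illustrative sketch (not part of the original views): driving new_build()
# with Django's test client using the Model.field=value convention from the
# docstring above. The '/dash/new_build' path assumes the URLconf mounts this
# app under /dash/ as in that example.
def _example_new_build_request():
    from django.test import Client
    response = Client().get('/dash/new_build', {
        'Product.name': 'test product',
        'Build.version': '1.1.1-234',
        'Build.revision': '12',
    })
    return response.content  # body is the id of the newly created Build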
@csrf_exempt
def update_build(request):
id = request.REQUEST['Build.id']
build = Build.objects.get(pk=id)
updateObject(build, request)
build.save()
return HttpResponse(build.id)
@csrf_exempt
def new_testrun(request):
objects = requestToObjects(dash.models,request)
product = findObject(objects,dash.models.Product)
product.save()
testpack = findObject(objects,dash.models.Testpack)
testpack.save()
environment = findObject(objects,dash.models.Environment)
environment.save()
testrun = findObject(objects,dash.models.Testrun)
testrun.product = product
testrun.testpack = testpack
testrun.environment = environment
testrun.save()
return HttpResponse(testrun.id)
@csrf_exempt
def update_testrun(request):
id = request.REQUEST['Testrun.id']
obj = Testrun.objects.get(pk=id)
updateObject(obj, request)
obj.save()
return HttpResponse(obj.id)
@csrf_exempt
def new_deploy(request):
objects = requestToObjects(dash.models,request)
product = findObject(objects,dash.models.Product)
product.save()
environment = findObject(objects,dash.models.Environment)
environment.save()
host = findObject(objects,dash.models.Host)
host.save()
deploy = findObject(objects,dash.models.Deploy)
deploy.product = product
deploy.environment = environment
deploy.host = host
deploy.save()
return HttpResponse(deploy.id)
@csrf_exempt
def update_deploy(request):
id = request.REQUEST['Deploy.id']
obj = Deploy.objects.get(pk=id)
updateObject(obj, request)
obj.save()
return HttpResponse(obj.id)
def candidate(request):
products = Product.objects.all()
events = None
selectedProduct = None
selectedVersion = None
try:
selectedProduct = request.REQUEST['product']
selectedVersion = request.REQUEST['version']
product = Product.objects.get(pk=selectedProduct)
events = Event.objects.filter(product = product, version = selectedVersion).order_by('start')
for e in events:
if (e.end and e.start):
e.duration = e.end - e.start
except KeyError:
logger.exception('No events')
logger.debug('No events')
return render(
request, 'candidate.html',
{'products' : products, 'events' : events, 'selectedProduct' : selectedProduct, 'selectedVersion' : selectedVersion }
)
def product_versions(request):
product = Product.objects.get(pk = request.REQUEST['product'])
versions = Event.objects.filter(product = product).order_by('-version').values('version').distinct()
return HttpResponse(
json.dumps(list(versions.values_list('version',flat=True) ))
)
""" Host view """
def host(request):
hosts = Host.objects.all()
try:
host = Host.objects.get(pk=request.REQUEST['host'])
# Filter of deploys, selecting latest of each product type for the environment
ids = (
Deploy.objects.filter(host = host)
.values('product')
.annotate(max_id=Max('id'))
.values_list('max_id',flat=True)
)
deploys = Deploy.objects.filter(pk__in=ids)
except KeyError:
host = False
deploys = False
return render(
request, 'host.html',
{'deploys' : deploys, 'hosts' : hosts, 'host': host }
)
""" Environment view """
def environment(request):
environments = Environment.objects.all()
try:
environment = Environment.objects.get(pk=request.REQUEST['environment'])
# Filter of deploys, selecting latest of each product type for the environment
ids = (
Deploy.objects.filter(environment = environment)
.values('product','host')
.annotate(max_id=Max('id'))
.values_list('max_id',flat=True)
)
deploys = Deploy.objects.filter(pk__in=ids).order_by('product','host')
except KeyError:
environment = False
deploys = False
return render(
request, 'environment.html',
{'deploys' : deploys, 'environments' : environments, 'environment': environment }
)
def pipeline(request):
products = Product.objects.all()
loop_times = range(101)[1:] # Range for numpipes selection
''' How many recent pipelines to show '''
try:
numpipes = int(request.REQUEST['numpipes'])
except KeyError:
'''Default number of pipelines when none was requested'''
numpipes = 20
try:
product = Product.objects.get(pk=request.REQUEST['product'])
except KeyError:
'''Do not show table as no product selected'''
product = False
return render(
request, 'pipeline.html',
{'products' : products, 'product' : product, 'loop_times' : loop_times, 'numpipes' : numpipes }
)
"""
Removes old data up_to a certain date
"""
def cleanup(request):
up_to = request.REQUEST['up_to']
Event.objects.filter(end__lt = up_to).delete()
Event.objects.filter(start__lt = up_to).filter(end__isnull = True).delete()
# Clean up host, products, environment, testpack records
# where there is no longer an event
Product.objects.exclude(
pk__in = Event.objects.values_list('product', flat=True)
).delete()
Host.objects.exclude(
pk__in = Deploy.objects.values_list('host', flat=True)
).delete()
# Environment IDs from Testrun and Deploy tables
environment_ids = (
list(Testrun.objects.values_list('environment', flat=True))
)
environment_ids.extend(
list(Deploy.objects.values_list('environment', flat=True))
)
Environment.objects.exclude(
pk__in = environment_ids
).delete()
Testpack.objects.exclude(
pk__in = Testrun.objects.values_list('testpack', flat=True)
).delete()
return HttpResponse("OK", content_type="text/plain")
def pipeline_chart(request):
pipes = False
product = False
''' How many recent pipelines to show '''
try:
numpipes = int(request.REQUEST['numpipes'])
except KeyError:
'''Default number of pipelines when none was requested'''
numpipes = 20
try:
product = Product.objects.get(pk=request.REQUEST['product'])
pipes = getPipes(product,numpipes)
except KeyError:
'''Do not show table as no product selected'''
return render(
request, 'pipeline_chart.html',
{'pipes' : pipes, 'product' : product, }
)
def getPipes(product,numToReturn):
'''Empty class to store data for one product/version'''
class Pipe:
pass
recentBuilds = Build.objects.filter(product = product).order_by('-start')[:numToReturn]
pipes = []
for build in recentBuilds:
pipe = Pipe()
pipe.product = product
pipe.version = build.version
logger.debug('Pipe version: '+build.version)
pipe.events = Event.objects.filter(product = product,version = build.version).order_by('start')
pipes.append(pipe)
'''Figure out widths of event bars'''
maxPipeDuration = None
'''Find longest Pipe time in Epoche'''
for pipe in pipes:
mintime = None
maxtime = None
for event in pipe.events:
'''Images:
flashing for in progress ( no end time )
green for success
red for failure
blue for unknown
'''
if event.end == None and event.success == None:
event.img = 'bluedot_flashing.gif'
elif event.end == None and event.success == False:
event.img = 'reddot_flashing.gif'
elif event.end == None and event.success == True:
event.img = 'greendot_flashing.gif'
elif event.success == None:
event.img = 'bluedot.png'
elif event.success == True:
event.img = 'greendot.png'
elif event.success == False:
event.img = 'reddot.png'
'''If no end time we can assume event is still going on'''
if event.end == None:
event.end = datetime.datetime.now()
eventStartEpoche = int(event.start.strftime('%s'))
eventEndEpoche = int(event.end.strftime('%s'))
if event.start != None and ( mintime == None or eventStartEpoche < mintime ):
mintime = int( eventStartEpoche )
if event.end != None and ( maxtime == None or eventEndEpoche > maxtime ):
maxtime = eventEndEpoche
logger.debug('Pipe mintime: ' + str(mintime))
logger.debug('Pipe maxtime: ' + str(maxtime))
if mintime != None and maxtime != None and ( maxPipeDuration == None or maxtime - mintime > maxPipeDuration ):
maxPipeDuration = maxtime - mintime
for p in pipes:
pipeStartT = int (p.events[0].start.strftime('%s'))
for e in p.events:
'''Epoche formats'''
eventStartT = int( e.start.strftime('%s') )
eventEndT = int (e.end.strftime('%s') )
e.startPos = float(eventStartT - pipeStartT) / maxPipeDuration * 500
e.endPos = float(eventEndT - pipeStartT) / maxPipeDuration * 500 - e.startPos
logger.debug('eventStartT: ' + str(eventStartT) + ' eventEndT: '+ str(eventEndT) + ' maxPipeDuration: ' + str(maxPipeDuration))
logger.debug('pipeStartT: ' + str(pipeStartT) )
return pipes
|
|
# -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from urllib2 import parse_http_list as _parse_list_header
except ImportError: # pragma: no cover
from urllib.request import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
from hashlib import md5
import base64
from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
_cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
string_types, try_coerce_native, to_bytes, PY2, \
integer_types
# incorrect
_cookie_charset = 'latin1'
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' %
(_quoted_string_re, _quoted_string_re))
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_hop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
'upgrade'
])
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot', # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required', # see RFC 6585
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
449: 'Retry With', # proprietary MS extension
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
def wsgi_to_bytes(data):
"""coerce wsgi unicode represented bytes to real ones
"""
if isinstance(data, bytes):
return data
return data.encode('latin1') #XXX: utf8 fallback?
def bytes_to_wsgi(data):
assert isinstance(data, bytes), 'data must be bytes'
if isinstance(data, str):
return data
else:
return data.decode('latin1')
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
if isinstance(value, bytes):
value = bytes_to_wsgi(value)
value = str(value)
if allow_token:
token_chars = _token_chars | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in iteritems(options):
if value is None:
segments.append(key)
else:
segments.append('%s=%s' % (key, quote_header_value(value)))
return '; '.join(segments)
def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iteritems(iterable):
if value is None:
items.append(key)
else:
items.append('%s=%s' % (
key,
quote_header_value(value, allow_token=allow_token)
))
else:
items = [quote_header_value(x, allow_token=allow_token)
for x in iterable]
return ', '.join(items)
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_dict_header(value, cls=dict):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict (or any other mapping object created from
the type with a dict-like interface provided by the `cls` argument):
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
.. versionchanged:: 0.9
Added support for `cls` argument.
:param value: a string with a dict header.
:param cls: callable to use for storage of parsed results.
:return: an instance of `cls`
"""
result = cls()
if not isinstance(value, text_type):
#XXX: validate
value = bytes_to_wsgi(value)
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value, key == 'filename')
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra
def parse_accept_header(value, cls=None):
"""Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result)
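# Illustrative sketch (not part of werkzeug): value/quality extraction as
# performed by parse_accept_header(); entries without an explicit q parameter
# default to quality 1 and the resulting Accept object is sorted best-first.
def _example_parse_accept_header():
    accept = parse_accept_header('text/html,application/json;q=0.8')
    assert accept.best == 'text/html'
    assert accept.quality('application/json') == 0.8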
def parse_cache_control_header(value, on_update=None, cls=None):
"""Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`~werkzeug.datastructures.RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`~werkzeug.datastructures.RequestCacheControl` is used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
"""Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`~werkzeug.datastructures.HeaderSet`
object is changed.
:return: a :class:`~werkzeug.datastructures.HeaderSet`
"""
if not value:
return HeaderSet(None, on_update)
return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return
value = wsgi_to_bytes(value)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == b'basic':
try:
username, password = base64.b64decode(auth_info).split(b':', 1)
except Exception as e:
return
return Authorization('basic', {'username': bytes_to_wsgi(username),
'password': bytes_to_wsgi(password)})
elif auth_type == b'digest':
auth_map = parse_dict_header(auth_info)
for key in 'username', 'realm', 'nonce', 'uri', 'response':
if not key in auth_map:
return
if 'qop' in auth_map:
if not auth_map.get('nc') or not auth_map.get('cnonce'):
return
return Authorization('digest', auth_map)
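# Illustrative sketch (not part of werkzeug): parsing a Basic credential;
# 'dXNlcjpzZWNyZXQ=' is base64 for 'user:secret'.
def _example_parse_authorization_header():
    auth = parse_authorization_header('Basic dXNlcjpzZWNyZXQ=')
    assert auth.type == 'basic'
    assert (auth.username, auth.password) == ('user', 'secret')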
def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update)
return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
on_update)
def parse_if_range_header(value):
"""Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object.
.. versionadded:: 0.7
"""
if not value:
return IfRange()
date = parse_date(value)
if date is not None:
return IfRange(date=date)
# drop weakness information
return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
"""Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive.
.. versionadded:: 0.7
"""
if not value or '=' not in value:
return None
ranges = []
last_end = 0
units, rng = value.split('=', 1)
units = units.strip().lower()
for item in rng.split(','):
item = item.strip()
if '-' not in item:
return None
if item.startswith('-'):
if last_end < 0:
return None
begin = int(item)
end = None
last_end = -1
elif '-' in item:
begin, end = item.split('-', 1)
begin = int(begin)
if begin < last_end or last_end < 0:
return None
if end:
end = int(end) + 1
if begin >= end:
return None
else:
end = None
last_end = end
ranges.append((begin, end))
return Range(units, ranges)
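# Illustrative sketch (not part of werkzeug): the tuples produced by
# parse_range_header() are non-inclusive on the right, and a suffix range such
# as 'bytes=-500' keeps its negative start with an open end.
def _example_parse_range_header():
    rv = parse_range_header('bytes=0-499,500-999')
    assert rv.units == 'bytes'
    assert rv.ranges == [(0, 500), (500, 1000)]
    assert parse_range_header('bytes=-500').ranges == [(-500, None)]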
def parse_content_range_header(value, on_update=None):
"""Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible.
.. versionadded:: 0.7
:param value: a content range header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.ContentRange`
object is changed.
"""
if value is None:
return None
try:
units, rangedef = (value or '').strip().split(None, 1)
except ValueError:
return None
if '/' not in rangedef:
return None
rng, length = rangedef.split('/', 1)
if length == '*':
length = None
elif length.isdigit():
length = int(length)
else:
return None
if rng == '*':
return ContentRange(units, None, None, length, on_update=on_update)
elif '-' not in rng:
return None
start, stop = rng.split('-', 1)
try:
start = int(start)
stop = int(stop) + 1
except ValueError:
return None
if is_byte_range_valid(start, stop, length):
return ContentRange(units, start, stop, length, on_update=on_update)
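# Illustrative sketch (not part of werkzeug): the stop value returned by
# parse_content_range_header() is exclusive, and a '*' length comes back as None.
def _example_parse_content_range_header():
    rv = parse_content_range_header('bytes 0-98/100')
    assert (rv.units, rv.start, rv.stop, rv.length) == ('bytes', 0, 99, 100)
    assert parse_content_range_header('bytes 0-98/*').length is None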
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'w/' + etag
return etag
def unquote_etag(etag):
"""Unquote a single etag:
>>> unquote_etag('w/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
"""
if not etag:
return None, None
etag = etag.strip()
weak = False
if etag[:2] in ('w/', 'W/'):
weak = True
etag = etag[2:]
if etag[:1] == etag[-1:] == '"':
etag = etag[1:-1]
return etag, weak
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
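# Illustrative sketch (not part of werkzeug): strong and weak tags end up in
# separate buckets of the ETags object returned by parse_etags().
def _example_parse_etags():
    etags = parse_etags('"foo", W/"bar"')
    assert 'foo' in etags
    assert etags.contains_weak('bar')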
def generate_etag(data):
"""Generate an etag for some data."""
return md5(data).hexdigest()
def parse_date(value):
"""Parse one of the following date formats into a datetime object:
.. sourcecode:: text
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
If parsing fails the return value is `None`.
:param value: a string with a supported date format.
:return: a :class:`datetime.datetime` object.
"""
if value:
t = parsedate_tz(value.strip())
if t is not None:
try:
year = t[0]
# unfortunately that function does not tell us if two digit
# years were part of the string, or if they were prefixed
# with two zeroes. So what we do is to assume that 69-99
# refer to 1900, and everything below to 2000
if year >= 0 and year <= 68:
year += 2000
elif year >= 69 and year <= 99:
year += 1900
return datetime(*((year,) + t[1:7])) - \
timedelta(seconds=t[-1] or 0)
except (ValueError, OverflowError):
return None
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
d.tm_mday, delim,
('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
)
def cookie_date(expires=None):
"""Formats the time to ensure compatibility with Netscape's cookie
standard.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
:param expires: If provided that date is used, otherwise the current.
"""
return _dump_date(expires, '-')
def http_date(timestamp=None):
"""Formats the time to match the RFC1123 date format.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
:param timestamp: If provided that date is used, otherwise the current.
"""
return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
"""Convenience method for conditional requests.
:param environ: the WSGI environment of the request to be checked.
:param etag: the etag for the response for comparison.
:param data: or alternatively the data of the response to automatically
generate an etag using :func:`generate_etag`.
:param last_modified: an optional date of the last modification.
:return: `True` if the resource was modified, otherwise `False`.
"""
if etag is None and data is not None:
etag = generate_etag(data)
elif data is not None:
raise TypeError('both data and etag given')
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
return False
unmodified = False
if isinstance(last_modified, string_types):
last_modified = parse_date(last_modified)
# ensure that microsecond is zero because the HTTP spec does not transmit
# that either and we might have some false positives. See issue #39
if last_modified is not None:
last_modified = last_modified.replace(microsecond=0)
modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
if modified_since and last_modified and last_modified <= modified_since:
unmodified = True
if etag:
if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
if if_none_match:
unmodified = if_none_match.contains_raw(etag)
return not unmodified
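# Illustrative sketch (not part of werkzeug): a conditional GET whose
# If-None-Match matches the current etag is reported as not modified.
def _example_is_resource_modified():
    environ = {'REQUEST_METHOD': 'GET', 'HTTP_IF_NONE_MATCH': '"abc"'}
    assert is_resource_modified(environ, etag='abc') is False
    assert is_resource_modified(environ, etag='other') is True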
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
"""Remove all entity headers from a list or :class:`Headers` object. This
operation works in-place. `Expires` and `Content-Location` headers are
by default not removed. The reason for this is :rfc:`2616` section
10.3.5 which specifies some entity headers that should be sent.
.. versionchanged:: 0.5
added `allowed` parameter.
:param headers: a list or :class:`Headers` object.
:param allowed: a list of headers that should still be allowed even though
they are entity headers.
"""
allowed = set(x.lower() for x in allowed)
headers[:] = [(key, value) for key, value in headers if
not is_entity_header(key) or key.lower() in allowed]
def remove_hop_by_hop_headers(headers):
"""Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
:class:`Headers` object. This operation works in-place.
.. versionadded:: 0.5
:param headers: a list or :class:`Headers` object.
"""
headers[:] = [(key, value) for key, value in headers if
not is_hop_by_hop_header(key)]
def is_entity_header(header):
"""Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
"""Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
"""Parse a cookie. Either from a string or WSGI environ.
Per default encoding errors are replaced (``errors='replace'``). If you want a
different behavior you can set `errors` to ``'ignore'`` or ``'strict'``. In
strict mode a :exc:`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
This function now returns a :class:`TypeConversionDict` instead of a
regular dict. The `cls` parameter was added.
:param header: the header to be used to parse the cookie. Alternatively
this can be a WSGI environment.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`TypeConversionDict` is
used.
"""
if isinstance(header, dict):
header = header.get('HTTP_COOKIE', '')
elif header is None:
header = ''
# If the value is a unicode string it's mangled through latin1. This
# is done because on PEP 3333 on Python 3 all headers are assumed latin1
# which however is incorrect for cookies, which are sent in page encoding.
# As a result we encode the header back to latin1 bytes here and decode the
# values with the requested charset in _parse_pairs() below.
if isinstance(header, text_type):
header = header.encode('latin1', 'replace')
if cls is None:
cls = TypeConversionDict
def _parse_pairs():
for key, val in _cookie_parse_impl(header):
key = to_unicode(key, charset, errors, allow_none_charset=True)
val = to_unicode(val, charset, errors, allow_none_charset=True)
yield try_coerce_native(key), val
return cls(_parse_pairs())
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False,
charset='utf-8', sync_expires=True):
"""Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
On Python 3 the return value of this function will be a unicode
string, on Python 2 it will be a native string. In both cases the
return value is usually restricted to ascii as the vast majority of
values are properly escaped, but that is no guarantee. If a unicode
string is returned it's tunneled through latin1 as required by
PEP 3333.
The return value is not ASCII safe if the key contains unicode
characters. This is technically against the specification but
happens in the wild. It's strongly recommended to not use
non-ASCII values for the keys.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
"""
key = to_bytes(key, charset)
value = to_bytes(value, charset)
if path is not None:
path = iri_to_uri(path, charset)
domain = _make_cookie_domain(domain)
if isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
if expires is not None:
if not isinstance(expires, string_types):
expires = cookie_date(expires)
elif max_age is not None and sync_expires:
expires = to_bytes(cookie_date(time() + max_age))
buf = [key + b'=' + _cookie_quote(value)]
# XXX: In theory all of these parameters that are not marked with `None`
# should be quoted. Because stdlib did not quote it before I did not
# want to introduce quoting there now.
for k, v, q in ((b'Domain', domain, True),
(b'Expires', expires, False,),
(b'Max-Age', max_age, False),
(b'Secure', secure, None),
(b'HttpOnly', httponly, None),
(b'Path', path, False)):
if q is None:
if v:
buf.append(k)
continue
if v is None:
continue
tmp = bytearray(k)
if not isinstance(v, (bytes, bytearray)):
v = to_bytes(text_type(v), charset)
if q:
v = _cookie_quote(v)
tmp += b'=' + v
buf.append(bytes(tmp))
# The return value will be an incorrectly encoded latin1 header on
# Python 3 for consistency with the headers object and a bytestring
# on Python 2 because that's how the API makes more sense.
rv = b'; '.join(buf)
if not PY2:
rv = rv.decode('latin1')
return rv
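# Illustrative sketch (not part of werkzeug): a minimal Set-Cookie value. On
# Python 3 the result is a latin1-decoded str, on Python 2 a native bytestring.
def _example_dump_cookie():
    header = dump_cookie('name', 'value', max_age=3600, httponly=True)
    # -> 'name=value; Expires=<http date>; Max-Age=3600; HttpOnly; Path=/'
    return header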
def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
LanguageAccept, Headers
from werkzeug.urls import iri_to_uri
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = ''
from pyglet.window import key
# From SDL: src/video/quartz/SDL_QuartzKeys.h
# These are the Macintosh key scancode constants -- from Inside Macintosh
# http://boredzo.org/blog/wp-content/uploads/2007/05/imtx-virtual-keycodes.png
# Renamed QZ_RALT, QZ_LALT to QZ_ROPTION, QZ_LOPTION
# and QZ_RMETA, QZ_LMETA to QZ_RCOMMAND, QZ_LCOMMAND.
#
# See also:
# /System/Library/Frameworks/Carbon.framework/Versions/A/Frameworks/HIToolbox.framework/Headers/Events.h
QZ_ESCAPE = 0x35
QZ_F1 = 0x7A
QZ_F2 = 0x78
QZ_F3 = 0x63
QZ_F4 = 0x76
QZ_F5 = 0x60
QZ_F6 = 0x61
QZ_F7 = 0x62
QZ_F8 = 0x64
QZ_F9 = 0x65
QZ_F10 = 0x6D
QZ_F11 = 0x67
QZ_F12 = 0x6F
QZ_F13 = 0x69
QZ_F14 = 0x6B
QZ_F15 = 0x71
QZ_F16 = 0x6A
QZ_F17 = 0x40
QZ_F18 = 0x4F
QZ_F19 = 0x50
QZ_F20 = 0x5A
QZ_BACKQUOTE = 0x32
QZ_1 = 0x12
QZ_2 = 0x13
QZ_3 = 0x14
QZ_4 = 0x15
QZ_5 = 0x17
QZ_6 = 0x16
QZ_7 = 0x1A
QZ_8 = 0x1C
QZ_9 = 0x19
QZ_0 = 0x1D
QZ_MINUS = 0x1B
QZ_EQUALS = 0x18
QZ_BACKSPACE = 0x33
QZ_INSERT = 0x72
QZ_HOME = 0x73
QZ_PAGEUP = 0x74
QZ_NUMLOCK = 0x47
QZ_KP_EQUALS = 0x51
QZ_KP_DIVIDE = 0x4B
QZ_KP_MULTIPLY = 0x43
QZ_TAB = 0x30
QZ_q = 0x0C
QZ_w = 0x0D
QZ_e = 0x0E
QZ_r = 0x0F
QZ_t = 0x11
QZ_y = 0x10
QZ_u = 0x20
QZ_i = 0x22
QZ_o = 0x1F
QZ_p = 0x23
QZ_LEFTBRACKET = 0x21
QZ_RIGHTBRACKET = 0x1E
QZ_BACKSLASH = 0x2A
QZ_DELETE = 0x75
QZ_END = 0x77
QZ_PAGEDOWN = 0x79
QZ_KP7 = 0x59
QZ_KP8 = 0x5B
QZ_KP9 = 0x5C
QZ_KP_MINUS = 0x4E
QZ_CAPSLOCK = 0x39
QZ_a = 0x00
QZ_s = 0x01
QZ_d = 0x02
QZ_f = 0x03
QZ_g = 0x05
QZ_h = 0x04
QZ_j = 0x26
QZ_k = 0x28
QZ_l = 0x25
QZ_SEMICOLON = 0x29
QZ_QUOTE = 0x27
QZ_RETURN = 0x24
QZ_KP4 = 0x56
QZ_KP5 = 0x57
QZ_KP6 = 0x58
QZ_KP_PLUS = 0x45
QZ_LSHIFT = 0x38
QZ_z = 0x06
QZ_x = 0x07
QZ_c = 0x08
QZ_v = 0x09
QZ_b = 0x0B
QZ_n = 0x2D
QZ_m = 0x2E
QZ_COMMA = 0x2B
QZ_PERIOD = 0x2F
QZ_SLASH = 0x2C
QZ_RSHIFT = 0x3C
QZ_UP = 0x7E
QZ_KP1 = 0x53
QZ_KP2 = 0x54
QZ_KP3 = 0x55
QZ_KP_ENTER = 0x4C
QZ_LCTRL = 0x3B
QZ_LOPTION = 0x3A
QZ_LCOMMAND = 0x37
QZ_SPACE = 0x31
QZ_RCOMMAND = 0x36
QZ_ROPTION = 0x3D
QZ_RCTRL = 0x3E
QZ_FUNCTION = 0x3F
QZ_LEFT = 0x7B
QZ_DOWN = 0x7D
QZ_RIGHT = 0x7C
QZ_KP0 = 0x52
QZ_KP_PERIOD = 0x41
keymap = {
QZ_ESCAPE: key.ESCAPE,
QZ_F1: key.F1,
QZ_F2: key.F2,
QZ_F3: key.F3,
QZ_F4: key.F4,
QZ_F5: key.F5,
QZ_F6: key.F6,
QZ_F7: key.F7,
QZ_F8: key.F8,
QZ_F9: key.F9,
QZ_F10: key.F10,
QZ_F11: key.F11,
QZ_F12: key.F12,
QZ_F13: key.F13,
QZ_F14: key.F14,
QZ_F15: key.F15,
QZ_F16: key.F16,
QZ_F17: key.F17,
QZ_F18: key.F18,
QZ_F19: key.F19,
QZ_F20: key.F20,
QZ_BACKQUOTE: key.QUOTELEFT,
QZ_1: key._1,
QZ_2: key._2,
QZ_3: key._3,
QZ_4: key._4,
QZ_5: key._5,
QZ_6: key._6,
QZ_7: key._7,
QZ_8: key._8,
QZ_9: key._9,
QZ_0: key._0,
QZ_MINUS: key.MINUS,
QZ_EQUALS: key.EQUAL,
QZ_BACKSPACE: key.BACKSPACE,
QZ_INSERT: key.INSERT,
QZ_HOME: key.HOME,
QZ_PAGEUP: key.PAGEUP,
QZ_NUMLOCK: key.NUMLOCK,
QZ_KP_EQUALS: key.NUM_EQUAL,
QZ_KP_DIVIDE: key.NUM_DIVIDE,
QZ_KP_MULTIPLY: key.NUM_MULTIPLY,
QZ_TAB: key.TAB,
QZ_q: key.Q,
QZ_w: key.W,
QZ_e: key.E,
QZ_r: key.R,
QZ_t: key.T,
QZ_y: key.Y,
QZ_u: key.U,
QZ_i: key.I,
QZ_o: key.O,
QZ_p: key.P,
QZ_LEFTBRACKET: key.BRACKETLEFT,
QZ_RIGHTBRACKET: key.BRACKETRIGHT,
QZ_BACKSLASH: key.BACKSLASH,
QZ_DELETE: key.DELETE,
QZ_END: key.END,
QZ_PAGEDOWN: key.PAGEDOWN,
QZ_KP7: key.NUM_7,
QZ_KP8: key.NUM_8,
QZ_KP9: key.NUM_9,
QZ_KP_MINUS: key.NUM_SUBTRACT,
QZ_CAPSLOCK: key.CAPSLOCK,
QZ_a: key.A,
QZ_s: key.S,
QZ_d: key.D,
QZ_f: key.F,
QZ_g: key.G,
QZ_h: key.H,
QZ_j: key.J,
QZ_k: key.K,
QZ_l: key.L,
QZ_SEMICOLON: key.SEMICOLON,
QZ_QUOTE: key.APOSTROPHE,
QZ_RETURN: key.RETURN,
QZ_KP4: key.NUM_4,
QZ_KP5: key.NUM_5,
QZ_KP6: key.NUM_6,
QZ_KP_PLUS: key.NUM_ADD,
QZ_LSHIFT: key.LSHIFT,
QZ_z: key.Z,
QZ_x: key.X,
QZ_c: key.C,
QZ_v: key.V,
QZ_b: key.B,
QZ_n: key.N,
QZ_m: key.M,
QZ_COMMA: key.COMMA,
QZ_PERIOD: key.PERIOD,
QZ_SLASH: key.SLASH,
QZ_RSHIFT: key.RSHIFT,
QZ_UP: key.UP,
QZ_KP1: key.NUM_1,
QZ_KP2: key.NUM_2,
QZ_KP3: key.NUM_3,
QZ_KP_ENTER: key.NUM_ENTER,
QZ_LCTRL: key.LCTRL,
QZ_LOPTION: key.LOPTION,
QZ_LCOMMAND: key.LCOMMAND,
QZ_SPACE: key.SPACE,
QZ_RCOMMAND: key.RCOMMAND,
QZ_ROPTION: key.ROPTION,
QZ_RCTRL: key.RCTRL,
QZ_FUNCTION: key.FUNCTION,
QZ_LEFT: key.LEFT,
QZ_DOWN: key.DOWN,
QZ_RIGHT: key.RIGHT,
QZ_KP0: key.NUM_0,
QZ_KP_PERIOD: key.NUM_DECIMAL,
}
charmap = {
' ' : key.SPACE,
'!' : key.EXCLAMATION,
'"' : key.DOUBLEQUOTE,
'#' : key.HASH,
'#' : key.POUND,
'$' : key.DOLLAR,
'%' : key.PERCENT,
'&' : key.AMPERSAND,
"'" : key.APOSTROPHE,
'(' : key.PARENLEFT,
')' : key.PARENRIGHT,
'*' : key.ASTERISK,
'+' : key.PLUS,
',' : key.COMMA,
'-' : key.MINUS,
'.' : key.PERIOD,
'/' : key.SLASH,
'0' : key._0,
'1' : key._1,
'2' : key._2,
'3' : key._3,
'4' : key._4,
'5' : key._5,
'6' : key._6,
'7' : key._7,
'8' : key._8,
'9' : key._9,
':' : key.COLON,
';' : key.SEMICOLON,
'<' : key.LESS,
'=' : key.EQUAL,
'>' : key.GREATER,
'?' : key.QUESTION,
'@' : key.AT,
'[' : key.BRACKETLEFT,
'\\' : key.BACKSLASH,
']' : key.BRACKETRIGHT,
'^' : key.ASCIICIRCUM,
'_' : key.UNDERSCORE,
'`' : key.QUOTELEFT,  # a duplicate '`' entry mapping to key.GRAVE was collapsed; the last mapping wins in a dict literal
'A' : key.A,
'B' : key.B,
'C' : key.C,
'D' : key.D,
'E' : key.E,
'F' : key.F,
'G' : key.G,
'H' : key.H,
'I' : key.I,
'J' : key.J,
'K' : key.K,
'L' : key.L,
'M' : key.M,
'N' : key.N,
'O' : key.O,
'P' : key.P,
'Q' : key.Q,
'R' : key.R,
'S' : key.S,
'T' : key.T,
'U' : key.U,
'V' : key.V,
'W' : key.W,
'X' : key.X,
'Y' : key.Y,
'Z' : key.Z,
'{' : key.BRACELEFT,
'|' : key.BAR,
'}' : key.BRACERIGHT,
'~' : key.ASCIITILDE
}
|
|
from django.test import TestCase
from django.contrib.auth.models import User
from wagtail.wagtailcore.models import Page, UserPagePermissionsProxy
from wagtail.tests.models import EventPage
class TestPagePermission(TestCase):
fixtures = ['test.json']
def test_nonpublisher_page_permissions(self):
event_editor = User.objects.get(username='eventeditor')
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
someone_elses_event_page = EventPage.objects.get(url_path='/home/events/someone-elses-event/')
homepage_perms = homepage.permissions_for_user(event_editor)
christmas_page_perms = christmas_page.permissions_for_user(event_editor)
unpub_perms = unpublished_event_page.permissions_for_user(event_editor)
someone_elses_event_perms = someone_elses_event_page.permissions_for_user(event_editor)
self.assertFalse(homepage_perms.can_add_subpage())
self.assertTrue(christmas_page_perms.can_add_subpage())
self.assertTrue(unpub_perms.can_add_subpage())
self.assertTrue(someone_elses_event_perms.can_add_subpage())
self.assertFalse(homepage_perms.can_edit())
self.assertTrue(christmas_page_perms.can_edit())
self.assertTrue(unpub_perms.can_edit())
self.assertFalse(someone_elses_event_perms.can_edit()) # basic 'add' permission doesn't allow editing pages owned by someone else
self.assertFalse(homepage_perms.can_delete())
self.assertFalse(christmas_page_perms.can_delete()) # cannot delete because it is published
self.assertTrue(unpub_perms.can_delete())
self.assertFalse(someone_elses_event_perms.can_delete())
self.assertFalse(homepage_perms.can_publish())
self.assertFalse(christmas_page_perms.can_publish())
self.assertFalse(unpub_perms.can_publish())
self.assertFalse(homepage_perms.can_unpublish())
self.assertFalse(christmas_page_perms.can_unpublish())
self.assertFalse(unpub_perms.can_unpublish())
self.assertFalse(homepage_perms.can_publish_subpage())
self.assertFalse(christmas_page_perms.can_publish_subpage())
self.assertFalse(unpub_perms.can_publish_subpage())
self.assertFalse(homepage_perms.can_reorder_children())
self.assertFalse(christmas_page_perms.can_reorder_children())
self.assertFalse(unpub_perms.can_reorder_children())
self.assertFalse(homepage_perms.can_move())
self.assertFalse(christmas_page_perms.can_move()) # cannot move because this would involve unpublishing from its current location
self.assertTrue(unpub_perms.can_move())
self.assertFalse(someone_elses_event_perms.can_move())
self.assertFalse(christmas_page_perms.can_move_to(unpublished_event_page)) # cannot move because this would involve unpublishing from its current location
self.assertTrue(unpub_perms.can_move_to(christmas_page))
self.assertFalse(unpub_perms.can_move_to(homepage)) # no permission to create pages at destination
self.assertFalse(unpub_perms.can_move_to(unpublished_event_page)) # cannot make page a child of itself
def test_publisher_page_permissions(self):
event_moderator = User.objects.get(username='eventmoderator')
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
homepage_perms = homepage.permissions_for_user(event_moderator)
christmas_page_perms = christmas_page.permissions_for_user(event_moderator)
unpub_perms = unpublished_event_page.permissions_for_user(event_moderator)
self.assertFalse(homepage_perms.can_add_subpage())
self.assertTrue(christmas_page_perms.can_add_subpage())
self.assertTrue(unpub_perms.can_add_subpage())
self.assertFalse(homepage_perms.can_edit())
self.assertTrue(christmas_page_perms.can_edit())
self.assertTrue(unpub_perms.can_edit())
self.assertFalse(homepage_perms.can_delete())
self.assertTrue(christmas_page_perms.can_delete())  # publish permission allows deleting even though the page is published
self.assertTrue(unpub_perms.can_delete())
self.assertFalse(homepage_perms.can_publish())
self.assertTrue(christmas_page_perms.can_publish())
self.assertTrue(unpub_perms.can_publish())
self.assertFalse(homepage_perms.can_unpublish())
self.assertTrue(christmas_page_perms.can_unpublish())
self.assertFalse(unpub_perms.can_unpublish()) # cannot unpublish a page that isn't published
self.assertFalse(homepage_perms.can_publish_subpage())
self.assertTrue(christmas_page_perms.can_publish_subpage())
self.assertTrue(unpub_perms.can_publish_subpage())
self.assertFalse(homepage_perms.can_reorder_children())
self.assertTrue(christmas_page_perms.can_reorder_children())
self.assertTrue(unpub_perms.can_reorder_children())
self.assertFalse(homepage_perms.can_move())
self.assertTrue(christmas_page_perms.can_move())
self.assertTrue(unpub_perms.can_move())
self.assertTrue(christmas_page_perms.can_move_to(unpublished_event_page))
self.assertTrue(unpub_perms.can_move_to(christmas_page))
self.assertFalse(unpub_perms.can_move_to(homepage)) # no permission to create pages at destination
self.assertFalse(unpub_perms.can_move_to(unpublished_event_page)) # cannot make page a child of itself
def test_inactive_user_has_no_permissions(self):
user = User.objects.get(username='inactiveuser')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
christmas_page_perms = christmas_page.permissions_for_user(user)
unpub_perms = unpublished_event_page.permissions_for_user(user)
self.assertFalse(unpub_perms.can_add_subpage())
self.assertFalse(unpub_perms.can_edit())
self.assertFalse(unpub_perms.can_delete())
self.assertFalse(unpub_perms.can_publish())
self.assertFalse(christmas_page_perms.can_unpublish())
self.assertFalse(unpub_perms.can_publish_subpage())
self.assertFalse(unpub_perms.can_reorder_children())
self.assertFalse(unpub_perms.can_move())
self.assertFalse(unpub_perms.can_move_to(christmas_page))
def test_superuser_has_full_permissions(self):
user = User.objects.get(username='superuser')
homepage = Page.objects.get(url_path='/home/')
root = Page.objects.get(url_path='/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
homepage_perms = homepage.permissions_for_user(user)
root_perms = root.permissions_for_user(user)
unpub_perms = unpublished_event_page.permissions_for_user(user)
self.assertTrue(homepage_perms.can_add_subpage())
self.assertTrue(root_perms.can_add_subpage())
self.assertTrue(homepage_perms.can_edit())
self.assertFalse(root_perms.can_edit()) # root is not a real editable page, even to superusers
self.assertTrue(homepage_perms.can_delete())
self.assertFalse(root_perms.can_delete())
self.assertTrue(homepage_perms.can_publish())
self.assertFalse(root_perms.can_publish())
self.assertTrue(homepage_perms.can_unpublish())
self.assertFalse(root_perms.can_unpublish())
self.assertFalse(unpub_perms.can_unpublish())
self.assertTrue(homepage_perms.can_publish_subpage())
self.assertTrue(root_perms.can_publish_subpage())
self.assertTrue(homepage_perms.can_reorder_children())
self.assertTrue(root_perms.can_reorder_children())
self.assertTrue(homepage_perms.can_move())
self.assertFalse(root_perms.can_move())
self.assertTrue(homepage_perms.can_move_to(root))
self.assertFalse(homepage_perms.can_move_to(unpublished_event_page))
def test_editable_pages_for_user_with_add_permission(self):
event_editor = User.objects.get(username='eventeditor')
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
someone_elses_event_page = EventPage.objects.get(url_path='/home/events/someone-elses-event/')
user_perms = UserPagePermissionsProxy(event_editor)
editable_pages = user_perms.editable_pages()
can_edit_pages = user_perms.can_edit_pages()
publishable_pages = user_perms.publishable_pages()
can_publish_pages = user_perms.can_publish_pages()
self.assertFalse(editable_pages.filter(id=homepage.id).exists())
self.assertTrue(editable_pages.filter(id=christmas_page.id).exists())
self.assertTrue(editable_pages.filter(id=unpublished_event_page.id).exists())
self.assertFalse(editable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertTrue(can_edit_pages)
self.assertFalse(publishable_pages.filter(id=homepage.id).exists())
self.assertFalse(publishable_pages.filter(id=christmas_page.id).exists())
self.assertFalse(publishable_pages.filter(id=unpublished_event_page.id).exists())
self.assertFalse(publishable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertFalse(can_publish_pages)
def test_editable_pages_for_user_with_edit_permission(self):
event_moderator = User.objects.get(username='eventmoderator')
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
someone_elses_event_page = EventPage.objects.get(url_path='/home/events/someone-elses-event/')
user_perms = UserPagePermissionsProxy(event_moderator)
editable_pages = user_perms.editable_pages()
can_edit_pages = user_perms.can_edit_pages()
publishable_pages = user_perms.publishable_pages()
can_publish_pages = user_perms.can_publish_pages()
self.assertFalse(editable_pages.filter(id=homepage.id).exists())
self.assertTrue(editable_pages.filter(id=christmas_page.id).exists())
self.assertTrue(editable_pages.filter(id=unpublished_event_page.id).exists())
self.assertTrue(editable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertTrue(can_edit_pages)
self.assertFalse(publishable_pages.filter(id=homepage.id).exists())
self.assertTrue(publishable_pages.filter(id=christmas_page.id).exists())
self.assertTrue(publishable_pages.filter(id=unpublished_event_page.id).exists())
self.assertTrue(publishable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertTrue(can_publish_pages)
def test_editable_pages_for_inactive_user(self):
user = User.objects.get(username='inactiveuser')
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
someone_elses_event_page = EventPage.objects.get(url_path='/home/events/someone-elses-event/')
user_perms = UserPagePermissionsProxy(user)
editable_pages = user_perms.editable_pages()
can_edit_pages = user_perms.can_edit_pages()
publishable_pages = user_perms.publishable_pages()
can_publish_pages = user_perms.can_publish_pages()
self.assertFalse(editable_pages.filter(id=homepage.id).exists())
self.assertFalse(editable_pages.filter(id=christmas_page.id).exists())
self.assertFalse(editable_pages.filter(id=unpublished_event_page.id).exists())
self.assertFalse(editable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertFalse(can_edit_pages)
self.assertFalse(publishable_pages.filter(id=homepage.id).exists())
self.assertFalse(publishable_pages.filter(id=christmas_page.id).exists())
self.assertFalse(publishable_pages.filter(id=unpublished_event_page.id).exists())
self.assertFalse(publishable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertFalse(can_publish_pages)
def test_editable_pages_for_superuser(self):
user = User.objects.get(username='superuser')
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
someone_elses_event_page = EventPage.objects.get(url_path='/home/events/someone-elses-event/')
user_perms = UserPagePermissionsProxy(user)
editable_pages = user_perms.editable_pages()
can_edit_pages = user_perms.can_edit_pages()
publishable_pages = user_perms.publishable_pages()
can_publish_pages = user_perms.can_publish_pages()
self.assertTrue(editable_pages.filter(id=homepage.id).exists())
self.assertTrue(editable_pages.filter(id=christmas_page.id).exists())
self.assertTrue(editable_pages.filter(id=unpublished_event_page.id).exists())
self.assertTrue(editable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertTrue(can_edit_pages)
self.assertTrue(publishable_pages.filter(id=homepage.id).exists())
self.assertTrue(publishable_pages.filter(id=christmas_page.id).exists())
self.assertTrue(publishable_pages.filter(id=unpublished_event_page.id).exists())
self.assertTrue(publishable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertTrue(can_publish_pages)
def test_editable_pages_for_non_editing_user(self):
user = User.objects.get(username='admin_only_user')
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
someone_elses_event_page = EventPage.objects.get(url_path='/home/events/someone-elses-event/')
user_perms = UserPagePermissionsProxy(user)
editable_pages = user_perms.editable_pages()
can_edit_pages = user_perms.can_edit_pages()
publishable_pages = user_perms.publishable_pages()
can_publish_pages = user_perms.can_publish_pages()
self.assertFalse(editable_pages.filter(id=homepage.id).exists())
self.assertFalse(editable_pages.filter(id=christmas_page.id).exists())
self.assertFalse(editable_pages.filter(id=unpublished_event_page.id).exists())
self.assertFalse(editable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertFalse(can_edit_pages)
self.assertFalse(publishable_pages.filter(id=homepage.id).exists())
self.assertFalse(publishable_pages.filter(id=christmas_page.id).exists())
self.assertFalse(publishable_pages.filter(id=unpublished_event_page.id).exists())
self.assertFalse(publishable_pages.filter(id=someone_elses_event_page.id).exists())
self.assertFalse(can_publish_pages)
|
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file serves two main purposes:
- it serves up the main html page
- it provides a simple set of APIs to the javascript
"""
# If this is set to true, the client_secrets.json must be valid and users will
# be required to grant OAuth access to this app before continuing.
# This enables the Google API to work
REQUIRE_OAUTH = False
# If this is set to true, then this file will assume that app engine is
# being used to run the server.
USE_APPENGINE = False
import httplib2
import jinja2
import json
import logging
import os
import re
import socket
import webapp2
if USE_APPENGINE:
from oauth2client import appengine
from google.appengine.api import users
from google.appengine.api import memcache
socket.setdefaulttimeout(60)
http = httplib2.Http(cache=memcache if USE_APPENGINE else None)
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
autoescape=True,
extensions=['jinja2.ext.autoescape'])
client_secrets = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
if USE_APPENGINE and REQUIRE_OAUTH:
decorator = appengine.oauth2decorator_from_clientsecrets(
client_secrets,
scope=[
'https://www.googleapis.com/auth/genomics',
'https://www.googleapis.com/auth/devstorage.read_write'
])
else:
class FakeOauthDecorator():
def http(self):
return http
def oauth_aware(self, method):
return method
@property
def callback_path(self):
return '/unused'
def callback_handler(self):
pass
decorator = FakeOauthDecorator()
# TODO: Dataset information should come from the list datasets api call
SUPPORTED_BACKENDS = {
'NCBI' : {'name': 'NCBI',
'url': 'http://trace.ncbi.nlm.nih.gov/Traces/gg',
'datasets': {'SRP034507': 'SRP034507', 'SRP029392': 'SRP029392'}},
'LOCAL' : {'name': 'Local',
'url': 'http://localhost:5000',
'datasets': {'All': ''}},
}
if REQUIRE_OAUTH:
# Google temporarily requires OAuth on all calls
SUPPORTED_BACKENDS['GOOGLE'] = {
'name': 'Google',
'url': 'https://www.googleapis.com/genomics/v1beta',
'datasets': {'1000 Genomes': '376902546192', 'PGP': '383928317087'}
}
class ApiException(Exception):
pass
# Request handlers
class BaseRequestHandler(webapp2.RequestHandler):
def handle_exception(self, exception, debug_mode):
if isinstance(exception, ApiException):
# ApiExceptions are expected, and will return nice error
# messages to the client
self.response.write(exception.message)
self.response.set_status(400)
else:
# All other exceptions are unexpected and should be logged
logging.exception('Unexpected exception')
self.response.write('Unexpected internal exception')
self.response.set_status(500)
def get_backend(self):
backend = self.request.get('backend')
if not backend:
raise ApiException('Backend parameter must be set')
return backend
def get_base_api_url(self):
return SUPPORTED_BACKENDS[self.get_backend()]['url']
def get_content(self, path, method='POST', body=None):
http = decorator.http()
response, content = http.request(
uri="%s/%s" % (self.get_base_api_url(), path),
method=method, body=json.dumps(body) if body else None,
headers={'Content-Type': 'application/json; charset=UTF-8'})
try:
content = json.loads(content)
except ValueError:
logging.error("non-json api content %s" % content)
raise ApiException('The API returned invalid JSON')
if response.status >= 300:
logging.error("error api response %s" % response)
logging.error("error api content %s" % content)
if 'error' in content:
raise ApiException(content['error']['message'])
else:
raise ApiException('Something went wrong with the API call!')
return content
def write_content(self, path, method='POST', body=None):
self.response.write(json.dumps(self.get_content(path, method, body)))
class ReadsetSearchHandler(BaseRequestHandler):
@decorator.oauth_aware
def get(self):
readset_id = self.request.get('readsetId')
if not readset_id:
dataset_id = self.request.get('datasetId')
name = self.request.get('name')
if dataset_id:
body = {'datasetIds' : [dataset_id]}
else:
# This is needed for the local readstore
body = {'datasetIds' : []}
response = self.get_content("readsets/search?fields=readsets(id,name)",
body=body)
# TODO: Use the api once name filtering is supported
if name:
name = name.lower()
response['readsets'] = [r for r in response['readsets']
if name in r['name'].lower()]
self.response.write(json.dumps(response))
return
# Single readset response
self.write_content("readsets/%s" % readset_id, method='GET')
class ReadSearchHandler(BaseRequestHandler):
@decorator.oauth_aware
def get(self):
body = {
'readsetIds': self.request.get('readsetIds').split(','),
'sequenceName': self.request.get('sequenceName'),
'sequenceStart': max(0, int(self.request.get('sequenceStart'))),
'sequenceEnd': int(self.request.get('sequenceEnd')),
}
pageToken = self.request.get('pageToken')
if pageToken:
body['pageToken'] = pageToken
self.write_content("reads/search", body=body)
class BaseSnpediaHandler(webapp2.RequestHandler):
def getSnppediaPageContent(self, snp):
uri = "http://bots.snpedia.com/api.php?action=query&prop=revisions&" \
"format=json&rvprop=content&titles=%s" % snp
response, content = http.request(uri=uri)
page_id, page = json.loads(content)['query']['pages'].popitem()
return page['revisions'][0]['*']
def getContentValue(self, content, key):
try:
matcher = '%s=(.*)\n' % key
return re.search(matcher, content, re.I).group(1)
except (KeyError, AttributeError):
return ''
def complement(self, base):
return {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}[base]
class SnpSearchHandler(BaseSnpediaHandler):
def getSnpResponse(self, name, content):
return {
'name': name,
'link': 'http://www.snpedia.com/index.php/%s' % name,
'position': self.getContentValue(content, 'position'),
'chr': self.getContentValue(content, 'chromosome')
}
def get(self):
snp = self.request.get('snp')
try:
content = self.getSnppediaPageContent(snp)
if snp[:2].lower() == 'rs':
snps = [self.getSnpResponse(snp, content)]
else:
# Try a gene format
snps = re.findall(r'\[\[(rs\d+?)\]\]', content, re.I)
snps = [self.getSnpResponse(s, self.getSnppediaPageContent(s))
for s in set(snps)]
except (ValueError, KeyError, AttributeError):
snps = []
self.response.write(json.dumps({'snps' : snps}))
class AlleleSearchHandler(BaseSnpediaHandler):
def getAlleleResponse(self, name, content):
return {
'name': name,
'link': 'http://www.snpedia.com/index.php/%s' % name,
'repute': self.getContentValue(content, 'repute'),
'summary': self.getContentValue(content, 'summary') or 'Unknown',
'magnitude': self.getContentValue(content, 'magnitude')
}
def get(self):
snp = self.request.get('snp')
a1 = self.request.get('a1')
a2 = self.request.get('a2')
a1c = self.complement(a1)
a2c = self.complement(a2)
possible_names = [(snp, a1, a2), (snp, a2, a1),
(snp, a1c, a2c), (snp, a2c, a1c)]
for name in possible_names:
try:
page = "%s(%s;%s)" % name
content = self.getSnppediaPageContent(page)
self.response.write(json.dumps(self.getAlleleResponse(page, content)))
return
except (ValueError, KeyError, AttributeError):
pass # Continue trying the next allele name
self.response.write(json.dumps({}))
class MainHandler(webapp2.RequestHandler):
@decorator.oauth_aware
def get(self):
if not REQUIRE_OAUTH or decorator.has_credentials():
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render({
'username': users.User().nickname() if USE_APPENGINE else '',
'logout_url': users.create_logout_url('/') if USE_APPENGINE else '',
'backends': SUPPORTED_BACKENDS,
}))
else:
# TODO: What kind of access do the non-google backends need?
template = JINJA_ENVIRONMENT.get_template('grantaccess.html')
self.response.write(template.render({
'url': decorator.authorize_url()
}))
web_app = webapp2.WSGIApplication(
[
('/', MainHandler),
('/api/reads', ReadSearchHandler),
('/api/readsets', ReadsetSearchHandler),
('/api/snps', SnpSearchHandler),
('/api/alleles', AlleleSearchHandler),
(decorator.callback_path, decorator.callback_handler()),
],
debug=True)
|
|
import logging
from urllib import urlencode
from urlparse import urljoin
from xml.etree import ElementTree
import recurly.js as js
from recurly.errors import *
from recurly.resource import Resource, Money, PageError
"""
Recurly's Python client library is an interface to its REST API.
Please see the Recurly API documentation for more information:
http://docs.recurly.com/api/
"""
__version__ = '2.1.9'
BASE_URI = 'https://%s.recurly.com/v2/'
"""The API endpoint to send requests to."""
SUBDOMAIN = 'api'
"""The subdomain of the site authenticating API requests."""
API_KEY = None
"""The API key to use when authenticating API requests."""
CA_CERTS_FILE = None
"""A file contianing a set of concatenated certificate authority certs
for validating the server against."""
DEFAULT_CURRENCY = 'USD'
"""The currency to use creating `Money` instances when one is not specified."""
def base_uri():
if SUBDOMAIN is None:
raise ValueError('recurly.SUBDOMAIN not set')
return BASE_URI % SUBDOMAIN
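# Illustrative usage sketch (not part of the library, never invoked here): a
# caller normally sets the module globals above and then works with the
# resource classes defined below. The subdomain and API key are placeholders.
def _example_list_active_accounts():
    import recurly
    recurly.SUBDOMAIN = 'your-subdomain'        # placeholder site subdomain
    recurly.API_KEY = 'your-private-api-key'    # placeholder private API key
    # Account.all_active() (defined below) pages through active accounts.
    return [account.account_code for account in recurly.Account.all_active()]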
class Account(Resource):
"""A customer account."""
member_path = 'accounts/%s'
collection_path = 'accounts'
nodename = 'account'
attributes = (
'account_code',
'username',
'email',
'first_name',
'last_name',
'company_name',
'accept_language',
'created_at',
)
sensitive_attributes = ('number', 'verification_value',)
def to_element(self):
elem = super(Account, self).to_element()
# Make sure the account code is always included in a serialization.
if 'account_code' not in self.__dict__: # not already included
try:
account_code = self.account_code
except AttributeError:
pass
else:
elem.append(self.element_for_value('account_code', account_code))
if 'billing_info' in self.__dict__:
elem.append(self.billing_info.to_element())
return elem
@classmethod
def all_active(cls, **kwargs):
"""Return a `Page` of active customer accounts.
This is a convenience method for `Account.all(state='active')`.
"""
return cls.all(state='active', **kwargs)
@classmethod
def all_closed(cls, **kwargs):
"""Return a `Page` of closed customer accounts.
This is a convenience method for `Account.all(state='closed')`.
"""
return cls.all(state='closed', **kwargs)
@classmethod
def all_past_due(cls, **kwargs):
"""Return a `Page` of past-due customer accounts.
This is a convenience method for `Account.all(state='past_due')`.
"""
return cls.all(state='past_due', **kwargs)
@classmethod
def all_subscribers(cls, **kwargs):
"""Return a `Page` of customer accounts that are subscribers.
This is a convenience method for `Account.all(state='subscriber')`.
"""
return cls.all(state='subscriber', **kwargs)
@classmethod
def all_non_subscribers(cls, **kwargs):
"""Return a `Page` of customer accounts that are not subscribers.
This is a convenience method for `Account.all(state='non_subscriber')`.
"""
return cls.all(state='non_subscriber', **kwargs)
def __getattr__(self, name):
if name == 'billing_info':
try:
billing_info_url = self._elem.find('billing_info').attrib['href']
except (AttributeError, KeyError):
raise AttributeError(name)
resp, elem = BillingInfo.element_for_url(billing_info_url)
return BillingInfo.from_element(elem)
return super(Account, self).__getattr__(name)
def charge(self, charge):
"""Charge (or credit) this account with the given `Adjustment`."""
url = urljoin(self._url, '%s/adjustments' % self.account_code)
return charge.post(url)
def invoice(self):
"""Create an invoice for any outstanding adjustments this account has."""
url = urljoin(self._url, '%s/invoices' % self.account_code)
response = self.http_request(url, 'POST')
if response.status != 201:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
elem = ElementTree.fromstring(response_xml)
invoice = Invoice.from_element(elem)
invoice._url = response.getheader('Location')
return invoice
def notes(self):
"""Fetch Notes for this account."""
url = urljoin(self._url, '%s/notes' % self.account_code)
return Note.paginated(url)
def reopen(self):
"""Reopen a closed account."""
url = urljoin(self._url, '%s/reopen' % self.account_code)
response = self.http_request(url, 'PUT')
if response.status != 200:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml))
def subscribe(self, subscription):
"""Create the given `Subscription` for this existing account."""
url = urljoin(self._url, '%s/subscriptions' % self.account_code)
return subscription.post(url)
def update_billing_info(self, billing_info):
"""Change this account's billing information to the given `BillingInfo`."""
url = urljoin(self._url, '%s/billing_info' % self.account_code)
response = billing_info.http_request(url, 'PUT', billing_info,
{'Content-Type': 'application/xml; charset=utf-8'})
if response.status == 200:
pass
elif response.status == 201:
billing_info._url = response.getheader('Location')
else:
billing_info.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
billing_info.update_from_element(ElementTree.fromstring(response_xml))
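# Illustrative sketch of Account.update_billing_info (hedged: the account code
# and card details are placeholders, and it assumes the `get` classmethod and
# keyword-argument construction provided by the Resource base class).
def _example_update_billing_info():
    account = Account.get('verena')                  # hypothetical account code
    billing_info = BillingInfo(
        first_name='Verena',
        last_name='Example',
        number='4111-1111-1111-1111',                # standard test card number
        verification_value='123',
        month=11,
        year=2020,
    )
    account.update_billing_info(billing_info)
    return account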
class BillingInfo(Resource):
"""A set of billing information for an account."""
nodename = 'billing_info'
attributes = (
'type',
'first_name',
'last_name',
'number',
'verification_value',
'year',
'month',
'start_month',
'start_year',
'issue_number',
'company',
'address1',
'address2',
'city',
'state',
'zip',
'country',
'phone',
'vat_number',
'ip_address',
'ip_address_country',
'card_type',
'first_six',
'last_four',
'billing_agreement_id',
)
sensitive_attributes = ('number', 'verification_value')
xml_attribute_attributes = ('type',)
class Coupon(Resource):
"""A coupon for a customer to apply to their account."""
member_path = 'coupons/%s'
collection_path = 'coupons'
nodename = 'coupon'
attributes = (
'coupon_code',
'name',
'discount_type',
'discount_percent',
'discount_in_cents',
'redeem_by_date',
'single_use',
'applies_for_months',
'max_redemptions',
'applies_to_all_plans',
'created_at',
'plan_codes',
)
@classmethod
def value_for_element(cls, elem):
if not elem or elem.tag != 'plan_codes' or elem.attrib.get('type') != 'array':
return super(Coupon, cls).value_for_element(elem)
return [code_elem.text for code_elem in elem]
@classmethod
def element_for_value(cls, attrname, value):
if attrname != 'plan_codes':
return super(Coupon, cls).element_for_value(attrname, value)
elem = ElementTree.Element(attrname)
elem.attrib['type'] = 'array'
for code in value:
code_el = ElementTree.Element('plan_code')
code_el.text = code
elem.append(code_el)
return elem
@classmethod
def all_redeemable(cls, **kwargs):
"""Return a `Page` of redeemable coupons.
This is a convenience method for `Coupon.all(state='redeemable')`.
"""
return cls.all(state='redeemable', **kwargs)
@classmethod
def all_expired(cls, **kwargs):
"""Return a `Page` of expired coupons.
This is a convenience method for `Coupon.all(state='expired')`.
"""
return cls.all(state='expired', **kwargs)
@classmethod
def all_maxed_out(cls, **kwargs):
"""Return a `Page` of coupons that have been used the maximum
number of times.
This is a convenience method for `Coupon.all(state='maxed_out')`.
"""
return cls.all(state='maxed_out', **kwargs)
class Redemption(Resource):
"""A particular application of a coupon to a customer account."""
nodename = 'redemption'
attributes = (
'account_code',
'single_use',
'total_discounted_in_cents',
'currency',
'created_at',
)
class Adjustment(Resource):
"""A charge or credit applied (or to be applied) to an account's invoice."""
nodename = 'adjustment'
attributes = (
'uuid',
'description',
'accounting_code',
'quantity',
'unit_amount_in_cents',
'discount_in_cents',
'tax_in_cents',
'total_in_cents',
'currency',
'taxable',
'start_date',
'end_date',
'created_at',
'type',
)
xml_attribute_attributes = ('type',)
class Invoice(Resource):
"""A payable charge to an account for the customer's charges and
subscriptions."""
member_path = 'invoices/%s'
collection_path = 'invoices'
nodename = 'invoice'
attributes = (
'uuid',
'state',
'invoice_number',
'po_number',
'vat_number',
'subtotal_in_cents',
'tax_in_cents',
'total_in_cents',
'currency',
'created_at',
'line_items',
'transactions',
)
@classmethod
def all_open(cls, **kwargs):
"""Return a `Page` of open invoices.
This is a convenience method for `Invoice.all(state='open')`.
"""
return cls.all(state='open', **kwargs)
@classmethod
def all_collected(cls, **kwargs):
"""Return a `Page` of collected invoices.
This is a convenience method for `Invoice.all(state='collected')`.
"""
return cls.all(state='collected', **kwargs)
@classmethod
def all_failed(cls, **kwargs):
"""Return a `Page` of failed invoices.
This is a convenience method for `Invoice.all(state='failed')`.
"""
return cls.all(state='failed', **kwargs)
@classmethod
def all_past_due(cls, **kwargs):
"""Return a `Page` of past-due invoices.
This is a convenience method for `Invoice.all(state='past_due')`.
"""
return cls.all(state='past_due', **kwargs)
class Subscription(Resource):
"""A customer account's subscription to your service."""
member_path = 'subscriptions/%s'
collection_path = 'subscriptions'
nodename = 'subscription'
attributes = (
'uuid',
'state',
'plan_code',
'coupon_code',
'quantity',
'activated_at',
'canceled_at',
'starts_at',
'expires_at',
'current_period_started_at',
'current_period_ends_at',
'trial_started_at',
'trial_ends_at',
'unit_amount_in_cents',
'total_billing_cycles',
'timeframe',
'currency',
'subscription_add_ons',
'account',
'pending_subscription',
'first_renewal_date',
)
sensitive_attributes = ('number', 'verification_value',)
def _update(self):
if not hasattr(self, 'timeframe'):
self.timeframe = 'now'
return super(Subscription, self)._update()
def __getpath__(self, name):
if name == 'plan_code':
return 'plan/plan_code'
else:
return name
class Transaction(Resource):
"""An immediate one-time charge made to a customer's account."""
member_path = 'transactions/%s'
collection_path = 'transactions'
nodename = 'transaction'
attributes = (
'uuid',
'action',
'account',
'currency',
'amount_in_cents',
'tax_in_cents',
'status',
'reference',
'test',
'voidable',
'description',
'refundable',
'cvv_result',
'avs_result',
'avs_result_street',
'avs_result_postal',
'created_at',
'details',
'transaction_error',
'type',
)
xml_attribute_attributes = ('type',)
sensitive_attributes = ('number', 'verification_value',)
def _handle_refund_accepted(self, response):
if response.status != 202:
self.raise_http_error(response)
self._refund_transaction_url = response.getheader('Location')
return self
def get_refund_transaction(self):
"""Retrieve the refund transaction for this transaction, immediately
after refunding.
After calling `refund()` to refund a transaction, call this method to
retrieve the new transaction representing the refund.
"""
try:
url = self._refund_transaction_url
except AttributeError:
raise ValueError("No refund transaction is available for this transaction")
resp, elem = self.element_for_url(url)
value = self.value_for_element(elem)
return value
def refund(self, **kwargs):
"""Refund this transaction.
Calling this method returns the refunded transaction (that is,
``self``) if the refund was successful, or raises a `ResponseError` if
an error occurred requesting the refund. After a successful call to
`refund()`, to retrieve the new transaction representing the refund,
use the `get_refund_transaction()` method.
"""
# Find the URL and method to refund the transaction.
try:
selfnode = self._elem
except AttributeError:
raise AttributeError('refund')
url, method = None, None
for anchor_elem in selfnode.findall('a'):
if anchor_elem.attrib.get('name') == 'refund':
url = anchor_elem.attrib['href']
method = anchor_elem.attrib['method'].upper()
if url is None or method is None:
raise AttributeError("refund") # should do something more specific probably
actionator = self._make_actionator(url, method, extra_handler=self._handle_refund_accepted)
return actionator(**kwargs)
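# Illustrative sketch of the refund flow documented on Transaction.refund above
# (hedged: the uuid is a placeholder, `Transaction.get` comes from the Resource
# base class, and the amount keyword is simply forwarded to the refund action).
def _example_refund_flow(transaction_uuid, amount_in_cents=None):
    transaction = Transaction.get(transaction_uuid)
    if amount_in_cents is None:
        transaction.refund()                                  # full refund
    else:
        transaction.refund(amount_in_cents=amount_in_cents)   # partial refund
    # The refund transaction is exposed afterwards via the Location header
    # captured by _handle_refund_accepted above.
    return transaction.get_refund_transaction()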
class Plan(Resource):
"""A service level for your service to which a customer account
can subscribe."""
member_path = 'plans/%s'
collection_path = 'plans'
nodename = 'plan'
attributes = (
'plan_code',
'name',
'description',
'success_url',
'cancel_url',
'display_donation_amounts',
'display_quantity',
'display_phone_number',
'bypass_hosted_confirmation',
'unit_name',
'payment_page_tos_link',
'plan_interval_length',
'plan_interval_unit',
'trial_interval_length',
'trial_interval_unit',
'accounting_code',
'created_at',
'unit_amount_in_cents',
'setup_fee_in_cents',
)
def get_add_on(self, add_on_code):
"""Return the `AddOn` for this plan with the given add-on code."""
url = urljoin(self._url, '%s/add_ons/%s' % (self.plan_code, add_on_code))
resp, elem = AddOn.element_for_url(url)
return AddOn.from_element(elem)
def create_add_on(self, add_on):
"""Make the given `AddOn` available to subscribers on this plan."""
url = urljoin(self._url, '%s/add_ons' % self.plan_code)
return add_on.post(url)
class AddOn(Resource):
"""An additional benefit a customer subscribed to a particular plan
can also subscribe to."""
nodename = 'add_on'
attributes = (
'add_on_code',
'name',
'display_quantity_on_hosted_page',
'display_quantity',
'default_quantity',
'accounting_code',
'unit_amount_in_cents',
'created_at',
)
class SubscriptionAddOn(Resource):
"""A plan add-on as added to a customer's subscription.
Use these instead of `AddOn` instances when specifying a
`Subscription` instance's `subscription_add_ons` attribute.
"""
nodename = 'subscription_add_on'
inherits_currency = True
attributes = (
'add_on_code',
'quantity',
'unit_amount_in_cents',
)
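# Illustrative sketch (hedged: plan and add-on codes are placeholders, and
# keyword-argument construction is assumed from the Resource base class)
# showing SubscriptionAddOn used for a Subscription's subscription_add_ons
# attribute, as described in the docstring above.
def _example_subscription_with_add_on(account):
    subscription = Subscription(
        plan_code='gold',                            # hypothetical plan code
        currency='USD',
        subscription_add_ons=[
            SubscriptionAddOn(add_on_code='extra-seats', quantity=2),
        ],
    )
    account.subscribe(subscription)                  # Account.subscribe above
    return subscription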
class Note(Resource):
"""A customer account's notes."""
nodename = 'note'
collection_path = 'notes'
attributes = (
'message',
'created_at',
)
@classmethod
def from_element(cls, elem):
new_note = Note()
for child_el in elem:
if not child_el.tag:
continue
setattr(new_note, child_el.tag, child_el.text)
return new_note
Resource._learn_nodenames(locals().values())
def objects_for_push_notification(notification):
"""Decode a push notification with the given body XML.
Returns a dictionary containing the constituent objects of the push
notification. The kind of push notification is given in the ``"type"``
member of the returned dictionary.
"""
notification_el = ElementTree.fromstring(notification)
objects = {'type': notification_el.tag}
for child_el in notification_el:
tag = child_el.tag
res = Resource.value_for_element(child_el)
objects[tag] = res
return objects
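# Illustrative sketch of decoding a push notification (hedged: the XML below is
# a minimal hypothetical payload, not a verbatim Recurly notification body).
def _example_decode_push_notification():
    notification_xml = (
        '<new_account_notification>'
        '<account><account_code>verena</account_code></account>'
        '</new_account_notification>'
    )
    objects = objects_for_push_notification(notification_xml)
    # objects['type'] is the notification kind ('new_account_notification');
    # objects['account'] is the resource decoded from the <account> element.
    return objects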
|
|
from test import support
import types
import unittest
class FuncAttrsTest(unittest.TestCase):
def setUp(self):
class F:
def a(self):
pass
def b():
return 3
self.fi = F()
self.F = F
self.b = b
def cannot_set_attr(self, obj, name, value, exceptions):
try:
setattr(obj, name, value)
except exceptions:
pass
else:
self.fail("shouldn't be able to set %s to %r" % (name, value))
try:
delattr(obj, name)
except exceptions:
pass
else:
self.fail("shouldn't be able to del %s" % name)
class FunctionPropertiesTest(FuncAttrsTest):
# Include the external setUp method that is common to all tests
def test_module(self):
self.assertEqual(self.b.__module__, __name__)
def test_dir_includes_correct_attrs(self):
self.b.known_attr = 7
self.assertIn('known_attr', dir(self.b),
"set attributes not in dir listing of method")
# Test on underlying function object of method
self.F.a.known_attr = 7
self.assertIn('known_attr', dir(self.fi.a), "set attribute on function "
"implementations, should show up in next dir")
def test_duplicate_function_equality(self):
# Body of `duplicate' is the exact same as self.b
def duplicate():
'my docstring'
return 3
self.assertNotEqual(self.b, duplicate)
def test_copying___code__(self):
def test(): pass
self.assertEqual(test(), None)
test.__code__ = self.b.__code__
self.assertEqual(test(), 3) # self.b always returns 3, arbitrarily
def test___globals__(self):
self.assertIs(self.b.__globals__, globals())
self.cannot_set_attr(self.b, '__globals__', 2,
(AttributeError, TypeError))
def test___closure__(self):
a = 12
def f(): print(a)
c = f.__closure__
self.assertIsInstance(c, tuple)
self.assertEqual(len(c), 1)
# don't have a type object handy
self.assertEqual(c[0].__class__.__name__, "cell")
self.cannot_set_attr(f, "__closure__", c, AttributeError)
def test_empty_cell(self):
def f(): print(a)
try:
f.__closure__[0].cell_contents
except ValueError:
pass
else:
self.fail("shouldn't be able to read an empty cell")
a = 12
def test___name__(self):
self.assertEqual(self.b.__name__, 'b')
self.b.__name__ = 'c'
self.assertEqual(self.b.__name__, 'c')
self.b.__name__ = 'd'
self.assertEqual(self.b.__name__, 'd')
# __name__ must be a string
self.cannot_set_attr(self.b, '__name__', 7, TypeError)
# __name__ must be available when in restricted mode. Exec will raise
# AttributeError if __name__ is not available on f.
s = """def f(): pass\nf.__name__"""
exec(s, {'__builtins__': {}})
# Test on methods, too
self.assertEqual(self.fi.a.__name__, 'a')
self.cannot_set_attr(self.fi.a, "__name__", 'a', AttributeError)
def test___code__(self):
num_one, num_two = 7, 8
def a(): pass
def b(): return 12
def c(): return num_one
def d(): return num_two
def e(): return num_one, num_two
for func in [a, b, c, d, e]:
self.assertEqual(type(func.__code__), types.CodeType)
self.assertEqual(c(), 7)
self.assertEqual(d(), 8)
d.__code__ = c.__code__
self.assertEqual(c.__code__, d.__code__)
self.assertEqual(c(), 7)
# self.assertEqual(d(), 7)
try:
b.__code__ = c.__code__
except ValueError:
pass
else:
self.fail("__code__ with different numbers of free vars should "
"not be possible")
try:
e.__code__ = d.__code__
except ValueError:
pass
else:
self.fail("__code__ with different numbers of free vars should "
"not be possible")
def test_blank_func_defaults(self):
self.assertEqual(self.b.__defaults__, None)
del self.b.__defaults__
self.assertEqual(self.b.__defaults__, None)
def test_func_default_args(self):
def first_func(a, b):
return a+b
def second_func(a=1, b=2):
return a+b
self.assertEqual(first_func.__defaults__, None)
self.assertEqual(second_func.__defaults__, (1, 2))
first_func.__defaults__ = (1, 2)
self.assertEqual(first_func.__defaults__, (1, 2))
self.assertEqual(first_func(), 3)
self.assertEqual(first_func(3), 5)
self.assertEqual(first_func(3, 5), 8)
del second_func.__defaults__
self.assertEqual(second_func.__defaults__, None)
try:
second_func()
except TypeError:
pass
else:
self.fail("__defaults__ does not update; deleting it does not "
"remove requirement")
class InstancemethodAttrTest(FuncAttrsTest):
def test___class__(self):
self.assertEqual(self.fi.a.__self__.__class__, self.F)
self.cannot_set_attr(self.fi.a, "__class__", self.F, TypeError)
def test___func__(self):
self.assertEqual(self.fi.a.__func__, self.F.a)
self.cannot_set_attr(self.fi.a, "__func__", self.F.a, AttributeError)
def test___self__(self):
self.assertEqual(self.fi.a.__self__, self.fi)
self.cannot_set_attr(self.fi.a, "__self__", self.fi, AttributeError)
def test___func___non_method(self):
# Behavior should be the same when a method is added via an attr
# assignment
self.fi.id = types.MethodType(id, self.fi)
self.assertEqual(self.fi.id(), id(self.fi))
# Test usage
try:
self.fi.id.unknown_attr
except AttributeError:
pass
else:
self.fail("using unknown attributes should raise AttributeError")
# Test assignment and deletion
self.cannot_set_attr(self.fi.id, 'unknown_attr', 2, AttributeError)
class ArbitraryFunctionAttrTest(FuncAttrsTest):
def test_set_attr(self):
self.b.known_attr = 7
self.assertEqual(self.b.known_attr, 7)
try:
self.fi.a.known_attr = 7
except AttributeError:
pass
else:
self.fail("setting attributes on methods should raise error")
def test_delete_unknown_attr(self):
try:
del self.b.unknown_attr
except AttributeError:
pass
else:
self.fail("deleting unknown attribute should raise TypeError")
def test_unset_attr(self):
for func in [self.b, self.fi.a]:
try:
func.non_existent_attr
except AttributeError:
pass
else:
self.fail("using unknown attributes should raise "
"AttributeError")
class FunctionDictsTest(FuncAttrsTest):
def test_setting_dict_to_invalid(self):
self.cannot_set_attr(self.b, '__dict__', None, TypeError)
from collections import UserDict
d = UserDict({'known_attr': 7})
self.cannot_set_attr(self.fi.a.__func__, '__dict__', d, TypeError)
def test_setting_dict_to_valid(self):
d = {'known_attr': 7}
self.b.__dict__ = d
# Test assignment
self.assertIs(d, self.b.__dict__)
# ... and on all the different ways of referencing the method's func
self.F.a.__dict__ = d
self.assertIs(d, self.fi.a.__func__.__dict__)
self.assertIs(d, self.fi.a.__dict__)
# Test value
self.assertEqual(self.b.known_attr, 7)
self.assertEqual(self.b.__dict__['known_attr'], 7)
# ... and again, on all the different method's names
self.assertEqual(self.fi.a.__func__.known_attr, 7)
self.assertEqual(self.fi.a.known_attr, 7)
def test_delete___dict__(self):
try:
del self.b.__dict__
except TypeError:
pass
else:
self.fail("deleting function dictionary should raise TypeError")
def test_unassigned_dict(self):
self.assertEqual(self.b.__dict__, {})
def test_func_as_dict_key(self):
value = "Some string"
d = {}
d[self.b] = value
self.assertEqual(d[self.b], value)
class FunctionDocstringTest(FuncAttrsTest):
def test_set_docstring_attr(self):
self.assertEqual(self.b.__doc__, None)
docstr = "A test method that does nothing"
self.b.__doc__ = docstr
self.F.a.__doc__ = docstr
self.assertEqual(self.b.__doc__, docstr)
self.assertEqual(self.fi.a.__doc__, docstr)
self.cannot_set_attr(self.fi.a, "__doc__", docstr, AttributeError)
def test_delete_docstring(self):
self.b.__doc__ = "The docstring"
del self.b.__doc__
self.assertEqual(self.b.__doc__, None)
def cell(value):
"""Create a cell containing the given value."""
def f():
print(a)
a = value
return f.__closure__[0]
def empty_cell(empty=True):
"""Create an empty cell."""
def f():
print(a)
# the intent of the following line is simply "if False:"; it's
# spelt this way to avoid the danger that a future optimization
# might simply remove an "if False:" code block.
if not empty:
a = 1729
return f.__closure__[0]
class CellTest(unittest.TestCase):
def test_comparison(self):
# These tests are here simply to exercise the comparison code;
# their presence should not be interpreted as providing any
# guarantees about the semantics (or even existence) of cell
# comparisons in future versions of CPython.
self.assertTrue(cell(2) < cell(3))
self.assertTrue(empty_cell() < cell('saturday'))
self.assertTrue(empty_cell() == empty_cell())
self.assertTrue(cell(-36) == cell(-36.0))
self.assertTrue(cell(True) > empty_cell())
class StaticMethodAttrsTest(unittest.TestCase):
def test_func_attribute(self):
def f():
pass
c = classmethod(f)
self.assertTrue(c.__func__ is f)
s = staticmethod(f)
self.assertTrue(s.__func__ is f)
def test_main():
support.run_unittest(FunctionPropertiesTest, InstancemethodAttrTest,
ArbitraryFunctionAttrTest, FunctionDictsTest,
FunctionDocstringTest, CellTest,
StaticMethodAttrsTest)
if __name__ == "__main__":
test_main()
|
|
import httplib2
from apiclient import discovery, errors
from rest_framework.viewsets import ViewSet
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from csp import settings
from crowdsourcing import models
from apiclient.http import MediaFileUpload
from crowdsourcing.models import AccountModel
# TODO add support for api ajax calls
class GoogleDriveOauth(ViewSet):
permission_classes = [IsAuthenticated]
def get_flow(self, request):
from oauth2client.client import OAuth2WebServerFlow
auth_flow = OAuth2WebServerFlow(settings.GOOGLE_DRIVE_CLIENT_ID, settings.GOOGLE_DRIVE_CLIENT_SECRET,
settings.GOOGLE_DRIVE_OAUTH_SCOPE, settings.GOOGLE_DRIVE_REDIRECT_URI,
approval_prompt='force', access_type='offline')
return auth_flow
def auth_init(self, request):
auth_flow = self.get_flow(request)
flow_model = models.FlowModel()
flow_model.flow = auth_flow
flow_model.id = request.user
flow_model.save()
authorize_url = auth_flow.step1_get_authorize_url()
return Response({'authorize_url': authorize_url}, status=status.HTTP_200_OK)
def auth_end(self, request):
from oauth2client.django_orm import Storage
from apiclient.discovery import build
auth_flow = models.FlowModel.objects.get(id=request.user).flow
credentials = auth_flow.step2_exchange(request.data.get('code'))
http = httplib2.Http()
http = credentials.authorize(http)
drive_service = build('drive', 'v2', http=http)
try:
account_info = drive_service.about().get().execute()
user_info = account_info['user']
quota_info = account_info['quotaBytesByService']
drive_quota = [drive['bytesUsed'] for drive in quota_info if drive['serviceName'] == 'DRIVE']
drive_bytes_used = drive_quota.pop()
quota_bytes_total = account_info['quotaBytesTotal']
try:
account_check = models.AccountModel.objects.get(type='GOOGLEDRIVE', email=user_info['emailAddress'])
account_check.is_active = 1
account_check.status = 1
account_check.save()
except models.AccountModel.DoesNotExist:
account = models.AccountModel()
account.owner = request.user
account.email = user_info['emailAddress']
account.access_token = credentials.to_json()
account.description = user_info['displayName'] + '(' + user_info['emailAddress'] + ')'
account.type = 'GOOGLEDRIVE'
account.quota = quota_bytes_total
account.assigned_space = quota_bytes_total
account.used_space = drive_bytes_used
account.is_active = 1
body = {
'title': 'crowdresearch',
'mimeType': 'application/vnd.google-apps.folder'
}
account.root = drive_service.files().insert(body=body).execute()['id']
account.name = 'Google Drive'
account.status = 1
account.save()
storage = Storage(models.CredentialsModel, 'account', account, 'credential')
storage.put(credentials)
except Exception:
Response({"message": "Failed to add account, please retry"}, status.HTTP_400_BAD_REQUEST)
return Response({"message": "OK"}, status.HTTP_201_CREATED)
class GoogleDriveViewSet(ViewSet):
permission_classes = [IsAuthenticated]
def query(self, request):
file_name = request.query_params.get('path')
files = file_name.split('/')
account = AccountModel.objects.get(owner=request.user, type='GOOGLEDRIVE')
root = account.root
drive_util = GoogleDriveUtil(account_instance=account)
file_list = []
for file in files:
file_list = drive_util.list_files_in_folder(root, "title = '" + file + "'")
root = file_list[0]['id']
return Response(file_list, 200)
class GoogleDriveUtil(object):
def __init__(self, account_instance):
credential_model = models.CredentialsModel.objects.get(account=account_instance)
get_credential = credential_model.credential
http = httplib2.Http()
http = get_credential.authorize(http)
drive_service = discovery.build('drive', 'v2', http=http)
self.drive_service = drive_service
def list_files_in_folder(self, folder_id, q):
# TODO filter by q
file_list = []
page_token = None
while True:
try:
params = {}
if page_token:
params['pageToken'] = page_token
params['q'] = q
children = self.drive_service.children().list(folderId=folder_id, **params).execute()
for child in children.get('items', []):
file_list.append(self.drive_service.files().get(fileId=child['id']).execute())
page_token = children.get('nextPageToken')
if not page_token:
break
except errors.HttpError as error:
message = 'An error occurred: ' + error.content
return message
return file_list
def search_file(self, account_instance, file_title):
root_id = models.CredentialsModel.objects.get(account=account_instance).account.root
parent_id = root_id  # search directly under the account's root folder
query = "'" + str(parent_id) + "' in parents and title = '" + file_title + "'"
contents = self.list_files_in_folder(parent_id, query)
return contents
def create_folder(self, title, parent_id='', mime_type='application/vnd.google-apps.folder'):
body = {
'title': title,
'mimeType': mime_type
}
if parent_id:
body['parents'] = [{'id': parent_id}]
try:
file = self.drive_service.files().insert(body=body).execute()
return file
except errors.HttpError:
return None
def insert(self, file_name, title, parent_id=None, mime_type='application/octet-stream', resumable=True):
media_body = MediaFileUpload(file_name, mimetype=mime_type, resumable=resumable)
body = {
'title': title,
'mimeType': mime_type
}
if parent_id:
body['parents'] = [{'id': parent_id}]
try:
file = self.drive_service.files().insert(body=body, media_body=media_body).execute()
return file
except errors.HttpError:
return None
def update(self, file_id, new_revision, new_filename, mime_type='application/octet-stream'):
try:
# First retrieve the file from the API.
file = self.drive_service.files().get(fileId=file_id).execute()
# File's new content.
media_body = MediaFileUpload(new_filename, mimetype=mime_type, resumable=True)
# Send the request to the API.
updated_file = self.drive_service.files().update(
fileId=file_id,
body=file,
newRevision=new_revision,
media_body=media_body).execute()
return updated_file
except errors.HttpError:
return None
def trash(self, file_id):
try:
return self.drive_service.files().trash(fileId=file_id).execute()
except errors.HttpError as error:
return str(error)
def untrash(self, file_id):
try:
return self.drive_service.files().untrash(fileId=file_id).execute()
except errors.HttpError:
return None
def delete(self, file_id):
try:
return self.drive_service.files().delete(fileId=file_id).execute()
except errors.HttpError:
return None
def download(self, file_id):
file = None
try:
file = self.drive_service.files().get(fileId=file_id).execute()
except errors.HttpError:
return None
download_url = file.get('downloadUrl')
if download_url:
resp, content = self.drive_service._http.request(download_url)
if resp.status == 200:
return content
else:
return None
else:
return None
def get(self, file_id):
try:
file = self.drive_service.files().get(fileId=file_id).execute()
return file
except errors.HttpError:
return None
def get_account_info(self):
account_info = self.drive_service.about().get().execute()
return account_info
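# Illustrative usage sketch (hedged: assumes an existing AccountModel row of
# type 'GOOGLEDRIVE' with stored credentials; the file path, titles and query
# string are placeholders). Never called by the module itself.
def _example_drive_usage(user):
    account = AccountModel.objects.get(owner=user, type='GOOGLEDRIVE')
    drive = GoogleDriveUtil(account_instance=account)
    folder = drive.create_folder('crowdresearch-uploads', parent_id=account.root)
    uploaded = drive.insert('/tmp/report.csv', 'report.csv',
                            parent_id=folder['id'], mime_type='text/csv')
    listing = drive.list_files_in_folder(folder['id'], "title contains 'report'")
    return uploaded, listing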
|
|
"""Hierarchical Agglomerative Clustering
These routines perform some hierarchical agglomerative clustering of some
input data.
Authors : Vincent Michel, Bertrand Thirion, Alexandre Gramfort,
Gael Varoquaux
License: BSD 3 clause
"""
from heapq import heapify, heappop, heappush, heappushpop
import warnings
import sys
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..externals.joblib import Memory
from ..externals import six
from ..metrics.pairwise import paired_distances, pairwise_distances
from ..utils import check_array
from ..utils.sparsetools import connected_components
from . import _hierarchical
from ._feature_agglomeration import AgglomerationTransform
from ..utils.fast_dict import IntFloatDict
if sys.version_info[0] > 2:
xrange = range
###############################################################################
# For non fully-connected graphs
def _fix_connectivity(X, connectivity, n_components=None,
affinity="euclidean"):
"""
Fixes the connectivity matrix
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary
"""
n_samples = X.shape[0]
if (connectivity.shape[0] != n_samples or
connectivity.shape[1] != n_samples):
raise ValueError('Wrong shape for connectivity matrix: %s '
'when X is %s' % (connectivity.shape, X.shape))
# Make the connectivity matrix symmetric:
connectivity = connectivity + connectivity.T
# Convert connectivity matrix to LIL
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
# Compute the number of nodes
n_components, labels = connected_components(connectivity)
if n_components > 1:
warnings.warn("the number of connected components of the "
"connectivity matrix is %d > 1. Completing it to avoid "
"stopping the tree early." % n_components,
stacklevel=2)
# XXX: Can we do without completing the matrix?
for i in xrange(n_components):
idx_i = np.where(labels == i)[0]
Xi = X[idx_i]
for j in xrange(i):
idx_j = np.where(labels == j)[0]
Xj = X[idx_j]
D = pairwise_distances(Xi, Xj, metric=affinity)
ii, jj = np.where(D == np.min(D))
ii = ii[0]
jj = jj[0]
connectivity[idx_i[ii], idx_j[jj]] = True
connectivity[idx_j[jj], idx_i[ii]] = True
return connectivity, n_components
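# Illustrative sketch (not used by the library): completing a deliberately
# disconnected connectivity matrix over four 1-D samples. The returned count
# is the number of components found *before* the missing edge is added.
def _example_fix_connectivity():
    X = np.array([[0.], [1.], [10.], [11.]])
    connectivity = sparse.lil_matrix((4, 4))
    connectivity[0, 1] = 1                     # component {0, 1}
    connectivity[2, 3] = 1                     # component {2, 3}
    fixed, n_components = _fix_connectivity(X, connectivity)
    # n_components == 2; `fixed` now also contains an edge joining the
    # closest pair of samples across the two groups.
    return fixed, n_components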
###############################################################################
# Hierarchical tree building functions
def ward_tree(X, connectivity=None, n_components=None, n_clusters=None,
return_distance=False):
"""Ward clustering based on a Feature matrix.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e., the Ward algorithm is unstructured.
n_components : int (optional)
Number of connected components. If None the number of connected
components is estimated from the connectivity matrix.
NOTE: This parameter is now directly determined from the
connectivity matrix and will be removed in 0.18
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
return_distance : bool (optional)
If True, return the distance between the clusters.
Returns
-------
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, otherwise 'None' is returned.
distances : 1D array, shape (n_nodes-1, )
Only returned if return_distance is set to True (for compatibility).
The distances between the centers of the nodes. `distances[i]`
corresponds to a weighted euclidean distance between
the nodes `children[i, 0]` and `children[i, 1]`. If the nodes refer to
leaves of the tree, then `distances[i]` is their unweighted euclidean
distance. Distances are updated in the following way
(from scipy.cluster.hierarchy.linkage):
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.intp)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
else:
return children_, 1, n_samples, None
if n_components is not None:
warnings.warn(
"n_components is now directly calculated from the connectivity "
"matrix and will be removed in 0.18",
DeprecationWarning)
connectivity, n_components = _fix_connectivity(X, connectivity)
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
if n_clusters > n_samples:
raise ValueError('Cannot provide more clusters than samples. '
'%i n_clusters was asked, and there are %i samples.'
% (n_clusters, n_samples))
n_nodes = 2 * n_samples - n_clusters
# create inertia matrix
coord_row = []
coord_col = []
A = []
for ind, row in enumerate(connectivity.rows):
A.append(row)
# We keep only the upper triangular for the moments
# Generator expressions are faster than arrays on the following
row = [i for i in row if i < ind]
coord_row.extend(len(row) * [ind, ])
coord_col.extend(row)
coord_row = np.array(coord_row, dtype=np.intp, order='C')
coord_col = np.array(coord_col, dtype=np.intp, order='C')
# build moments as a list
moments_1 = np.zeros(n_nodes, order='C')
moments_1[:n_samples] = 1
moments_2 = np.zeros((n_nodes, n_features), order='C')
moments_2[:n_samples] = X
inertia = np.empty(len(coord_row), dtype=np.float, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col,
inertia)
inertia = list(six.moves.zip(inertia, coord_row, coord_col))
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=bool)
children = []
if return_distance:
distances = np.empty(n_nodes - n_samples)
not_visited = np.empty(n_nodes, dtype=np.int8, order='C')
# recursive merge loop
for k in range(n_samples, n_nodes):
# identify the merge
while True:
inert, i, j = heappop(inertia)
if used_node[i] and used_node[j]:
break
parent[i], parent[j] = k, k
children.append((i, j))
used_node[i] = used_node[j] = False
if return_distance: # store inertia value
distances[k - n_samples] = inert
# update the moments
moments_1[k] = moments_1[i] + moments_1[j]
moments_2[k] = moments_2[i] + moments_2[j]
# update the structure matrix A and the inertia matrix
coord_col = []
not_visited.fill(1)
not_visited[k] = 0
_hierarchical._get_parents(A[i], coord_col, parent, not_visited)
_hierarchical._get_parents(A[j], coord_col, parent, not_visited)
# List comprehension is faster than a for loop
[A[l].append(k) for l in coord_col]
A.append(coord_col)
coord_col = np.array(coord_col, dtype=np.intp, order='C')
coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C')
coord_row.fill(k)
n_additions = len(coord_row)
ini = np.empty(n_additions, dtype=np.float, order='C')
_hierarchical.compute_ward_dist(moments_1, moments_2,
coord_row, coord_col, ini)
# List comprehension is faster than a for loop
[heappush(inertia, (ini[idx], k, coord_col[idx]))
for idx in range(n_additions)]
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# sort children to get consistent output with unstructured version
children = [c[::-1] for c in children]
children = np.array(children) # return numpy array for efficient caching
if return_distance:
# 2 is scaling factor to compare w/ unstructured version
distances = np.sqrt(2. * distances)
return children, n_components, n_leaves, parent, distances
else:
return children, n_components, n_leaves, parent
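# Illustrative sketch, not part of the original module: building a structured
# Ward tree on a small 4x4 grid of samples. grid_to_graph is the public
# scikit-learn helper that builds a grid-adjacency connectivity matrix.
def _example_ward_tree():
    import numpy as np
    from sklearn.feature_extraction.image import grid_to_graph
    rng = np.random.RandomState(0)
    X = rng.rand(16, 3)                     # 16 samples laid out on a 4x4 grid
    connectivity = grid_to_graph(4, 4)      # sparse adjacency, shape (16, 16)
    children, n_components, n_leaves, parents = ward_tree(X, connectivity)
    return children, n_leaves               # children has shape (15, 2)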
# average and complete linkage
def linkage_tree(X, connectivity=None, n_components=None,
n_clusters=None, linkage='complete', affinity="euclidean",
return_distance=False):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, which takes into account some topological
structure between samples.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e., the hierarchical clustering algorithm is unstructured.
n_components : int (optional)
Number of connected components. If None the number of connected
components is estimated from the connectivity matrix.
NOTE: This parameter is now directly determined from the connectivity
matrix and will be removed in 0.18
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete"}, optional, default: "complete"
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observations.
- average uses the average of the distances of each observation of
the two sets
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
affinity : string or callable, optional, default: "euclidean".
which metric to use. Can be "euclidean", "manhattan", or any
distance known to paired distances (see sklearn.metrics.pairwise).
return_distance : bool, default False
whether or not to return the distances between the clusters.
Returns
-------
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, otherwise 'None' is returned.
distances : ndarray, shape (n_nodes-1,)
Returned when return_distance is set to True.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See also
--------
ward_tree : hierarchical clustering with ward linkage
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
linkage_choices = {'complete': _hierarchical.max_merge,
'average': _hierarchical.average_merge,
}
try:
join_func = linkage_choices[linkage]
except KeyError:
raise ValueError(
'Unknown linkage option, linkage should be one '
'of %s, but %s was given' % (linkage_choices.keys(), linkage))
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
if affinity == 'precomputed':
# for the linkage function of hierarchy to work on precomputed
# data, provide as first argument an ndarray of the shape returned
# by pdist: it is a flat array containing the upper triangular of
# the distance matrix.
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == 'l2':
# Translate to something understood by scipy
affinity = 'euclidean'
elif affinity in ('l1', 'manhattan'):
affinity = 'cityblock'
elif callable(affinity):
X = affinity(X)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(np.int)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
return children_, 1, n_samples, None
if n_components is not None:
warnings.warn(
"n_components is now directly calculated from the connectivity "
"matrix and will be removed in 0.18",
DeprecationWarning)
connectivity, n_components = _fix_connectivity(X, connectivity)
connectivity = connectivity.tocoo()
# Remove the diagonal entries
diag_mask = (connectivity.row != connectivity.col)
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
if affinity == 'precomputed':
distances = X[connectivity.row, connectivity.col]
else:
# FIXME We compute all the distances, while we could have only computed
# the "interesting" distances
distances = paired_distances(X[connectivity.row],
X[connectivity.col],
metric=affinity)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if return_distance:
distances = np.empty(n_nodes - n_samples)
# create inertia heap and connection matrix
A = np.empty(n_nodes, dtype=object)
inertia = list()
# LIL seems to be the best format to access the rows quickly,
# without the numpy overhead of slicing CSR indices and data.
connectivity = connectivity.tolil()
# We are storing the graph in a list of IntFloatDict
for ind, (data, row) in enumerate(zip(connectivity.data,
connectivity.rows)):
A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp),
np.asarray(data, dtype=np.float64))
# We keep only the upper triangular for the heap
# Generator expressions are faster than arrays on the following
inertia.extend(_hierarchical.WeightedEdge(d, ind, r)
for r, d in zip(row, data) if r < ind)
del connectivity
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
# recursive merge loop
for k in xrange(n_samples, n_nodes):
# identify the merge
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
# store distances
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
# Keep track of the number of elements per cluster
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
# update the structure matrix A and the inertia matrix
# a clever 'min', or 'max' operation between A[i] and A[j]
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for l, d in coord_col:
A[l].append(k, d)
# Here we use the information from coord_col (containing the
# distances) to update the heap
heappush(inertia, _hierarchical.WeightedEdge(d, k, l))
A[k] = coord_col
# Clear A[i] and A[j] to save memory
A[i] = A[j] = 0
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# # return numpy array for efficient caching
children = np.array(children)[:, ::-1]
if return_distance:
return children, n_components, n_leaves, parent, distances
return children, n_components, n_leaves, parent
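# Illustrative sketch, not part of the original module: an average-linkage tree
# with a k-nearest-neighbors connectivity (assumes a scikit-learn version whose
# kneighbors_graph accepts include_self). With return_distance=True the merge
# heights come back alongside the tree.
def _example_linkage_tree():
    import numpy as np
    from sklearn.neighbors import kneighbors_graph
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    connectivity = kneighbors_graph(X, n_neighbors=5, include_self=False)
    children, n_components, n_leaves, parents, distances = linkage_tree(
        X, connectivity, linkage='average', return_distance=True)
    return children, distances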
# Matching names to tree-building strategies
def _complete_linkage(*args, **kwargs):
kwargs['linkage'] = 'complete'
return linkage_tree(*args, **kwargs)
def _average_linkage(*args, **kwargs):
kwargs['linkage'] = 'average'
return linkage_tree(*args, **kwargs)
_TREE_BUILDERS = dict(
ward=ward_tree,
complete=_complete_linkage,
average=_average_linkage,
)
###############################################################################
# Functions for cutting hierarchical clustering tree
def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : list of pairs. Length of n_nodes
The children of each non-leaf node. Values less than `n_samples` refer
to leaves of the tree. A greater value `i` indicates a node with
children `children[i - n_samples]`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
cluster labels for each point
"""
if n_clusters > n_leaves:
raise ValueError('Cannot extract more clusters than samples: '
'%s clusters were given for a tree with %s leaves.'
% (n_clusters, n_leaves))
# In this function, we store nodes as a heap to avoid recomputing
# the max of the nodes: the first element is always the smallest
# We use negated indices as heaps work on smallest elements, and we
# are interested in largest elements
# children[-1] is the root of the tree
nodes = [-(max(children[-1]) + 1)]
for i in xrange(n_clusters - 1):
# As we have a heap, nodes[0] is the smallest element
these_children = children[-nodes[0] - n_leaves]
# Insert the 2 children and remove the largest node
heappush(nodes, -these_children[0])
heappushpop(nodes, -these_children[1])
label = np.zeros(n_leaves, dtype=np.intp)
for i, node in enumerate(nodes):
label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
return label
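# Illustrative sketch, not part of the original module: cutting a full
# (unstructured) ward tree into a flat labelling with 3 clusters.
def _example_hc_cut():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    children, n_components, n_leaves, parents = ward_tree(X)
    labels = _hc_cut(3, children, n_leaves)
    return labels       # one integer cluster id in {0, 1, 2} per sample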
###############################################################################
class AgglomerativeClustering(BaseEstimator, ClusterMixin):
"""
Agglomerative Clustering
Recursively merges the pair of clusters that minimally increases
a given linkage distance.
Parameters
----------
n_clusters : int, default=2
The number of clusters to find.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
kneighbors_graph. Default is None, i.e., the
hierarchical clustering algorithm is unstructured.
affinity : string or callable, default: "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : Instance of joblib.Memory or string (optional)
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int (optional)
Number of connected components. If None the number of connected
components is estimated from the connectivity matrix.
NOTE: This parameter is now directly determined from the connectivity
matrix and will be removed in 0.18
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default: "ward"
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observations. The algorithm will merge
the pairs of clusters that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each observation of
the two sets.
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
pooling_func : callable, default=np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument ``axis=1``, and reduce it to an array of size [M].
Attributes
----------
labels_ : array [n_samples]
cluster labels for each point
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
"""
def __init__(self, n_clusters=2, affinity="euclidean",
memory=Memory(cachedir=None, verbose=0),
connectivity=None, n_components=None,
compute_full_tree='auto', linkage='ward',
pooling_func=np.mean):
self.n_clusters = n_clusters
self.memory = memory
self.n_components = n_components
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.affinity = affinity
self.pooling_func = pooling_func
def fit(self, X, y=None):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The samples a.k.a. observations.
Returns
-------
self
"""
X = check_array(X)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
if self.linkage == "ward" and self.affinity != "euclidean":
raise ValueError("%s was provided as affinity. Ward can only "
"work with euclidean distances." %
(self.affinity, ))
if self.linkage not in _TREE_BUILDERS:
raise ValueError("Unknown linkage type %s."
"Valid options are %s" % (self.linkage,
_TREE_BUILDERS.keys()))
tree_builder = _TREE_BUILDERS[self.linkage]
connectivity = self.connectivity
if self.connectivity is not None:
if callable(self.connectivity):
connectivity = self.connectivity(X)
connectivity = check_array(
connectivity, accept_sparse=['csr', 'coo', 'lil'])
n_samples = len(X)
compute_full_tree = self.compute_full_tree
if self.connectivity is None:
compute_full_tree = True
if compute_full_tree == 'auto':
# Early stopping is likely to give a speed up only for
# a large number of clusters. The actual threshold
# implemented here is heuristic
compute_full_tree = self.n_clusters < max(100, .02 * n_samples)
n_clusters = self.n_clusters
if compute_full_tree:
n_clusters = None
# Construct the tree
kwargs = {}
if self.linkage != 'ward':
kwargs['linkage'] = self.linkage
kwargs['affinity'] = self.affinity
self.children_, self.n_components_, self.n_leaves_, parents = \
memory.cache(tree_builder)(X, connectivity,
n_components=self.n_components,
n_clusters=n_clusters,
**kwargs)
# Cut the tree
if compute_full_tree:
self.labels_ = _hc_cut(self.n_clusters, self.children_,
self.n_leaves_)
else:
labels = _hierarchical.hc_get_heads(parents, copy=False)
# copy to avoid holding a reference on the original array
labels = np.copy(labels[:n_samples])
# Reassign cluster numbers
self.labels_ = np.searchsorted(np.unique(labels), labels)
return self
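# Illustrative sketch, not part of the original module: typical estimator usage,
# here an unstructured average-linkage clustering with the manhattan metric.
def _example_agglomerative_clustering():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(30, 4)
    model = AgglomerativeClustering(n_clusters=3, linkage='average',
                                    affinity='manhattan')
    labels = model.fit(X).labels_
    return labels, model.n_leaves_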
class FeatureAgglomeration(AgglomerativeClustering, AgglomerationTransform):
"""Agglomerate features.
Similar to AgglomerativeClustering, but recursively merges features
instead of samples.
Parameters
----------
n_clusters : int, default 2
The number of clusters to find.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each feature the neighboring
features following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
kneighbors_graph. Default is None, i.e., the
hierarchical clustering algorithm is unstructured.
affinity : string or callable, default "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : Instance of joblib.Memory or string, optional
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int (optional)
Number of connected components. If None the number of connected
components is estimated from the connectivity matrix.
NOTE: This parameter is now directly determined from the connectivity
matrix and will be removed in 0.18
compute_full_tree : bool or 'auto', optional, default "auto"
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of features. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default "ward"
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of features. The algorithm will merge
the pairs of clusters that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each feature of
the two sets.
- complete or maximum linkage uses the maximum distances between
all features of the two sets.
pooling_func : callable, default np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument `axis=1`, and reduce it to an array of size [M].
Attributes
----------
labels_ : array-like, (n_features,)
cluster labels for each feature.
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_features`
correspond to leaves of the tree which are the original features.
A node `i` greater than or equal to `n_features` is a non-leaf
node and has children `children_[i - n_features]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_features + i`
"""
def fit(self, X, y=None, **params):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The data
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if not (len(X.shape) == 2 and X.shape[0] > 0):
raise ValueError('At least one sample is required to fit the '
'model. A data matrix of shape %s was given.'
% (X.shape, ))
return AgglomerativeClustering.fit(self, X.T, **params)
@property
def fit_predict(self):
raise AttributeError
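# Illustrative sketch, not part of the original module: agglomerating 8 features
# into 2 pooled features, then projecting the data onto them with transform().
def _example_feature_agglomeration():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(20, 8)
    agglo = FeatureAgglomeration(n_clusters=2)
    X_reduced = agglo.fit(X).transform(X)   # shape (20, 2), pooled with np.mean
    return X_reduced, agglo.labels_         # labels_ has one entry per feature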
###############################################################################
# Backward compatibility: class for Ward hierarchical clustering
class Ward(AgglomerativeClustering):
"""Ward hierarchical clustering: constructs a tree and cuts it.
Recursively merges the pair of clusters that minimally increases
within-cluster variance.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to find.
connectivity : sparse matrix (optional)
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
Default is None, i.e., the hierarchical clustering algorithm is
unstructured.
memory : Instance of joblib.Memory or string (optional)
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int (optional)
The number of connected components in the graph defined by the
connectivity matrix. If not set, it is estimated.
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
Attributes
----------
labels_ : array [n_samples]
cluster labels for each point
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
refer to leaves of the tree. A greater value `i` indicates a node with
children `children_[i - n_samples]`.
See also
--------
AgglomerativeClustering : agglomerative hierarchical clustering
"""
linkage = 'ward'
def __init__(self, n_clusters=2, memory=Memory(cachedir=None, verbose=0),
connectivity=None, n_components=None,
compute_full_tree='auto', pooling_func=np.mean):
warnings.warn("The Ward class is deprecated since 0.14 and will be "
"removed in 0.17. Use the AgglomerativeClustering "
"instead.", DeprecationWarning)
self.n_clusters = n_clusters
self.memory = memory
self.n_components = n_components
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.affinity = "euclidean"
self.pooling_func = pooling_func
class WardAgglomeration(AgglomerationTransform, Ward):
"""Feature agglomeration based on Ward hierarchical clustering
Parameters
----------
n_clusters : int or ndarray
The number of clusters.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
kneighbors_graph. Default is None, i.e., the
hierarchical clustering algorithm is unstructured.
memory : Instance of joblib.Memory or string, optional
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int (optional)
The number of connected components in the graph defined by the
connectivity matrix. If not set, it is estimated.
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
pooling_func : callable, default=np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument `axis=1`, and reduce it to an array of size [M].
Attributes
----------
children_ : array-like, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_features`
correspond to leaves of the tree which are the original features.
A node `i` greater than or equal to `n_features` is a non-leaf
node and has children `children_[i - n_features]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_features + i`
labels_ : array [n_features]
cluster labels for each feature
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
"""
def fit(self, X, y=None, **params):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The data
Returns
-------
self
"""
X = check_array(X)
return Ward.fit(self, X.T, **params)
@property
def fit_predict(self):
raise AttributeError
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
This is essentially a copy of nova.virt.images.py.
Some slight modifications, but at some point
we should look at maybe pushing this up to Oslo.
"""
import contextlib
import math
import os
import re
import tempfile
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import imageutils
from cinder import utils
from cinder.volume import throttling
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
image_helper_opts = [cfg.StrOpt('image_conversion_dir',
default='$state_path/conversion',
help='Directory used for temporary storage '
'during image conversion'), ]
CONF = cfg.CONF
CONF.register_opts(image_helper_opts)
def qemu_img_info(path, run_as_root=True):
"""Return a object containing the parsed output from qemu-img info."""
cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path)
if os.name == 'nt':
cmd = cmd[2:]
out, _err = utils.execute(*cmd, run_as_root=run_as_root)
return imageutils.QemuImgInfo(out)
def get_qemu_img_version():
info = utils.execute('qemu-img', '--help', check_exit_code=False)[0]
pattern = r"qemu-img version ([0-9\.]*)"
version = re.match(pattern, info)
if not version:
LOG.warning(_LW("qemu-img is not installed."))
return None
return _get_version_from_string(version.groups()[0])
def _get_version_from_string(version_string):
return [int(x) for x in version_string.split('.')]
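# Illustrative sketch, not part of the original module: version strings become
# integer lists, so plain list comparison orders them correctly (2.10 > 2.9).
def _example_version_comparison():
    assert _get_version_from_string('2.10.0') > _get_version_from_string('2.9.1')
    return _get_version_from_string('1.5.3')    # -> [1, 5, 3]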
def check_qemu_img_version(minimum_version):
qemu_version = get_qemu_img_version()
if (qemu_version is None
or qemu_version < _get_version_from_string(minimum_version)):
if qemu_version:
current_version = '.'.join((str(element)
for element in qemu_version))
else:
current_version = None
_msg = _('qemu-img %(minimum_version)s or later is required by '
'this volume driver. Current qemu-img version: '
'%(current_version)s') % {'minimum_version': minimum_version,
'current_version': current_version}
raise exception.VolumeBackendAPIException(data=_msg)
def _convert_image(prefix, source, dest, out_format, run_as_root=True):
"""Convert image to other format."""
cmd = prefix + ('qemu-img', 'convert',
'-O', out_format, source, dest)
# Check whether O_DIRECT is supported and set '-t none' if it is
# This is needed to ensure that all data hit the device before
# it gets unmapped remotely from the host for some backends
# Reference Bug: #1363016
# NOTE(jdg): In the case of file devices qemu does the
# flush properly and more efficiently than would be done
# setting O_DIRECT, so check for that and skip the
# setting for non BLK devs
if (utils.is_blk_device(dest) and
volume_utils.check_for_odirect_support(source,
dest,
'oflag=direct')):
cmd = prefix + ('qemu-img', 'convert',
'-t', 'none',
'-O', out_format, source, dest)
start_time = timeutils.utcnow()
utils.execute(*cmd, run_as_root=run_as_root)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
fsz_mb = os.stat(source).st_size / units.Mi
mbps = (fsz_mb / duration)
msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
"duration %(duration).2f sec, destination %(dest)s")
LOG.debug(msg, {"src": source,
"sz": fsz_mb,
"duration": duration,
"dest": dest})
msg = _LI("Converted %(sz).2f MB image at %(mbps).2f MB/s")
LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def convert_image(source, dest, out_format, run_as_root=True, throttle=None):
if not throttle:
throttle = throttling.Throttle.get_default()
with throttle.subcommand(source, dest) as throttle_cmd:
_convert_image(tuple(throttle_cmd['prefix']),
source, dest,
out_format, run_as_root=run_as_root)
def resize_image(source, size, run_as_root=False):
"""Changes the virtual size of the image."""
cmd = ('qemu-img', 'resize', source, '%sG' % size)
utils.execute(*cmd, run_as_root=run_as_root)
def fetch(context, image_service, image_id, path, _user_id, _project_id):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
start_time = timeutils.utcnow()
with fileutils.remove_path_on_error(path):
with open(path, "wb") as image_file:
image_service.download(context, image_id, image_file)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
fsz_mb = os.stat(image_file.name).st_size / units.Mi
mbps = (fsz_mb / duration)
msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
"duration %(duration).2f sec")
LOG.debug(msg, {"dest": image_file.name,
"sz": fsz_mb,
"duration": duration})
msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s")
LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def fetch_verify_image(context, image_service, image_id, dest,
user_id=None, project_id=None, size=None,
run_as_root=True):
fetch(context, image_service, image_id, dest,
None, None)
with fileutils.remove_path_on_error(dest):
data = qemu_img_info(dest, run_as_root=run_as_root)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
# NOTE(xqueralt): If the image virtual size doesn't fit in the
# requested volume there is no point on resizing it because it will
# generate an unusable image.
if size is not None and data.virtual_size > size:
params = {'image_size': data.virtual_size, 'volume_size': size}
reason = _("Size is %(image_size)dGB and doesn't fit in a "
"volume of size %(volume_size)dGB.") % params
raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
def fetch_to_vhd(context, image_service,
image_id, dest, blocksize,
user_id=None, project_id=None, run_as_root=True):
fetch_to_volume_format(context, image_service, image_id, dest, 'vpc',
blocksize, user_id, project_id,
run_as_root=run_as_root)
def fetch_to_raw(context, image_service,
image_id, dest, blocksize,
user_id=None, project_id=None, size=None, run_as_root=True):
fetch_to_volume_format(context, image_service, image_id, dest, 'raw',
blocksize, user_id, project_id, size,
run_as_root=run_as_root)
def fetch_to_volume_format(context, image_service,
image_id, dest, volume_format, blocksize,
user_id=None, project_id=None, size=None,
run_as_root=True):
qemu_img = True
image_meta = image_service.show(context, image_id)
# NOTE(avishay): I'm not crazy about creating temp files which may be
# large and cause disk full errors which would confuse users.
# Unfortunately it seems that you can't pipe to 'qemu-img convert' because
# it seeks. Maybe we can think of something for a future version.
with temporary_file() as tmp:
# We may be on a system that doesn't have qemu-img installed. That
# is ok if we are working with a RAW image. This logic checks to see
# if qemu-img is installed. If not we make sure the image is RAW and
# throw an exception if not. Otherwise we stop before needing
# qemu-img. Systems with qemu-img will always progress through the
# whole function.
try:
# Use the empty tmp file to make sure qemu_img_info works.
qemu_img_info(tmp, run_as_root=run_as_root)
except processutils.ProcessExecutionError:
qemu_img = False
if image_meta:
if image_meta['disk_format'] != 'raw':
raise exception.ImageUnacceptable(
reason=_("qemu-img is not installed and image is of "
"type %s. Only RAW images can be used if "
"qemu-img is not installed.") %
image_meta['disk_format'],
image_id=image_id)
else:
raise exception.ImageUnacceptable(
reason=_("qemu-img is not installed and the disk "
"format is not specified. Only RAW images "
"can be used if qemu-img is not installed."),
image_id=image_id)
tmp_images = TemporaryImages.for_image_service(image_service)
tmp_image = tmp_images.get(context, image_id)
if tmp_image:
tmp = tmp_image
else:
fetch(context, image_service, image_id, tmp, user_id, project_id)
if is_xenserver_image(context, image_service, image_id):
replace_xenserver_image_with_coalesced_vhd(tmp)
if not qemu_img:
# qemu-img is not installed but we do have a RAW image. As a
# result we only need to copy the image to the destination and then
# return.
LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
'size: %(size)s', {'tmp': tmp, 'dest': dest,
'size': image_meta['size']})
image_size_m = math.ceil(image_meta['size'] / units.Mi)
volume_utils.copy_volume(tmp, dest, image_size_m, blocksize)
return
data = qemu_img_info(tmp, run_as_root=run_as_root)
virt_size = data.virtual_size / units.Gi
# NOTE(xqueralt): If the image virtual size doesn't fit in the
# requested volume there is no point on resizing it because it will
# generate an unusable image.
if size is not None and virt_size > size:
params = {'image_size': virt_size, 'volume_size': size}
reason = _("Size is %(image_size)dGB and doesn't fit in a "
"volume of size %(volume_size)dGB.") % params
raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file, })
# NOTE(jdg): I'm using qemu-img convert to write
# to the volume regardless if it *needs* conversion or not
# TODO(avishay): We can speed this up by checking if the image is raw
# and if so, writing directly to the device. However, we need to keep
# check via 'qemu-img info' that what we copied was in fact a raw
# image and not a different format with a backing file, which may be
# malicious.
LOG.debug("%s was %s, converting to %s ", image_id, fmt, volume_format)
convert_image(tmp, dest, volume_format,
run_as_root=run_as_root)
data = qemu_img_info(dest, run_as_root=run_as_root)
if not _validate_file_format(data, volume_format):
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("Converted to %(vol_format)s, but format is "
"now %(file_format)s") % {'vol_format': volume_format,
'file_format': data.
file_format})
def _validate_file_format(image_data, expected_format):
if image_data.file_format == expected_format:
return True
elif image_data.file_format == 'vpc' and expected_format == 'vhd':
# qemu-img still uses the legacy 'vpc' name for the vhd format.
return True
return False
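# Illustrative sketch, not part of the original module: the 'vpc'/'vhd' aliasing
# in action, using a tiny stand-in for the imageutils.QemuImgInfo result (only
# the file_format attribute is consulted).
def _example_validate_file_format():
    class _FakeImgInfo(object):
        def __init__(self, file_format):
            self.file_format = file_format
    assert _validate_file_format(_FakeImgInfo('raw'), 'raw')
    assert _validate_file_format(_FakeImgInfo('vpc'), 'vhd')
    assert not _validate_file_format(_FakeImgInfo('qcow2'), 'raw')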
def upload_volume(context, image_service, image_meta, volume_path,
volume_format='raw', run_as_root=True):
image_id = image_meta['id']
if (image_meta['disk_format'] == volume_format):
LOG.debug("%s was %s, no need to convert to %s",
image_id, volume_format, image_meta['disk_format'])
if os.name == 'nt' or os.access(volume_path, os.R_OK):
with open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
else:
with utils.temporary_chown(volume_path):
with open(volume_path) as image_file:
image_service.update(context, image_id, {}, image_file)
return
with temporary_file() as tmp:
LOG.debug("%s was %s, converting to %s",
image_id, volume_format, image_meta['disk_format'])
data = qemu_img_info(volume_path, run_as_root=run_as_root)
backing_file = data.backing_file
fmt = data.file_format
if backing_file is not None:
# Disallow backing files as a security measure.
# This prevents a user from writing an image header into a raw
# volume with a backing file pointing to data they wish to
# access.
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file})
convert_image(volume_path, tmp, image_meta['disk_format'],
run_as_root=run_as_root)
data = qemu_img_info(tmp, run_as_root=run_as_root)
if data.file_format != image_meta['disk_format']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("Converted to %(f1)s, but format is now %(f2)s") %
{'f1': image_meta['disk_format'], 'f2': data.file_format})
with open(tmp, 'rb') as image_file:
image_service.update(context, image_id, {}, image_file)
def is_xenserver_image(context, image_service, image_id):
image_meta = image_service.show(context, image_id)
return is_xenserver_format(image_meta)
def is_xenserver_format(image_meta):
return (
image_meta['disk_format'] == 'vhd'
and image_meta['container_format'] == 'ovf'
)
def set_vhd_parent(vhd_path, parentpath):
utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath)
def extract_targz(archive_name, target):
utils.execute('tar', '-xzf', archive_name, '-C', target)
def fix_vhd_chain(vhd_chain):
for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]):
set_vhd_parent(child, parent)
def get_vhd_size(vhd_path):
out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
return int(out)
def resize_vhd(vhd_path, size, journal):
utils.execute(
'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal)
def coalesce_vhd(vhd_path):
utils.execute(
'vhd-util', 'coalesce', '-n', vhd_path)
def create_temporary_file(*args, **kwargs):
if (CONF.image_conversion_dir and not
os.path.exists(CONF.image_conversion_dir)):
os.makedirs(CONF.image_conversion_dir)
fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir, *args, **kwargs)
os.close(fd)
return tmp
@contextlib.contextmanager
def temporary_file(*args, **kwargs):
tmp = None
try:
tmp = create_temporary_file(*args, **kwargs)
yield tmp
finally:
if tmp:
fileutils.delete_if_exists(tmp)
def temporary_dir():
if (CONF.image_conversion_dir and not
os.path.exists(CONF.image_conversion_dir)):
os.makedirs(CONF.image_conversion_dir)
return utils.tempdir(dir=CONF.image_conversion_dir)
def coalesce_chain(vhd_chain):
for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]):
with temporary_dir() as directory_for_journal:
size = get_vhd_size(child)
journal_file = os.path.join(
directory_for_journal, 'vhd-util-resize-journal')
resize_vhd(parent, size, journal_file)
coalesce_vhd(child)
return vhd_chain[-1]
def discover_vhd_chain(directory):
counter = 0
chain = []
while True:
fpath = os.path.join(directory, '%d.vhd' % counter)
if os.path.exists(fpath):
chain.append(fpath)
else:
break
counter += 1
return chain
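# Illustrative sketch, not part of the original module: discover_vhd_chain simply
# walks 0.vhd, 1.vhd, ... in a directory until the next file in the sequence is
# missing, and returns the paths found so far in order.
def _example_discover_vhd_chain():
    import os
    import shutil
    import tempfile
    workdir = tempfile.mkdtemp()
    try:
        for name in ('0.vhd', '1.vhd', '2.vhd'):
            open(os.path.join(workdir, name), 'w').close()
        chain = discover_vhd_chain(workdir)     # three paths, in order
    finally:
        shutil.rmtree(workdir)
    return chain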
def replace_xenserver_image_with_coalesced_vhd(image_file):
with temporary_dir() as tempdir:
extract_targz(image_file, tempdir)
chain = discover_vhd_chain(tempdir)
fix_vhd_chain(chain)
coalesced = coalesce_chain(chain)
fileutils.delete_if_exists(image_file)
os.rename(coalesced, image_file)
class TemporaryImages(object):
"""Manage temporarily downloaded images to avoid downloading it twice.
In the 'with TemporaryImages.fetch(image_service, ctx, image_id) as tmp'
clause, 'tmp' can be used as the downloaded image path. In addition,
image_utils.fetch() will use the pre-fetched image by the TemporaryImages.
This is useful to inspect image contents before conversion.
"""
def __init__(self, image_service):
self.temporary_images = {}
self.image_service = image_service
image_service.temp_images = self
@staticmethod
def for_image_service(image_service):
instance = image_service.temp_images
if instance:
return instance
return TemporaryImages(image_service)
@classmethod
@contextlib.contextmanager
def fetch(cls, image_service, context, image_id):
tmp_images = cls.for_image_service(image_service).temporary_images
with temporary_file() as tmp:
fetch_verify_image(context, image_service, image_id, tmp)
user = context.user_id
if not tmp_images.get(user):
tmp_images[user] = {}
tmp_images[user][image_id] = tmp
LOG.debug("Temporary image %(id)s is fetched for user %(user)s.",
{'id': image_id, 'user': user})
yield tmp
del tmp_images[user][image_id]
LOG.debug("Temporary image %(id)s for user %(user)s is deleted.",
{'id': image_id, 'user': user})
def get(self, context, image_id):
user = context.user_id
if not self.temporary_images.get(user):
return None
return self.temporary_images[user].get(image_id)
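# Illustrative sketch, not part of the original module: for_image_service attaches
# one TemporaryImages registry per image service and reuses it on later calls.
# The bare _FakeImageService below is a hypothetical stand-in for a real glance
# service object.
def _example_temporary_images_registry():
    class _FakeImageService(object):
        temp_images = None
    service = _FakeImageService()
    registry = TemporaryImages.for_image_service(service)
    assert TemporaryImages.for_image_service(service) is registry
    return registry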
|
|
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError)
from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError, ContentDecodingError, ConnectionError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
rf = RequestField(name=k, data=fp.read(),
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
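# Illustrative sketch, not part of the original module: _encode_params is a plain
# staticmethod, so it can be exercised directly. A list of 2-tuples keeps its
# order in the encoded query string.
def _example_encode_params():
    encoded = RequestEncodingMixin._encode_params([('q', 'python'), ('page', '2')])
    return encoded                  # -> 'q=python&page=2'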
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
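# Illustrative sketch, not part of the original module: preparing a Request by
# hand shows what PreparedRequest fills in: an upper-cased method and a fully
# encoded URL. The httpbin.org URL is only an example value.
def _example_prepare_request():
    req = Request(method='get',
                  url='http://httpbin.org/get',
                  params={'key': 'value'})
    prepared = req.prepare()
    return prepared     # prepared.method == 'GET', URL ends with '?key=value'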
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = self._cookies.copy() if self._cookies is not None else None
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Don't do any URL preparation for oddball schemes
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
        if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
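# --- Hedged usage sketch (not part of the library) ----------------------------
# Mirrors the flow in the PreparedRequest docstring above: build a Request,
# prepare it, send it through a Session, then use the Response helpers defined
# in this module. The URL is only a placeholder assumption.
def _demo_prepared_request_flow():
    import requests  # assumes the packaged library is importable as `requests`
    req = requests.Request('GET', 'http://httpbin.org/get', params={'q': 'spam'})
    prepped = req.prepare()              # -> PreparedRequest
    session = requests.Session()
    try:
        resp = session.send(prepped)     # -> Response
        resp.raise_for_status()          # raises HTTPError on 4xx/5xx
        return resp.json()               # decodes the JSON body
    finally:
        session.close()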
|
|
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.auth import credentials
import mock
import pytest
from google.cloud.pubsub_v1.gapic import publisher_client
from google.cloud.pubsub_v1 import publisher
from google.cloud.pubsub_v1 import types
def test_init():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
# A plain client should have an `api` (the underlying GAPIC) and a
# batch settings object, which should have the defaults.
assert isinstance(client.api, publisher_client.PublisherClient)
assert client.batch_settings.max_bytes == 10 * 1000 * 1000
assert client.batch_settings.max_latency == 0.05
assert client.batch_settings.max_messages == 1000
def test_init_emulator(monkeypatch):
monkeypatch.setenv('PUBSUB_EMULATOR_HOST', '/foo/bar/')
# NOTE: When the emulator host is set, a custom channel will be used, so
    # no credentials (mock or otherwise) can be passed in.
client = publisher.Client()
# Establish that a gRPC request would attempt to hit the emulator host.
#
# Sadly, there seems to be no good way to do this without poking at
# the private API of gRPC.
channel = client.api.transport.publish._channel
assert channel.target().decode('utf8') == '/foo/bar/'
def test_batch_create():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
assert len(client._batches) == 0
topic = 'topic/path'
batch = client._batch(topic, autocommit=False)
assert client._batches == {topic: batch}
def test_batch_exists():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
topic = 'topic/path'
client._batches[topic] = mock.sentinel.batch
# A subsequent request should return the same batch.
batch = client._batch(topic, autocommit=False)
assert batch is mock.sentinel.batch
assert client._batches == {topic: batch}
def test_batch_create_and_exists():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
topic = 'topic/path'
client._batches[topic] = mock.sentinel.batch
# A subsequent request should return the same batch.
batch = client._batch(topic, create=True, autocommit=False)
assert batch is not mock.sentinel.batch
assert client._batches == {topic: batch}
def test_publish():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
# Use a mock in lieu of the actual batch class.
batch = mock.Mock(spec=client._batch_class)
# Set the mock up to claim indiscriminately that it accepts all messages.
batch.will_accept.return_value = True
batch.publish.side_effect = (
mock.sentinel.future1,
mock.sentinel.future2,
)
topic = 'topic/path'
client._batches[topic] = batch
# Begin publishing.
future1 = client.publish(topic, b'spam')
future2 = client.publish(topic, b'foo', bar='baz')
assert future1 is mock.sentinel.future1
assert future2 is mock.sentinel.future2
# Check mock.
batch.publish.assert_has_calls(
[
mock.call(types.PubsubMessage(data=b'spam')),
mock.call(types.PubsubMessage(
data=b'foo',
attributes={'bar': 'baz'},
)),
],
)
def test_publish_data_not_bytestring_error():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
topic = 'topic/path'
with pytest.raises(TypeError):
client.publish(topic, u'This is a text string.')
with pytest.raises(TypeError):
client.publish(topic, 42)
def test_publish_attrs_bytestring():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
# Use a mock in lieu of the actual batch class.
batch = mock.Mock(spec=client._batch_class)
# Set the mock up to claim indiscriminately that it accepts all messages.
batch.will_accept.return_value = True
topic = 'topic/path'
client._batches[topic] = batch
# Begin publishing.
future = client.publish(topic, b'foo', bar=b'baz')
assert future is batch.publish.return_value
# The attributes should have been sent as text.
batch.publish.assert_called_once_with(
types.PubsubMessage(
data=b'foo',
attributes={'bar': u'baz'},
),
)
def test_publish_new_batch_needed():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
# Use mocks in lieu of the actual batch class.
batch1 = mock.Mock(spec=client._batch_class)
batch2 = mock.Mock(spec=client._batch_class)
# Set the first mock up to claim indiscriminately that it rejects all
# messages and the second accepts all.
batch1.publish.return_value = None
batch2.publish.return_value = mock.sentinel.future
topic = 'topic/path'
client._batches[topic] = batch1
# Actually mock the batch class now.
batch_class = mock.Mock(spec=(), return_value=batch2)
client._batch_class = batch_class
# Publish a message.
future = client.publish(topic, b'foo', bar=b'baz')
assert future is mock.sentinel.future
# Check the mocks.
batch_class.assert_called_once_with(
autocommit=True,
client=client,
settings=client.batch_settings,
topic=topic,
)
message_pb = types.PubsubMessage(
data=b'foo',
attributes={'bar': u'baz'},
)
batch1.publish.assert_called_once_with(message_pb)
batch2.publish.assert_called_once_with(message_pb)
def test_publish_attrs_type_error():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
topic = 'topic/path'
with pytest.raises(TypeError):
client.publish(topic, b'foo', answer=42)
def test_gapic_instance_method():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
ct = mock.Mock()
client.api._inner_api_calls['create_topic'] = ct
client.create_topic('projects/foo/topics/bar')
assert ct.call_count == 1
_, args, _ = ct.mock_calls[0]
assert args[0] == types.Topic(name='projects/foo/topics/bar')
def test_gapic_class_method_on_class():
answer = publisher.Client.topic_path('foo', 'bar')
assert answer == 'projects/foo/topics/bar'
def test_class_method_factory():
patch = mock.patch(
'google.oauth2.service_account.Credentials.from_service_account_file')
with patch:
client = publisher.Client.from_service_account_file('filename.json')
assert isinstance(client, publisher.Client)
def test_gapic_class_method_on_instance():
creds = mock.Mock(spec=credentials.Credentials)
client = publisher.Client(credentials=creds)
answer = client.topic_path('foo', 'bar')
assert answer == 'projects/foo/topics/bar'
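# --- Hedged usage sketch (not part of the test suite) --------------------------
# Shows the publish flow that the tests above exercise with mocks. The project
# and topic names are placeholder assumptions, and result() is assumed to block
# until the publish is acknowledged and to return the server-assigned message ID.
def _demo_publish():
    from google.cloud.pubsub_v1 import publisher
    client = publisher.Client()  # credentials resolved from the environment
    topic = client.topic_path('my-project', 'my-topic')  # placeholder names
    # data must be bytes; attribute values must be text (see the tests above)
    future = client.publish(topic, b'payload', origin='demo')
    return future.result()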
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import select
#import signal
import time
import copy
#tcp option
SO_REUSEADDR = 1
SO_KEEPALIVE = 1
TCP_NODELAY = 1
SO_SNDBUF = 10240
SO_RCVBUF = 10240
SOCKET_TIMEOUT = 120
#socket list
server_list = []
client_list = []
class ServerSocket:
def __init__(self, bind_port, bind_address):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, SO_REUSEADDR)
self.socket.setblocking(0)
self.socket.settimeout(SOCKET_TIMEOUT)
self.socket.bind((bind_address, bind_port))
self.socket.listen(10)
self.address = (bind_address, bind_port)
self.client_list = []
self.closed = False
server_list.append(self)
print "listen", self
def __str__(self):
return repr(self)+str(self.address)
def fileno(self):
return self.socket.fileno()
def socket_accept(self):
return self.socket.accept()
def get_active_client(self):
return get_active_client(self.client_list)
def accept(self):
client, address = self.socket_accept()
client.setblocking(0)
client.settimeout(SOCKET_TIMEOUT)
s = Socket(client, address, self)
self.client_list.append(s)
print "accept", s
def close(self):
if self.closed:
return
self.closed = True
while self.client_list:
self.client_list.pop().close(True)
self.socket.close()
self.socket = None
server_list.remove(self)
print "close", self
#print server_list, self.client_list
del self
class Socket:
def __init__(self, socket, address, server):
self.socket = socket
self.address = address
self.server = server
self.recv_data = ""
self.closed = False
client_list.append(self)
def __str__(self):
return repr(self)+str(self.address)
def socket_send(self, data):
self.socket.send(data)
def socket_sendall(self, data):
self.socket.sendall(data)
def socket_recv(self, size):
return self.socket.recv(size)
def fileno(self):
return self.socket.fileno()
def close(self, from_server=False):
if self.closed:
return
self.closed = True
self.socket.close()
self.socket = None
client_list.remove(self)
if self.server and not from_server:
self.server.client_list.remove(self)
print "close", self
#print client_list, self.server.client_list
del self
def send(self, data):
self.socket_sendall(data)
def recv(self, size=1024):
data = self.socket.recv(size)
self.recv_data += data
return data and True or False
def wait(self):
while not self.has_data():
time.sleep(0.01)
def has_data(self):
return self.recv_data and True or False
def get_data(self):
data = self.recv_data
self.recv_data = ""
return data
def create_server(bind_port, bind_address="0.0.0.0"):
server = ServerSocket(bind_port, bind_address)
return server
def create_connection(address):
"""address: (server address, server port)"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, SO_SNDBUF)
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, SO_RCVBUF)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, SO_KEEPALIVE)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, TCP_NODELAY)
s.setblocking(0)
s.settimeout(SOCKET_TIMEOUT)
s.connect(address)
connection = Socket(s, address, None) #s.getsockname, s.getpeername
return connection
def close_server(bind_port, bind_address="0.0.0.0"):
server_remove = None
for server in server_list:
if server.address == (bind_address, bind_port):
server_remove = server
break
if server_remove:
server_remove.close()
else:
raise ValueError(
"Server bind (%s:%d) not in server list"%(bind_address, bind_port))
def get_active_client(lst=None):
active_client = []
if not lst: lst = client_list
for client in lst:
if client.has_data():
active_client.append(client)
return active_client
def handle():
while True:
read_list, write_list, error_list = select.select(server_list, (), (), 0)
if not read_list:
break
for server in read_list:
server.accept()
while True:
read_list, write_list, error_list = select.select(client_list, (), (), 0)
if not read_list:
break
for client in read_list:
try:
if not client.recv():
raise Exception
except:
client.close()
continue
return True
def telnet_server_handle():
global server
CMD_PRINT = "print "
CMD_QUIT = "quit\n"
CMD_SHUTDOWN = "shutdown\n"
CMD_REBOOT = "reboot\n"
CMD_PRINT_SOCKET_LIST = "prints\n"
CMD_HELP = "help\n"
for client in server.get_active_client():
data = client.get_data().replace("\r\n", "\n")
#print "recv", data
if data.startswith(CMD_PRINT):
client.send(data[len(CMD_PRINT):-1]+"\n")
elif data.startswith(CMD_QUIT):
client.close()
elif data.startswith(CMD_SHUTDOWN):
server.close()
elif data.startswith(CMD_REBOOT):
server.close()
server = create_server(10000)
elif data.startswith(CMD_PRINT_SOCKET_LIST):
client.send("server_list %s\n"%str(server_list))
client.send("client_list %s\n"%str(client_list))
elif data.startswith(CMD_HELP):
client.send("command list: %s\n"%str((CMD_PRINT,
CMD_QUIT, CMD_SHUTDOWN, CMD_REBOOT, CMD_PRINT_SOCKET_LIST,
CMD_HELP)))
else:
client.send("unknow command: %s"%data)
def server_test():
global server
server = create_server(10000)
while not time.sleep(0.1):
handle()
telnet_server_handle()
def connection_test():
connection = create_connection(("127.0.0.1", 10000))
connection.send("prints\n")
while not connection.has_data() and not time.sleep(0.1):
handle()
print connection.get_data(), client_list
connection.send("quit\n")
while client_list and not time.sleep(0.1):
handle()
print client_list
if __name__ == "__main__":
server_test()
#connection_test()
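# --- Hedged usage sketch (not part of the original module) ---------------------
# A minimal echo server built from the helpers above: handle() accepts clients
# and buffers incoming data, then each active client gets its data echoed back.
# Port 10001 is an arbitrary assumption.
def echo_server_test():
    server = create_server(10001)
    while not time.sleep(0.1):
        handle()
        for client in server.get_active_client():
            client.send(client.get_data())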
|
|
"""
from twisted.internet import defer
Tests borrowed from the twisted.web.client tests.
"""
import os
import shutil
import OpenSSL.SSL
from twisted.trial import unittest
from twisted.web import server, static, util, resource
from twisted.internet import reactor, defer
try:
from twisted.internet.testing import StringTransport
except ImportError:
# deprecated in Twisted 19.7.0
# (remove once we bump our requirement past that version)
from twisted.test.proto_helpers import StringTransport
from twisted.python.filepath import FilePath
from twisted.protocols.policies import WrappingFactory
from twisted.internet.defer import inlineCallbacks
from twisted.web.test.test_webclient import (
ForeverTakingResource,
ErrorResource,
NoLengthResource,
HostHeaderResource,
PayloadResource,
BrokenDownloadResource,
)
from scrapy.core.downloader import webclient as client
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
from scrapy.http import Request, Headers
from scrapy.settings import Settings
from scrapy.utils.misc import create_instance
from scrapy.utils.python import to_bytes, to_unicode
from tests.mockserver import ssl_context_factory
def getPage(url, contextFactory=None, response_transform=None, *args, **kwargs):
"""Adapted version of twisted.web.client.getPage"""
def _clientfactory(url, *args, **kwargs):
url = to_unicode(url)
timeout = kwargs.pop('timeout', 0)
f = client.ScrapyHTTPClientFactory(
Request(url, *args, **kwargs), timeout=timeout)
f.deferred.addCallback(response_transform or (lambda r: r.body))
return f
from twisted.web.client import _makeGetterFactory
return _makeGetterFactory(
to_bytes(url), _clientfactory, contextFactory=contextFactory, *args, **kwargs
).deferred
class ParseUrlTestCase(unittest.TestCase):
"""Test URL parsing facility and defaults values."""
def _parse(self, url):
f = client.ScrapyHTTPClientFactory(Request(url))
return (f.scheme, f.netloc, f.host, f.port, f.path)
def testParse(self):
lip = '127.0.0.1'
tests = (
("http://127.0.0.1?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
("http://127.0.0.1/?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
("http://127.0.0.1/foo?c=v&c2=v2#frag", ('http', lip, lip, 80, '/foo?c=v&c2=v2')),
("http://127.0.0.1:100?c=v&c2=v2#fragment", ('http', lip + ':100', lip, 100, '/?c=v&c2=v2')),
("http://127.0.0.1:100/?c=v&c2=v2#frag", ('http', lip + ':100', lip, 100, '/?c=v&c2=v2')),
("http://127.0.0.1:100/foo?c=v&c2=v2#frag", ('http', lip + ':100', lip, 100, '/foo?c=v&c2=v2')),
("http://127.0.0.1", ('http', lip, lip, 80, '/')),
("http://127.0.0.1/", ('http', lip, lip, 80, '/')),
("http://127.0.0.1/foo", ('http', lip, lip, 80, '/foo')),
("http://127.0.0.1?param=value", ('http', lip, lip, 80, '/?param=value')),
("http://127.0.0.1/?param=value", ('http', lip, lip, 80, '/?param=value')),
("http://127.0.0.1:12345/foo", ('http', lip + ':12345', lip, 12345, '/foo')),
("http://spam:12345/foo", ('http', 'spam:12345', 'spam', 12345, '/foo')),
("http://spam.test.org/foo", ('http', 'spam.test.org', 'spam.test.org', 80, '/foo')),
("https://127.0.0.1/foo", ('https', lip, lip, 443, '/foo')),
("https://127.0.0.1/?param=value", ('https', lip, lip, 443, '/?param=value')),
("https://127.0.0.1:12345/", ('https', lip + ':12345', lip, 12345, '/')),
("http://scrapytest.org/foo ", ('http', 'scrapytest.org', 'scrapytest.org', 80, '/foo')),
("http://egg:7890 ", ('http', 'egg:7890', 'egg', 7890, '/')),
)
for url, test in tests:
test = tuple(
to_bytes(x) if not isinstance(x, int) else x for x in test)
self.assertEqual(client._parse(url), test, url)
class ScrapyHTTPPageGetterTests(unittest.TestCase):
def test_earlyHeaders(self):
        # basic test stolen from twisted's HTTPPageGetter
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
body="some data",
headers={
'Host': 'example.net',
'User-Agent': 'fooble',
'Cookie': 'blah blah',
'Content-Length': '12981',
'Useful': 'value'}))
self._test(
factory,
b"GET /bar HTTP/1.0\r\n"
b"Content-Length: 9\r\n"
b"Useful: value\r\n"
b"Connection: close\r\n"
b"User-Agent: fooble\r\n"
b"Host: example.net\r\n"
b"Cookie: blah blah\r\n"
b"\r\n"
b"some data")
# test minimal sent headers
factory = client.ScrapyHTTPClientFactory(Request('http://foo/bar'))
self._test(
factory,
b"GET /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"\r\n")
# test a simple POST with body and content-type
factory = client.ScrapyHTTPClientFactory(Request(
method='POST',
url='http://foo/bar',
body='name=value',
headers={'Content-Type': 'application/x-www-form-urlencoded'}))
self._test(
factory,
b"POST /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"Connection: close\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"name=value")
# test a POST method with no body provided
factory = client.ScrapyHTTPClientFactory(Request(
method='POST',
url='http://foo/bar'
))
self._test(
factory,
b"POST /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"Content-Length: 0\r\n"
b"\r\n")
# test with single and multivalued headers
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
headers={
'X-Meta-Single': 'single',
'X-Meta-Multivalued': ['value1', 'value2'],
},
))
self._test(
factory,
b"GET /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"X-Meta-Multivalued: value1\r\n"
b"X-Meta-Multivalued: value2\r\n"
b"X-Meta-Single: single\r\n"
b"\r\n")
# same test with single and multivalued headers but using Headers class
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
headers=Headers({
'X-Meta-Single': 'single',
'X-Meta-Multivalued': ['value1', 'value2'],
}),
))
self._test(
factory,
b"GET /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"X-Meta-Multivalued: value1\r\n"
b"X-Meta-Multivalued: value2\r\n"
b"X-Meta-Single: single\r\n"
b"\r\n")
def _test(self, factory, testvalue):
transport = StringTransport()
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.makeConnection(transport)
self.assertEqual(
set(transport.value().splitlines()),
set(testvalue.splitlines()))
return testvalue
def test_non_standard_line_endings(self):
# regression test for: http://dev.scrapy.org/ticket/258
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar'))
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.headers = Headers()
protocol.dataReceived(b"HTTP/1.0 200 OK\n")
protocol.dataReceived(b"Hello: World\n")
protocol.dataReceived(b"Foo: Bar\n")
protocol.dataReceived(b"\n")
self.assertEqual(protocol.headers, Headers({'Hello': ['World'], 'Foo': ['Bar']}))
class EncodingResource(resource.Resource):
out_encoding = 'cp1251'
def render(self, request):
body = to_unicode(request.content.read())
request.setHeader(b'content-encoding', self.out_encoding)
return body.encode(self.out_encoding)
class WebClientTestCase(unittest.TestCase):
def _listen(self, site):
return reactor.listenTCP(0, site, interface="127.0.0.1")
def setUp(self):
self.tmpname = self.mktemp()
os.mkdir(self.tmpname)
FilePath(self.tmpname).child("file").setContent(b"0123456789")
r = static.File(self.tmpname)
r.putChild(b"redirect", util.Redirect(b"/file"))
r.putChild(b"wait", ForeverTakingResource())
r.putChild(b"error", ErrorResource())
r.putChild(b"nolength", NoLengthResource())
r.putChild(b"host", HostHeaderResource())
r.putChild(b"payload", PayloadResource())
r.putChild(b"broken", BrokenDownloadResource())
r.putChild(b"encoding", EncodingResource())
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.port = self._listen(self.wrapper)
self.portno = self.port.getHost().port
@inlineCallbacks
def tearDown(self):
yield self.port.stopListening()
shutil.rmtree(self.tmpname)
def getURL(self, path):
return f"http://127.0.0.1:{self.portno}/{path}"
def testPayload(self):
s = "0123456789" * 10
return getPage(self.getURL("payload"), body=s).addCallback(
self.assertEqual, to_bytes(s))
def testHostHeader(self):
        # if we pass the Host header explicitly, it should be used; otherwise
        # it should be extracted from the url
return defer.gatherResults([
getPage(self.getURL("host")).addCallback(
self.assertEqual, to_bytes(f"127.0.0.1:{self.portno}")),
getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(
self.assertEqual, to_bytes("www.example.com"))])
def test_getPage(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the body of the response if the default method B{GET} is used.
"""
d = getPage(self.getURL("file"))
d.addCallback(self.assertEqual, b"0123456789")
return d
def test_getPageHead(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the empty string if the method is C{HEAD} and there is a successful
response code.
"""
def _getPage(method):
return getPage(self.getURL("file"), method=method)
return defer.gatherResults([
_getPage("head").addCallback(self.assertEqual, b""),
_getPage("HEAD").addCallback(self.assertEqual, b"")])
def test_timeoutNotTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and the page is
retrieved before the timeout period elapses, the L{Deferred} is
called back with the contents of the page.
"""
d = getPage(self.getURL("host"), timeout=100)
d.addCallback(
self.assertEqual, to_bytes(f"127.0.0.1:{self.portno}"))
return d
def test_timeoutTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and that many
        seconds elapse before the server responds to the request, the
L{Deferred} is errbacked with a L{error.TimeoutError}.
"""
finished = self.assertFailure(
getPage(self.getURL("wait"), timeout=0.000001),
defer.TimeoutError)
def cleanup(passthrough):
# Clean up the server which is hanging around not doing
# anything.
connected = list(self.wrapper.protocols.keys())
# There might be nothing here if the server managed to already see
# that the connection was lost.
if connected:
connected[0].transport.loseConnection()
return passthrough
finished.addBoth(cleanup)
return finished
def testNotFound(self):
return getPage(self.getURL('notsuchfile')).addCallback(self._cbNoSuchFile)
def _cbNoSuchFile(self, pageData):
self.assertIn(b'404 - No Such Resource', pageData)
def testFactoryInfo(self):
url = self.getURL('file')
_, _, host, port, _ = client._parse(url)
factory = client.ScrapyHTTPClientFactory(Request(url))
reactor.connectTCP(to_unicode(host), port, factory)
return factory.deferred.addCallback(self._cbFactoryInfo, factory)
def _cbFactoryInfo(self, ignoredResult, factory):
self.assertEqual(factory.status, b'200')
self.assertTrue(factory.version.startswith(b'HTTP/'))
self.assertEqual(factory.message, b'OK')
self.assertEqual(factory.response_headers[b'content-length'], b'10')
def testRedirect(self):
return getPage(self.getURL("redirect")).addCallback(self._cbRedirect)
def _cbRedirect(self, pageData):
self.assertEqual(
pageData,
b'\n<html>\n <head>\n <meta http-equiv="refresh" content="0;URL=/file">\n'
b' </head>\n <body bgcolor="#FFFFFF" text="#000000">\n '
b'<a href="/file">click here</a>\n </body>\n</html>\n')
def test_encoding(self):
""" Test that non-standart body encoding matches
Content-Encoding header """
body = b'\xd0\x81\xd1\x8e\xd0\xaf'
dfd = getPage(self.getURL('encoding'), body=body, response_transform=lambda r: r)
return dfd.addCallback(self._check_Encoding, body)
def _check_Encoding(self, response, original_body):
content_encoding = to_unicode(response.headers[b'Content-Encoding'])
self.assertEqual(content_encoding, EncodingResource.out_encoding)
self.assertEqual(
response.body.decode(content_encoding), to_unicode(original_body))
class WebClientSSLTestCase(unittest.TestCase):
context_factory = None
def _listen(self, site):
return reactor.listenSSL(
0, site,
contextFactory=self.context_factory or ssl_context_factory(),
interface="127.0.0.1")
def getURL(self, path):
return f"https://127.0.0.1:{self.portno}/{path}"
def setUp(self):
self.tmpname = self.mktemp()
os.mkdir(self.tmpname)
FilePath(self.tmpname).child("file").setContent(b"0123456789")
r = static.File(self.tmpname)
r.putChild(b"payload", PayloadResource())
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.port = self._listen(self.wrapper)
self.portno = self.port.getHost().port
@inlineCallbacks
def tearDown(self):
yield self.port.stopListening()
shutil.rmtree(self.tmpname)
def testPayload(self):
s = "0123456789" * 10
return getPage(self.getURL("payload"), body=s).addCallback(
self.assertEqual, to_bytes(s))
class WebClientCustomCiphersSSLTestCase(WebClientSSLTestCase):
# we try to use a cipher that is not enabled by default in OpenSSL
custom_ciphers = 'CAMELLIA256-SHA'
context_factory = ssl_context_factory(cipher_string=custom_ciphers)
def testPayload(self):
s = "0123456789" * 10
settings = Settings({'DOWNLOADER_CLIENT_TLS_CIPHERS': self.custom_ciphers})
client_context_factory = create_instance(ScrapyClientContextFactory, settings=settings, crawler=None)
return getPage(
self.getURL("payload"), body=s, contextFactory=client_context_factory
).addCallback(self.assertEqual, to_bytes(s))
def testPayloadDisabledCipher(self):
s = "0123456789" * 10
settings = Settings({'DOWNLOADER_CLIENT_TLS_CIPHERS': 'ECDHE-RSA-AES256-GCM-SHA384'})
client_context_factory = create_instance(ScrapyClientContextFactory, settings=settings, crawler=None)
d = getPage(self.getURL("payload"), body=s, contextFactory=client_context_factory)
return self.assertFailure(d, OpenSSL.SSL.Error)
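# --- Hedged usage sketch (not part of the original test suite) -----------------
# Shows how the getPage() helper above is typically driven from a test: passing
# response_transform=lambda r: r hands back the full Response (as test_encoding
# does), so the body can be checked from a Deferred callback. The class reuses
# WebClientTestCase's fixture, so it also inherits and re-runs its tests.
class GetPageUsageExample(WebClientTestCase):
    def test_full_response_object(self):
        d = getPage(self.getURL("file"), response_transform=lambda r: r)
        def check(response):
            # the full Response exposes headers and status as well as the body
            self.assertEqual(response.body, b"0123456789")
        return d.addCallback(check)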
|
|
#!/usr/bin/env python
################################################################################
#
# Copyright (c) 2012-2013, Alexander Todorov <atodorov@nospam.dif.io>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import json
import logging
from utils import fetch_page
from datetime import datetime
logger = logging.getLogger(__name__)
def get_download_url(package, version, data = None):
"""
Return download URL.
NB: this is the gem_uri from the main record.
In case of JRuby it will return wrong URL.
The proper one is XXX-VERSION-java.gem. See neo4j-enterprise.
TODO: We need to store "java" inside version string.
"""
return "https://rubygems.org/gems/%s-%s.gem" % (package, version)
def get_release_date(package, version, data = None):
"""
Return the released_on date for this version.
"""
if not data:
json_data = fetch_page('https://rubygems.org/api/v1/versions/%s.json' % package)
# json_data = json_data.decode('UTF-8')
data = json.loads(json_data)
for ver in data:
if ver['number'] == version:
return datetime.strptime(ver['built_at'], '%Y-%m-%dT%H:%M:%SZ')
return None
def get_latest(package, last_checked=None):
"""
Get the latest version of a package
"""
json_data = fetch_page('http://rubygems.org/api/v1/versions/%s.json' % package, last_modified=last_checked)
if json_data is None: # NB: empty string is not None but will fail the check
return 304, 304
data = json.loads(json_data)
for version in data: # i in range(0, len(data)):
if version['prerelease']:
continue
else:
return version['number'], get_release_date(package, version['number'], data)
# in case there are only pre-release versions
return None, None
def get_url(package, version=None):
"""
Return homepage, repo, bugtracker URLs for a package
"""
urls = {
'homepage' : '',
'repository' : '',
'bugtracker' : '',
}
json_data = fetch_page('https://rubygems.org/api/v1/gems/%s.json' % package)
# json_data = json_data.decode('UTF-8')
data = json.loads(json_data)
if data.has_key('homepage_uri'):
urls['homepage'] = data['homepage_uri']
else:
urls['homepage'] = data['project_uri']
if data.has_key('bug_tracker_uri') and data['bug_tracker_uri']:
urls['bugtracker'] = data['bug_tracker_uri']
if data.has_key('source_code_uri') and data['source_code_uri']:
urls['repository'] = data['source_code_uri']
return urls
def compare_versions(ver1, ver2):
"""
Based on:
http://groups.google.com/group/gemcutter/msg/516151c8cdd02721?dmode=source
See also:
http://groups.google.com/group/gemcutter/browse_frm/thread/2218032b82053868
http://groups.google.com/group/gemcutter/browse_thread/thread/d0283c38b817ca1
See also version.rb.
    NB: if a package changes its versioning format, e.g. "X.1a" vs "X.1.a", then
    "1a" > "1.a", which is a BUG, but we should not be comparing prerelease versions anyway.
"""
ver1_a = ver1.split('.')
ver2_a = ver2.split('.')
lhsize = len(ver1_a)
rhsize = len(ver2_a)
limit = max(lhsize, rhsize)
for i in range(0, limit):
try:
lhs = ver1_a[i]
except IndexError:
lhs = '0'
try:
rhs = ver2_a[i]
except IndexError:
rhs = '0'
# do not compare dots
if "." in [lhs, rhs]:
continue
# if both are digits or
# both are not digits
if (lhs.isdigit() and rhs.isdigit()) or \
((not lhs.isdigit()) and (not rhs.isdigit())) :
# first try comparing as integers
try:
result = cmp(int(lhs), int(rhs))
except ValueError:
# if it doesn't work then compare as strings
result = cmp(lhs, rhs)
# don't abort comparison if equal
if result != 0:
return result
else: # one is not digit
for j in range(0, max(len(lhs), len(rhs))):
try:
l = lhs[j]
except IndexError:
return 1
try:
r = rhs[j]
except IndexError:
return -1
if l != r:
return cmp(l, r)
return 0
def get_latest_from_rss():
"""
    @return - list of (name, version, released_on)
"""
data = fetch_page("https://rubygems.org/api/v1/activity/just_updated.json")
latest = json.loads(data)
result = []
for gem in latest:
# NB: not implemented
# see https://github.com/rubygems/rubygems.org/issues/536
# if gem['prerelease']:
# continue
# don't add prerelease software
(latest_ver, released_on) = get_latest(gem['name'])
        # TODO: this JSON gives more info (e.g. GitHub URLs); import it from here
        # and kill some messages
if latest_ver == gem['version']:
# RubyGems.org doesn't provide date of release
result.append((gem['name'], gem['version'], released_on))
return result
if __name__ == "__main__":
latest = get_latest_from_rss()
from pprint import pprint
pprint(latest)
unsorted = ['0.3.2.1.1', '0.3.2.1', '0.3.2.1a', '0.3.2.1b', '0.3.2', '0.3.2a', '0.3.1', '0.3.1a', '0.3', '0.4', '0.1']
print "Unsorted:"
for v in unsorted:
print v
unsorted.sort(compare_versions)
print "Sorted:"
for v in unsorted:
print v
for name in ["rack-mount", 'actionmailer']:
latest, released_on = get_latest(name)
urls = get_url(name)
print latest, released_on, type(released_on), urls
name = 'rack-mount'
for ver in ['0.7.4', '0.8.0', '0.8.1', '0.8.2', '0.8.3']:
print get_release_date(name, ver), name, ver
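# --- Hedged usage sketch (not part of the original module) ---------------------
# compare_versions() is a cmp-style function (-1/0/1), so under Python 2 it can
# be passed straight to list.sort(), as the __main__ block above does. The
# helper below wraps the common "is this version newer?" question; the example
# versions in the docstring are arbitrary.
def is_newer(candidate, current):
    """Return True if `candidate` sorts after `current`, e.g.
    is_newer('0.3.2.1', '0.3.2') -> True."""
    return compare_versions(candidate, current) > 0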
|
|
from __future__ import absolute_import
import contextlib
import errno
import locale
import logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'Inf', 'normalize_name', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'make_path_relative', 'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS']
logger = logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
    # if the file is currently read-only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
class _Inf(object):
"""I am bigger than everything!"""
def __eq__(self, other):
if self is other:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __repr__(self):
return 'Inf'
Inf = _Inf() # this object is not currently used as a sortable in our code
del _Inf
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
return _normalize_re.sub('-', name.lower())
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def split_leading_dir(path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
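# --- Hedged illustration (not part of the original module) ---------------------
# has_leading_dir() answers "is everything inside one top-level directory?",
# which unzip_file/untar_file below use to decide whether to flatten archives.
# The paths here are arbitrary examples.
def _demo_has_leading_dir():
    assert has_leading_dir(['pkg/setup.py', 'pkg/src/mod.py'])
    assert not has_leading_dir(['pkg/setup.py', 'README.txt'])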
def make_path_relative(path, rel_to):
"""
    Make a filename relative, given the filename path and the directory
    (rel_to) it should be made relative to.
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../../../something/a-file.pth'
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../usr/share/something/a-file.pth'
>>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..'] * len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = os.path.expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
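# --- Hedged illustration (not part of the original module) ---------------------
# splitext() keeps ".tar" attached to the compression suffix, unlike plain
# os.path.splitext; the filenames below are arbitrary examples.
def _demo_splitext():
    assert splitext('pkg-1.0.tar.gz') == ('pkg-1.0', '.tar.gz')
    assert splitext('pkg-1.0.zip') == ('pkg-1.0', '.zip')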
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
distutils.sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
# TODO: factor out determining editableness out of FrozenRequirement
from pip import FrozenRequirement
req = FrozenRequirement.from_dist(dist, [])
return req.editable
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
    If ``include_editables`` is False, don't report editables.
    If ``editables_only`` is True, only report editables.
    If ``user_only`` is True, only report installations in the user
    site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
return [d for d in pkg_resources.working_set
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
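# --- Hedged usage sketch (not part of the original module) ---------------------
# Logs what get_installed_distributions() reports for the current environment;
# the exact output naturally depends on where this runs.
def _demo_list_installed():
    for dist in get_installed_distributions(local_only=False, user_only=False):
        logger.info("%s %s (%s)", dist.project_name, dist.version, dist_location(dist))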
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
    There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
destfp = open(path, 'wb')
try:
shutil.copyfileobj(fp, destfp)
finally:
destfp.close()
fp.close()
                # does the member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
def remove_tracebacks(output):
pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
r'Syntax(?:Error|Warning): (?:.*)')
output = re.sub(pattern, '', output)
if PY2:
return output
# compileall.compile_dir() prints different messages to stdout
# in Python 3
return re.sub(r"\*\*\* Error compiling (?:.*)", '', output)
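# Illustrative note (not from the original module): remove_tracebacks() is
# intended to scrub compileall noise such as
#     File "bad.py", line 1
#       def f(:
#            ^
#   SyntaxError: invalid syntax
# from captured output, so only genuinely interesting lines are kept.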
def call_subprocess(cmd, show_stdout=True, cwd=None,
raise_on_returncode=True,
command_level=logging.DEBUG, command_desc=None,
extra_environ=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.log(command_level, "Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
if stdout is not None:
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
logger.debug(line)
if not all_output:
returned_stdout, returned_stderr = proc.communicate()
all_output = [returned_stdout or '']
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
else:
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
if stdout is not None:
return remove_tracebacks(''.join(all_output))
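# Hedged usage sketch (illustration only; the command is an assumption chosen
# because it works wherever Python does): run a trivial interpreter one-liner
# through call_subprocess() and capture its output instead of echoing it.
def _demo_call_subprocess():
    output = call_subprocess(
        [sys.executable, '-c', 'print("ok")'],
        show_stdout=False,
    )
    return output  # expected to contain "ok"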
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
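# Minimal sketch (illustration only; the section and option names are
# assumptions): FakeFile lets ConfigParser consume an in-memory list of lines
# as if it were reading a real file object.
def _demo_fakefile():
    from configparser import ConfigParser  # assumes Python 3 here
    parser = ConfigParser()
    parser.read_file(FakeFile(['[demo]\n', 'key = value\n']))
    return parser.get('demo', 'key')  # 'value'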
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/app.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/app.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
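# Minimal usage sketch for cached_property (illustration only; the demo class
# is an assumption, not part of the original module).
class _CachedPropertyDemo(object):
    def __init__(self):
        self.computed = 0
    @cached_property
    def answer(self):
        # Runs once per instance; the result then shadows the descriptor.
        self.computed += 1
        return 42
# d = _CachedPropertyDemo(); d.answer; d.answer  ->  d.computed == 1
# del d.answer                                   ->  next access recomputes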
|
|
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
import json
import pytest
import rest_framework
from tests.models import Article, Person, Comment
pytestmark = pytest.mark.django_db
def test_no_pagination_if_all_results(client):
molly = Person.objects.create(last_name="Davis", first_name="Molly")
Article.objects.create(title="Molly's article", author=molly)
buzz = Person.objects.create(last_name="Lightyear", first_name="Buzz")
Article.objects.create(title="Buzz' article", author=buzz)
response = client.get(reverse("article-list"))
assert json.loads(response.content.decode()) == {
"data": [
{
"id": "1",
"type": "article",
"attributes": {
"title": "Molly's article"
},
"relationships": {
"author": {
"data": {
"id": "1",
"type": "person"
}
},
"comments": {
"data": []
}
}
},
{
"id": "2",
"type": "article",
"attributes": {
"title": "Buzz' article"
},
"relationships": {
"author": {
"data": {
"id": "2",
"type": "person"
}
},
"comments": {
"data": []
}
}
}
]
}
def test_page_number(client):
molly = Person.objects.create(last_name="Davis", first_name="Molly")
Article.objects.create(title="Molly's article", author=molly)
buzz = Person.objects.create(last_name="Lightyear", first_name="Buzz")
Article.objects.create(title="Buzz' article", author=buzz)
sid = Person.objects.create(last_name="Phillips", first_name="Sid")
Article.objects.create(title="Sid's article", author=sid)
bo = Person.objects.create(last_name="Peep", first_name="Bo")
Article.objects.create(title="Bo's article", author=bo)
response = client.get(reverse("article-list"))
assert json.loads(response.content.decode()) == {
"links": {
"first": "http://testserver/articles",
"last": "http://testserver/articles?page%5Bnumber%5D=2",
"prev": None,
"next": "http://testserver/articles?page%5Bnumber%5D=2"
},
"meta": {
"count": 4
},
"data": {
"data": [
{
"id": "1",
"type": "article",
"attributes": {
"title": "Molly's article"
},
"relationships": {
"author": {
"data": {
"id": "1",
"type": "person"
}
},
"comments": {
"data": []
}
}
},
{
"id": "2",
"type": "article",
"attributes": {
"title": "Buzz' article"
},
"relationships": {
"author": {
"data": {
"id": "2",
"type": "person"
}
},
"comments": {
"data": []
}
}
},
{
"id": "3",
"type": "article",
"attributes": {
"title": "Sid's article"
},
"relationships": {
"author": {
"data": {
"id": "3",
"type": "person"
}
},
"comments": {
"data": []
}
}
}
]
}
}
next_response = client.get("http://testserver/articles?page%5Bnumber%5D=2")
assert json.loads(next_response.content.decode()) == {
"links": {
"first": "http://testserver/articles",
"last": "http://testserver/articles?page%5Bnumber%5D=2",
"prev": "http://testserver/articles",
"next": None
},
"meta": {
"count": 4
},
"data": {
"data": [
{
"id": "4",
"type": "article",
"attributes": {
"title": "Bo's article"
},
"relationships": {
"author": {
"data": {
"id": "4",
"type": "person"
}
},
"comments": {
"data": []
}
}
}
]
}
}
def test_limit_offset(client):
Person.objects.create(last_name="Davis", first_name="Molly")
Person.objects.create(last_name="Lightyear", first_name="Buzz")
Person.objects.create(last_name="Phillips", first_name="Sid")
Person.objects.create(last_name="Peep", first_name="Bo")
response = client.get(reverse("person-list"))
expected = {
"links": {
"prev": None,
"next": "http://testserver/people?page%5Boffset%5D=3"
},
"meta": {
"count": 4
},
"data": {
"data": [
{
"id": "1",
"type": "person",
"attributes": {
"first-name": "Molly",
"last-name": "Davis",
"twitter": ""
}
},
{
"id": "2",
"type": "person",
"attributes": {
"first-name": "Buzz",
"last-name": "Lightyear",
"twitter": ""
}
},
{
"id": "3",
"type": "person",
"attributes": {
"first-name": "Sid",
"last-name": "Phillips",
"twitter": ""
}
}
]
}
}
# Limit is always included since DRF 3.2 (35c28a2)
    if int(rest_framework.__version__.split(".")[1]) >= 2:
expected["links"]["next"] = "http://testserver/people?page%5Blimit%5D"\
"=3&page%5Boffset%5D=3"
assert json.loads(response.content.decode()) == expected
next_response = client.get("http://testserver/people?page%5Boffset%5D=3")
expected = {
"links": {
"prev": "http://testserver/people",
"next": None
},
"meta": {
"count": 4
},
"data": {
"data": [
{
"id": "4",
"type": "person",
"attributes": {
"first-name": "Bo",
"last-name": "Peep",
"twitter": ""
}
}
]
}
}
# Limit is always included since DRF 3.2 (35c28a2)
    if int(rest_framework.__version__.split(".")[1]) >= 2:
expected["links"]["prev"] += "?page%5Blimit%5D=3"
assert json.loads(next_response.content.decode()) == expected
def test_cursor_and_sideloading(client):
molly = Person.objects.create(last_name="Davis", first_name="Molly")
Comment.objects.create(body="Molly's comment", author=molly)
buzz = Person.objects.create(last_name="Lightyear", first_name="Buzz")
Comment.objects.create(body="Buzz' comment", author=buzz)
sid = Person.objects.create(last_name="Phillips", first_name="Sid")
Comment.objects.create(body="Sid's comment", author=sid)
bo = Person.objects.create(last_name="Peep", first_name="Bo")
Comment.objects.create(body="Bo's comment", author=bo)
response = client.get("{}?include=author".format(reverse("comment-list")))
assert json.loads(response.content.decode()) == {
"links": {
"prev": None,
"next": "http://testserver/comments?include=author"
"&page%5Bcursor%5D=cD0z"
},
"data": {
"data": [
{
"id": "1",
"type": "comment",
"attributes": {
"body": "Molly's comment"
},
"relationships": {
"author": {
"data": {
"id": "1",
"type": "person"
}
}
}
},
{
"id": "2",
"type": "comment",
"attributes": {
"body": "Buzz' comment"
},
"relationships": {
"author": {
"data": {
"id": "2",
"type": "person"
}
}
}
},
{
"id": "3",
"type": "comment",
"attributes": {
"body": "Sid's comment"
},
"relationships": {
"author": {
"data": {
"id": "3",
"type": "person"
}
}
}
}
],
"included": [
{
"id": "1",
"type": "person",
"attributes": {
"first-name": "Molly",
"last-name": "Davis",
"twitter": ""
}
},
{
"id": "2",
"type": "person",
"attributes": {
"first-name": "Buzz",
"last-name": "Lightyear",
"twitter": ""
}
},
{
"id": "3",
"type": "person",
"attributes": {
"first-name": "Sid",
"last-name": "Phillips",
"twitter": ""
}
}
]
}
}
next_response = client.get("http://testserver/comments?include=author"
"&page%5Bcursor%5D=cD0z")
response_data = json.loads(next_response.content.decode())
assert response_data["data"] == {
"data": [
{
"id": "4",
"type": "comment",
"attributes": {
"body": "Bo's comment"
},
"relationships": {
"author": {
"data": {
"id": "4",
"type": "person"
}
}
}
}
],
"included": [
{
"id": "4",
"type": "person",
"attributes": {
"first-name": "Bo",
"last-name": "Peep",
"twitter": ""
}
}
]
}
assert response_data["links"]["prev"] in [
"http://testserver/comments?include=author"
"&page%5Bcursor%5D=cj0xJnA9NA%3D%3D",
"http://testserver/comments?include=author"
"&page%5Bcursor%5D=cD00JnI9MQ%3D%3D",
]
assert response_data["links"]["next"] is None
|
|
#!/usr/bin/env python
#
# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
Generate Kconfig.soc_caps.in with defines from soc_caps.h
"""
import argparse
import inspect
import io
import logging
import sys
from difflib import unified_diff
from os import path
from pathlib import Path
from string import Template
import pyparsing
from pyparsing import (CaselessLiteral, Char, Combine, Group, Literal, OneOrMore, # pylint: disable=unused-import
Optional, ParserElement, QuotedString, Word, alphas, hexnums, nums)
ParserElement.enablePackrat()  # enable packrat memoization to speed up repeated parses
try:
import typing # noqa: F401 # pylint: disable=unused-import
except ImportError:
pass
class KconfigWriter():
PREAMBLE = inspect.cleandoc('''
#####################################################
# This file is auto-generated from SoC caps
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
''')
KCONFIG_ENTRY_TEMPLATE = Template(
inspect.cleandoc('''
config $name
$entry_type
default $value
'''))
def __init__(self): # type: () -> None
        self.entries = set()
        self.kconfig_text = io.StringIO()
self.kconfig_text.write(self.PREAMBLE)
def add_entry(self, name, entry_type, value): # type: (str, str, typing.Any) -> None
if name in self.entries:
logging.info('Duplicate entry: {}'.format(name))
return
self.entries.add(name)
self.kconfig_text.write('\n\n')
# Format values for kconfig
if entry_type == 'bool':
value = 'y' if value else 'n'
elif entry_type == 'string':
value = '"' + value + '"'
entry = self.KCONFIG_ENTRY_TEMPLATE.substitute(name=name, entry_type=entry_type, value=value)
self.kconfig_text.write(entry)
def update_file(self, kconfig_path, always_write): # type: (Path, bool) -> bool
try:
with open(kconfig_path, 'r') as f:
old_content = f.readlines()
except FileNotFoundError:
old_content = ['']
self.kconfig_text.seek(0)
new_content = self.kconfig_text.readlines()
new_content[-1] += '\n' # Add final newline to end of file
file_needs_update = always_write
# Check if file was updated and print diff for users
diff = unified_diff(old_content, new_content, fromfile=str(kconfig_path), n=2)
for line in diff:
print(line, end='')
file_needs_update = True
if file_needs_update:
print('\n' + 'Updating file: {}'.format(kconfig_path))
with open(kconfig_path, 'w') as f:
f.writelines(new_content)
return file_needs_update
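# Hedged sketch (illustration only; the entry names are made up): the Kconfig
# text KconfigWriter renders through KCONFIG_ENTRY_TEMPLATE, including the
# bool-to-y/n conversion done in add_entry().
def _demo_kconfig_writer():  # type: () -> str
    writer = KconfigWriter()
    writer.add_entry('SOC_CPU_CORES_NUM', 'int', 2)
    writer.add_entry('SOC_BT_SUPPORTED', 'bool', True)  # rendered as "default y"
    return writer.kconfig_text.getvalue()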
def parse_define(define_line): # type: (str) -> typing.Any[typing.Type[ParserElement]]
# Group for parsing literal suffix of a numbers, e.g. 100UL
literal_symbol = Group(CaselessLiteral('L') | CaselessLiteral('U'))
literal_suffix = OneOrMore(literal_symbol)
# Define name
name = Word(alphas, alphas + nums + '_')
# Define value, either a hex, int or a string
hex_value = Combine(Literal('0x') + Word(hexnums) + Optional(literal_suffix).suppress())('hex_value')
int_value = Word(nums)('int_value') + ~Char('.') + Optional(literal_suffix)('literal_suffix')
str_value = QuotedString('"')('str_value')
# Remove optional parenthesis around values
value = Optional('(').suppress() + (hex_value ^ int_value ^ str_value)('value') + Optional(')').suppress()
expr = '#define' + Optional(name)('name') + Optional(value)
res = expr.parseString(define_line)
return res
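# Illustrative sketch (not part of the original script; the sample defines are
# assumptions): the named results parse_define() exposes for typical lines.
def _demo_parse_define():  # type: () -> tuple
    hex_res = parse_define('#define SOC_UART_FIFO_LEN (0x80)')
    int_res = parse_define('#define SOC_CPU_CORES_NUM 2')
    str_res = parse_define('#define SOC_NAME "esp32"')
    # -> ('0x80', '2', 'esp32'); pyparsing keeps the values as strings
    return hex_res.hex_value, int_res.int_value, str_res.str_value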
def generate_defines(soc_caps_dir, filename, always_write): # type: (Path, str, bool) -> bool
soc_headers = list(soc_caps_dir.glob(filename))
if soc_headers == []:
return False
# Sort header files to make the generated files deterministic
soc_headers.sort(key=lambda file: file.name)
defines = []
for soc_header in soc_headers:
defines.extend(get_defines(soc_header))
writer = KconfigWriter()
for line in defines:
try:
res = parse_define(line)
except pyparsing.ParseException:
logging.debug('Failed to parse: {}'.format(line))
continue
# Add the kconfig entry corresponding to the type we parsed
if 'str_value' in res:
writer.add_entry(res.name, 'string', res.str_value)
elif 'int_value' in res:
            # Defines with an integer value of 0 or 1 are added as
            # bool entries, as long as they have no literal suffix
if 'literal_suffix' not in res and res.int_value == '0':
writer.add_entry(res.name, 'bool', False)
elif 'literal_suffix' not in res and res.int_value == '1':
writer.add_entry(res.name, 'bool', True)
else:
writer.add_entry(res.name, 'int', res.int_value)
elif 'hex_value' in res:
writer.add_entry(res.name, 'hex', res.hex_value)
# Updates output if necessary
updated = writer.update_file(Path(soc_caps_dir) / 'Kconfig.soc_caps.in', always_write)
return updated
def get_defines(header_path): # type: (Path) -> list[str]
defines = []
logging.info('Reading macros from {}...'.format(header_path))
with open(header_path, 'r') as f:
output = f.read()
for line in output.split('\n'):
line = line.strip()
if len(line):
defines.append(line)
return defines
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-d', '--dir', help='SoC caps folder paths, support wildcards', nargs='+', default=[])
parser.add_argument('-n', '--filename', nargs='?', default='*caps.h',
help='SoC caps filename, support wildcards')
parser.add_argument('-v', '--verbose', action='count', help='Increase the logging level of the script. Can be specified multiple times.')
parser.add_argument('--always-write', help='Always generate new output files', action='store_true')
args = parser.parse_args()
if not args.verbose:
log_level = logging.WARNING
elif args.verbose == 1:
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
files_updated = []
for caps_dir in args.dir:
soc_caps_dirs = Path().glob(caps_dir)
files_updated += [generate_defines(d, args.filename, args.always_write) for d in soc_caps_dirs if path.isdir(d)]
print('Updated {} files'.format(sum(files_updated)))
sys.exit(all(files_updated))
|
|
import numpy as np
class PerspCamera(object):
r"""Perspective camera in 35mm format.
Attributes:
f_mm (float): See ``f``.
im_h (float): See ``im_res``.
im_w (float): See ``im_res``.
        loc (numpy.ndarray): Camera location in object space.
        lookat (numpy.ndarray): Point in object space the camera looks at.
        up (numpy.ndarray): Up vector in object space.
Note:
- Sensor width of the 35mm format is actually 36mm.
- This class assumes unit pixel aspect ratio (i.e., :math:`f_x = f_y`)
and no skewing between the sensor plane and optical axis.
        - The active sensor area may be smaller than the full 36mm-by-24mm
          sensor, depending on ``im_res``.
- ``aov`` is a hardware property, having nothing to do with ``im_res``.
"""
def __init__(
self, f=50., im_res=(256, 256), loc=(1, 1, 1), lookat=(0, 0, 0),
up=(0, 1, 0)):
"""
Args:
f (float, optional): 35mm format-equivalent focal length in mm.
im_res (array_like, optional): Image height and width in pixels.
loc (array_like, optional): Camera location in object space.
lookat (array_like, optional): Where the camera points to in
object space, so default :math:`(0, 0, 0)` is the object center.
up (array_like, optional): Vector in object space that, when
projected, points upward in image.
"""
self.f_mm = f
self.im_h, self.im_w = im_res
self.loc = np.array(loc)
self.lookat = np.array(lookat)
self.up = np.array(up)
@property
def sensor_w(self):
"""float: Fixed at 36mm"""
return 36 # mm
@property
def sensor_h(self):
"""float: Fixed at 24mm"""
return 24 # mm
@property
def aov(self):
"""tuple: Vertical and horizontal angles of view in degrees."""
alpha_v = 2 * np.arctan(self.sensor_h / (2 * self.f_mm))
alpha_h = 2 * np.arctan(self.sensor_w / (2 * self.f_mm))
return (alpha_v / np.pi * 180, alpha_h / np.pi * 180)
@property
def _mm_per_pix(self):
return min(self.sensor_h / self.im_h, self.sensor_w / self.im_w)
@property
def f_pix(self):
"""float: Focal length in pixels."""
return self.f_mm / self._mm_per_pix
@property
def int_mat(self):
"""numpy.ndarray: 3-by-3 intrinsics matrix."""
return np.array([
[self.f_pix, 0, self.im_w / 2],
[0, self.f_pix, self.im_h / 2],
[0, 0, 1],
])
@property
def ext_mat(self):
"""numpy.ndarray: 3-by-4 extrinsics matrix, i.e., rotation and
translation that transform a point from object space to camera space.
"""
# Two coordinate systems involved:
# 1. Object space: "obj"
# 2. Desired computer vision camera coordinates: "cv"
# - x is horizontal, pointing right (to align with pixel coordinates)
# - y is vertical, pointing down
# - right-handed: positive z is the look-at direction
# cv axes expressed in obj space
cvz_obj = self.lookat - self.loc
assert np.linalg.norm(cvz_obj) > 0, "Camera location and lookat coincide"
cvx_obj = np.cross(cvz_obj, self.up)
cvy_obj = np.cross(cvz_obj, cvx_obj)
# Normalize
cvz_obj = cvz_obj / np.linalg.norm(cvz_obj)
cvx_obj = cvx_obj / np.linalg.norm(cvx_obj)
cvy_obj = cvy_obj / np.linalg.norm(cvy_obj)
# Compute rotation from obj to cv: R
# R(1, 0, 0)^T = cvx_obj gives first column of R
# R(0, 1, 0)^T = cvy_obj gives second column of R
# R(0, 0, 1)^T = cvz_obj gives third column of R
rot_obj2cv = np.vstack((cvx_obj, cvy_obj, cvz_obj)).T
# Extrinsics
return rot_obj2cv.dot(
np.array([
[1, 0, 0, -self.loc[0]],
[0, 1, 0, -self.loc[1]],
[0, 0, 1, -self.loc[2]],
])
)
@property
def proj_mat(self):
"""numpy.ndarray: 3-by-4 projection matrix, derived from
intrinsics and extrinsics.
"""
return self.int_mat.dot(self.ext_mat)
def set_from_mitsuba(self, xml_path):
"""Sets camera according to a Mitsuba XML file.
Args:
xml_path (str): Path to the XML file.
Raises:
NotImplementedError: If focal length is not specified in mm.
"""
from xml.etree.ElementTree import parse
tree = parse(xml_path)
# Focal length
f_tag = tree.find('./sensor/string[@name="focalLength"]')
if f_tag is None:
self.f_mm = 50. # Mitsuba default
else:
f_str = f_tag.attrib['value']
if f_str[-2:] == 'mm':
self.f_mm = float(f_str[:-2])
else:
raise NotImplementedError(f_str)
# Extrinsics
cam_transform = tree.find('./sensor/transform/lookAt').attrib
self.loc = np.fromstring(cam_transform['origin'], sep=',')
self.lookat = np.fromstring(cam_transform['target'], sep=',')
self.up = np.fromstring(cam_transform['up'], sep=',')
# Resolution
self.im_h = int(tree.find('./sensor/film/integer[@name="height"]').attrib['value'])
self.im_w = int(tree.find('./sensor/film/integer[@name="width"]').attrib['value'])
def proj(self, pts, space='object'):
"""Projects 3D points to 2D.
Args:
pts (array_like): 3D point(s) of shape N-by-3 or 3-by-N, or of length 3.
space (str, optional): In which space these points are specified:
``'object'`` or ``'camera'``.
Returns:
array_like: Vertical and horizontal coordinates of the projections, following:
.. code-block:: none
+-----------> dim1
|
|
|
v dim0
"""
pts = np.array(pts)
if pts.shape == (3,):
pts = pts.reshape((3, 1))
elif pts.shape[1] == 3:
pts = pts.T
assert space in ('object', 'camera'), "Unrecognized space"
# 3 x N
n_pts = pts.shape[1]
pts_homo = np.vstack((pts, np.ones((1, n_pts))))
# 4 x N
if space == 'object':
proj_mat = self.proj_mat
else:
ext_mat = np.hstack((np.eye(3), np.zeros((3, 1))))
proj_mat = self.int_mat.dot(ext_mat)
# Project
hvs_homo = proj_mat.dot(pts_homo)
# 3 x N: dim0 is horizontal, and dim1 is vertical
hs_homo = hvs_homo[0, :]
vs_homo = hvs_homo[1, :]
ws = hvs_homo[2, :]
hs = np.divide(hs_homo, ws)
vs = np.divide(vs_homo, ws)
vhs = np.vstack((vs, hs)).T
if vhs.shape[0] == 1:
# Single point
vhs = vhs[0, :]
return vhs
def backproj(self, depth, fg_mask=None, depth_type='plane', space='object'):
"""Backprojects depth map to 3D points.
Args:
depth (numpy.ndarray): Depth map.
fg_mask (numpy.ndarray, optional): Backproject only pixels falling inside this
foreground mask. Its values should be logical.
depth_type (str, optional): Plane or ray depth.
space (str, optional): In which space the backprojected points are specified:
``'object'`` or ``'camera'``.
Returns:
numpy.ndarray: 3D points.
"""
if fg_mask is None:
fg_mask = np.ones(depth.shape, dtype=bool)
assert depth_type in ('ray', 'plane'), "Unrecognized depth type"
assert space in ('object', 'camera'), "Unrecognized space"
v_is, h_is = np.where(fg_mask)
hs = h_is + 0.5
vs = v_is + 0.5
h_c = (depth.shape[1] - 1) / 2
v_c = (depth.shape[0] - 1) / 2
zs = depth[fg_mask]
if depth_type == 'ray':
d2 = np.power(vs - v_c, 2) + np.power(hs - h_c, 2)
# Similar triangles
zs_plane = np.multiply(zs, self.f_pix / np.sqrt(self.f_pix ** 2 + d2))
zs = zs_plane
# Backproject to camera space
xs = np.multiply(zs, hs - h_c) / self.f_pix
ys = np.multiply(zs, vs - v_c) / self.f_pix
pts = np.vstack((xs, ys, zs))
if space == 'camera':
return pts.T
# Need to further transform to object space
rot_mat = self.ext_mat[:, :3] # happens first in projection
trans_vec = self.ext_mat[:, 3].reshape(-1, 1) # happens second in projection
n_pts = pts.shape[1]
pts_obj = np.linalg.inv(rot_mat).dot(pts - np.tile(trans_vec, (1, n_pts)))
return pts_obj.T
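# Minimal usage sketch (illustration only; the numbers below are assumptions):
# with the default lookat at the object-space origin, the origin projects to
# the principal point, and a constant plane-depth map backprojects to a grid
# of camera-space points.
def _demo_persp_camera():
    cam = PerspCamera(f=50., im_res=(256, 256), loc=(1, 1, 1), lookat=(0, 0, 0))
    vh = cam.proj((0, 0, 0), space='object')       # approximately (128, 128)
    depth = np.full((256, 256), 2.0)               # plane depth of 2 units
    pts_cam = cam.backproj(depth, depth_type='plane', space='camera')
    return vh, pts_cam.shape                       # shape (65536, 3)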
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import constants as lconst
from neutron.plugins.ml2.drivers.linuxbridge.agent \
import linuxbridge_neutron_agent
from neutron.tests import base
LOCAL_IP = '192.168.0.33'
DEVICE_1 = 'tapabcdef01-12'
class FakeIpLinkCommand(object):
def set_up(self):
pass
class FakeIpDevice(object):
def __init__(self):
self.link = FakeIpLinkCommand()
class TestLinuxBridge(base.BaseTestCase):
def setUp(self):
super(TestLinuxBridge, self).setUp()
interface_mappings = {'physnet1': 'eth1'}
with mock.patch.object(ip_lib.IPWrapper,
'get_device_by_ip', return_value=None),\
mock.patch.object(ip_lib, 'device_exists',
return_value=True):
self.linux_bridge = linuxbridge_neutron_agent.LinuxBridgeManager(
interface_mappings)
def test_ensure_physical_in_bridge_invalid(self):
result = self.linux_bridge.ensure_physical_in_bridge('network_id',
p_const.TYPE_VLAN,
'physnetx',
7)
self.assertFalse(result)
def test_ensure_physical_in_bridge_flat(self):
with mock.patch.object(self.linux_bridge,
'ensure_flat_bridge') as flat_bridge_func:
self.linux_bridge.ensure_physical_in_bridge(
'network_id', p_const.TYPE_FLAT, 'physnet1', None)
self.assertTrue(flat_bridge_func.called)
def test_ensure_physical_in_bridge_vlan(self):
with mock.patch.object(self.linux_bridge,
'ensure_vlan_bridge') as vlan_bridge_func:
self.linux_bridge.ensure_physical_in_bridge(
'network_id', p_const.TYPE_VLAN, 'physnet1', 7)
self.assertTrue(vlan_bridge_func.called)
def test_ensure_physical_in_bridge_vxlan(self):
self.linux_bridge.vxlan_mode = lconst.VXLAN_UCAST
with mock.patch.object(self.linux_bridge,
'ensure_vxlan_bridge') as vxlan_bridge_func:
self.linux_bridge.ensure_physical_in_bridge(
'network_id', 'vxlan', 'physnet1', 7)
self.assertTrue(vxlan_bridge_func.called)
class TestLinuxBridgeAgent(base.BaseTestCase):
def setUp(self):
super(TestLinuxBridgeAgent, self).setUp()
# disable setting up periodic state reporting
cfg.CONF.set_override('report_interval', 0, 'AGENT')
cfg.CONF.set_override('prevent_arp_spoofing', False, 'AGENT')
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT')
self.get_devices_p = mock.patch.object(ip_lib.IPWrapper, 'get_devices')
self.get_devices = self.get_devices_p.start()
self.get_devices.return_value = [ip_lib.IPDevice('eth77')]
self.get_mac_p = mock.patch('neutron.agent.linux.utils.'
'get_interface_mac')
self.get_mac = self.get_mac_p.start()
self.get_mac.return_value = '00:00:00:00:00:01'
with mock.patch.object(ip_lib.IPWrapper,
'get_device_by_ip', return_value=None):
self.agent = linuxbridge_neutron_agent.LinuxBridgeNeutronAgentRPC(
{}, 0, cfg.CONF.AGENT.quitting_rpc_timeout)
with mock.patch.object(self.agent, "daemon_loop"):
self.agent.start()
def test_treat_devices_removed_with_existed_device(self):
agent = self.agent
devices = [DEVICE_1]
with mock.patch.object(agent.plugin_rpc,
"update_device_down") as fn_udd,\
mock.patch.object(agent.sg_agent,
"remove_devices_filter") as fn_rdf:
fn_udd.return_value = {'device': DEVICE_1,
'exists': True}
with mock.patch.object(linuxbridge_neutron_agent.LOG,
'info') as log:
resync = agent.treat_devices_removed(devices)
self.assertEqual(2, log.call_count)
self.assertFalse(resync)
self.assertTrue(fn_udd.called)
self.assertTrue(fn_rdf.called)
def test_treat_devices_removed_with_not_existed_device(self):
agent = self.agent
devices = [DEVICE_1]
with mock.patch.object(agent.plugin_rpc,
"update_device_down") as fn_udd,\
mock.patch.object(agent.sg_agent,
"remove_devices_filter") as fn_rdf:
fn_udd.return_value = {'device': DEVICE_1,
'exists': False}
with mock.patch.object(linuxbridge_neutron_agent.LOG,
'debug') as log:
resync = agent.treat_devices_removed(devices)
self.assertEqual(1, log.call_count)
self.assertFalse(resync)
self.assertTrue(fn_udd.called)
self.assertTrue(fn_rdf.called)
def test_treat_devices_removed_failed(self):
agent = self.agent
devices = [DEVICE_1]
with mock.patch.object(agent.plugin_rpc,
"update_device_down") as fn_udd,\
mock.patch.object(agent.sg_agent,
"remove_devices_filter") as fn_rdf:
fn_udd.side_effect = Exception()
with mock.patch.object(linuxbridge_neutron_agent.LOG,
'debug') as log:
resync = agent.treat_devices_removed(devices)
self.assertEqual(2, log.call_count)
self.assertTrue(resync)
self.assertTrue(fn_udd.called)
self.assertTrue(fn_rdf.called)
def _test_scan_devices(self, previous, updated,
fake_current, expected, sync):
self.agent.br_mgr = mock.Mock()
self.agent.br_mgr.get_tap_devices.return_value = fake_current
self.agent.updated_devices = updated
results = self.agent.scan_devices(previous, sync)
self.assertEqual(expected, results)
def test_scan_devices_no_changes(self):
previous = {'current': set([1, 2]),
'updated': set(),
'added': set(),
'removed': set()}
fake_current = set([1, 2])
updated = set()
expected = {'current': set([1, 2]),
'updated': set(),
'added': set(),
'removed': set()}
self._test_scan_devices(previous, updated, fake_current, expected,
sync=False)
def test_scan_devices_added_removed(self):
previous = {'current': set([1, 2]),
'updated': set(),
'added': set(),
'removed': set()}
fake_current = set([2, 3])
updated = set()
expected = {'current': set([2, 3]),
'updated': set(),
'added': set([3]),
'removed': set([1])}
self._test_scan_devices(previous, updated, fake_current, expected,
sync=False)
def test_scan_devices_removed_retried_on_sync(self):
previous = {'current': set([2, 3]),
'updated': set(),
'added': set(),
'removed': set([1])}
fake_current = set([2, 3])
updated = set()
expected = {'current': set([2, 3]),
'updated': set(),
'added': set([2, 3]),
'removed': set([1])}
self._test_scan_devices(previous, updated, fake_current, expected,
sync=True)
def test_scan_devices_vanished_removed_on_sync(self):
previous = {'current': set([2, 3]),
'updated': set(),
'added': set(),
'removed': set([1])}
# Device 2 disappeared.
fake_current = set([3])
updated = set()
# Device 1 should be retried.
expected = {'current': set([3]),
'updated': set(),
'added': set([3]),
'removed': set([1, 2])}
self._test_scan_devices(previous, updated, fake_current, expected,
sync=True)
def test_scan_devices_updated(self):
previous = {'current': set([1, 2]),
'updated': set(),
'added': set(),
'removed': set()}
fake_current = set([1, 2])
updated = set([1])
expected = {'current': set([1, 2]),
'updated': set([1]),
'added': set(),
'removed': set()}
self._test_scan_devices(previous, updated, fake_current, expected,
sync=False)
def test_scan_devices_updated_non_existing(self):
previous = {'current': set([1, 2]),
'updated': set(),
'added': set(),
'removed': set()}
fake_current = set([1, 2])
updated = set([3])
expected = {'current': set([1, 2]),
'updated': set(),
'added': set(),
'removed': set()}
self._test_scan_devices(previous, updated, fake_current, expected,
sync=False)
def test_scan_devices_updated_on_sync(self):
previous = {'current': set([1, 2]),
'updated': set([1]),
'added': set(),
'removed': set()}
fake_current = set([1, 2])
updated = set([2])
expected = {'current': set([1, 2]),
'updated': set([1, 2]),
'added': set([1, 2]),
'removed': set()}
self._test_scan_devices(previous, updated, fake_current, expected,
sync=True)
def test_process_network_devices(self):
agent = self.agent
device_info = {'current': set(),
'added': set(['tap3', 'tap4']),
'updated': set(['tap2', 'tap3']),
'removed': set(['tap1'])}
agent.sg_agent.setup_port_filters = mock.Mock()
agent.treat_devices_added_updated = mock.Mock(return_value=False)
agent.treat_devices_removed = mock.Mock(return_value=False)
agent.process_network_devices(device_info)
agent.sg_agent.setup_port_filters.assert_called_with(
device_info['added'],
device_info['updated'])
agent.treat_devices_added_updated.assert_called_with(set(['tap2',
'tap3',
'tap4']))
agent.treat_devices_removed.assert_called_with(set(['tap1']))
def test_treat_devices_added_updated_admin_state_up_true(self):
agent = self.agent
mock_details = {'device': 'dev123',
'port_id': 'port123',
'network_id': 'net123',
'admin_state_up': True,
'network_type': 'vlan',
'segmentation_id': 100,
'physical_network': 'physnet1'}
agent.plugin_rpc = mock.Mock()
agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
agent.br_mgr = mock.Mock()
agent.br_mgr.add_interface.return_value = True
resync_needed = agent.treat_devices_added_updated(set(['tap1']))
self.assertFalse(resync_needed)
agent.br_mgr.add_interface.assert_called_with('net123', 'vlan',
'physnet1', 100,
'port123')
self.assertTrue(agent.plugin_rpc.update_device_up.called)
def test_treat_devices_added_updated_admin_state_up_false(self):
agent = self.agent
mock_details = {'device': 'dev123',
'port_id': 'port123',
'network_id': 'net123',
'admin_state_up': False,
'network_type': 'vlan',
'segmentation_id': 100,
'physical_network': 'physnet1'}
agent.plugin_rpc = mock.Mock()
agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
agent.remove_port_binding = mock.Mock()
resync_needed = agent.treat_devices_added_updated(set(['tap1']))
self.assertFalse(resync_needed)
agent.remove_port_binding.assert_called_with('net123', 'port123')
self.assertFalse(agent.plugin_rpc.update_device_up.called)
def test_set_rpc_timeout(self):
self.agent.stop()
for rpc_client in (self.agent.plugin_rpc.client,
self.agent.sg_plugin_rpc.client,
self.agent.state_rpc.client):
self.assertEqual(cfg.CONF.AGENT.quitting_rpc_timeout,
rpc_client.timeout)
def test_set_rpc_timeout_no_value(self):
self.agent.quitting_rpc_timeout = None
with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc:
self.agent.stop()
self.assertFalse(mock_set_rpc.called)
class TestLinuxBridgeManager(base.BaseTestCase):
def setUp(self):
super(TestLinuxBridgeManager, self).setUp()
self.interface_mappings = {'physnet1': 'eth1'}
with mock.patch.object(ip_lib.IPWrapper,
'get_device_by_ip', return_value=None),\
mock.patch.object(ip_lib, 'device_exists',
return_value=True):
self.lbm = linuxbridge_neutron_agent.LinuxBridgeManager(
self.interface_mappings)
def test_interface_exists_on_bridge(self):
with mock.patch.object(os, 'listdir') as listdir_fn:
listdir_fn.return_value = ["abc"]
self.assertTrue(
self.lbm.interface_exists_on_bridge("br-int", "abc")
)
self.assertFalse(
self.lbm.interface_exists_on_bridge("br-int", "abd")
)
def test_get_bridge_name(self):
nw_id = "123456789101112"
self.assertEqual(self.lbm.get_bridge_name(nw_id),
"brq" + nw_id[0:11])
nw_id = ""
self.assertEqual(self.lbm.get_bridge_name(nw_id),
"brq")
def test_get_subinterface_name(self):
self.assertEqual(self.lbm.get_subinterface_name("eth0", "0"),
"eth0.0")
self.assertEqual(self.lbm.get_subinterface_name("eth0", ""),
"eth0.")
def test_get_tap_device_name(self):
if_id = "123456789101112"
self.assertEqual(self.lbm.get_tap_device_name(if_id),
constants.TAP_DEVICE_PREFIX + if_id[0:11])
if_id = ""
self.assertEqual(self.lbm.get_tap_device_name(if_id),
constants.TAP_DEVICE_PREFIX)
def test_get_vxlan_device_name(self):
vn_id = p_const.MAX_VXLAN_VNI
self.assertEqual(self.lbm.get_vxlan_device_name(vn_id),
"vxlan-" + str(vn_id))
self.assertIsNone(self.lbm.get_vxlan_device_name(vn_id + 1))
def test_get_vxlan_group(self):
cfg.CONF.set_override('vxlan_group', '239.1.2.3/24', 'VXLAN')
vn_id = p_const.MAX_VXLAN_VNI
self.assertEqual('239.1.2.255', self.lbm.get_vxlan_group(vn_id))
vn_id = 256
self.assertEqual('239.1.2.0', self.lbm.get_vxlan_group(vn_id))
vn_id = 257
self.assertEqual('239.1.2.1', self.lbm.get_vxlan_group(vn_id))
cfg.CONF.set_override('vxlan_group', '240.0.0.0', 'VXLAN')
self.assertIsNone(self.lbm.get_vxlan_group(vn_id))
cfg.CONF.set_override('vxlan_group', '224.0.0.1/', 'VXLAN')
self.assertIsNone(self.lbm.get_vxlan_group(vn_id))
def test_get_all_neutron_bridges(self):
br_list = ["br-int", "brq1", "brq2", "br-ex"]
with mock.patch.object(os, 'listdir') as listdir_fn:
listdir_fn.return_value = br_list
self.assertEqual(self.lbm.get_all_neutron_bridges(),
br_list[1:3])
self.assertTrue(listdir_fn.called)
def test_get_interfaces_on_bridge(self):
with mock.patch.object(utils, 'execute'),\
mock.patch.object(os, 'listdir') as listdir_fn,\
mock.patch.object(ip_lib, 'device_exists', return_value=True):
listdir_fn.return_value = ["qbr1"]
self.assertEqual(self.lbm.get_interfaces_on_bridge("br0"),
["qbr1"])
def test_get_interfaces_on_bridge_not_existing(self):
with mock.patch.object(ip_lib, 'device_exists', return_value=False):
self.assertEqual([], self.lbm.get_interfaces_on_bridge("br0"))
def test_get_tap_devices_count(self):
with mock.patch.object(os, 'listdir') as listdir_fn:
listdir_fn.return_value = ['tap2101', 'eth0.100', 'vxlan-1000']
self.assertEqual(self.lbm.get_tap_devices_count('br0'), 1)
listdir_fn.side_effect = OSError()
self.assertEqual(self.lbm.get_tap_devices_count('br0'), 0)
def test_get_bridge_for_tap_device(self):
with mock.patch.object(self.lbm,
"get_all_neutron_bridges") as get_all_qbr_fn,\
mock.patch.object(self.lbm,
"get_interfaces_on_bridge") as get_if_fn:
get_all_qbr_fn.return_value = ["br-int", "br-ex"]
get_if_fn.return_value = ["tap1", "tap2", "tap3"]
self.assertEqual(self.lbm.get_bridge_for_tap_device("tap1"),
"br-int")
self.assertIsNone(self.lbm.get_bridge_for_tap_device("tap4"))
def test_is_device_on_bridge(self):
self.assertTrue(not self.lbm.is_device_on_bridge(""))
with mock.patch.object(os.path, 'exists') as exists_fn:
exists_fn.return_value = True
self.assertTrue(self.lbm.is_device_on_bridge("tap1"))
exists_fn.assert_called_with(
"/sys/class/net/tap1/brport"
)
def test_get_interface_details(self):
with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\
mock.patch.object(ip_lib.IpRouteCommand,
'get_gateway') as getgw_fn:
gwdict = dict(gateway='1.1.1.1')
getgw_fn.return_value = gwdict
ipdict = dict(cidr='1.1.1.1/24',
broadcast='1.1.1.255',
scope='global',
ip_version=4,
dynamic=False)
list_fn.return_value = ipdict
ret = self.lbm.get_interface_details("eth0")
self.assertTrue(list_fn.called)
self.assertTrue(getgw_fn.called)
self.assertEqual(ret, (ipdict, gwdict))
def test_ensure_flat_bridge(self):
with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\
mock.patch.object(ip_lib.IpRouteCommand,
'get_gateway') as getgw_fn:
gwdict = dict(gateway='1.1.1.1')
getgw_fn.return_value = gwdict
ipdict = dict(cidr='1.1.1.1/24',
broadcast='1.1.1.255',
scope='global',
ip_version=4,
dynamic=False)
list_fn.return_value = ipdict
with mock.patch.object(self.lbm, 'ensure_bridge') as ens:
self.assertEqual(
self.lbm.ensure_flat_bridge("123", "eth0"),
"eth0"
)
self.assertTrue(list_fn.called)
self.assertTrue(getgw_fn.called)
ens.assert_called_once_with("brq123", "eth0",
ipdict, gwdict)
def test_ensure_vlan_bridge(self):
with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\
mock.patch.object(self.lbm, 'ensure_bridge') as ens,\
mock.patch.object(self.lbm,
'get_interface_details') as get_int_det_fn:
ens_vl_fn.return_value = "eth0.1"
get_int_det_fn.return_value = (None, None)
self.assertEqual(self.lbm.ensure_vlan_bridge("123", "eth0", "1"),
"eth0.1")
ens.assert_called_with("brq123", "eth0.1", None, None)
get_int_det_fn.return_value = ("ips", "gateway")
self.assertEqual(self.lbm.ensure_vlan_bridge("123", "eth0", "1"),
"eth0.1")
ens.assert_called_with("brq123", "eth0.1", "ips", "gateway")
def test_ensure_local_bridge(self):
with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn:
self.lbm.ensure_local_bridge("54321")
ens_fn.assert_called_once_with("brq54321")
def test_ensure_vlan(self):
with mock.patch.object(ip_lib, 'device_exists') as de_fn:
de_fn.return_value = True
self.assertEqual(self.lbm.ensure_vlan("eth0", "1"), "eth0.1")
de_fn.return_value = False
with mock.patch.object(utils, 'execute') as exec_fn:
exec_fn.return_value = False
self.assertEqual(self.lbm.ensure_vlan("eth0", "1"), "eth0.1")
# FIXME(kevinbenton): validate the params to the exec_fn calls
self.assertEqual(exec_fn.call_count, 2)
exec_fn.return_value = True
self.assertIsNone(self.lbm.ensure_vlan("eth0", "1"))
self.assertEqual(exec_fn.call_count, 3)
def test_ensure_vxlan(self):
seg_id = "12345678"
self.lbm.local_int = 'eth0'
self.lbm.vxlan_mode = lconst.VXLAN_MCAST
with mock.patch.object(ip_lib, 'device_exists') as de_fn:
de_fn.return_value = True
self.assertEqual(self.lbm.ensure_vxlan(seg_id), "vxlan-" + seg_id)
de_fn.return_value = False
with mock.patch.object(self.lbm.ip,
'add_vxlan') as add_vxlan_fn:
add_vxlan_fn.return_value = FakeIpDevice()
self.assertEqual(self.lbm.ensure_vxlan(seg_id),
"vxlan-" + seg_id)
add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id,
group="224.0.0.1",
dev=self.lbm.local_int)
cfg.CONF.set_override('l2_population', 'True', 'VXLAN')
self.assertEqual(self.lbm.ensure_vxlan(seg_id),
"vxlan-" + seg_id)
add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id,
group="224.0.0.1",
dev=self.lbm.local_int,
proxy=True)
def test_update_interface_ip_details(self):
gwdict = dict(gateway='1.1.1.1',
metric=50)
ipdict = dict(cidr='1.1.1.1/24',
broadcast='1.1.1.255',
scope='global',
ip_version=4,
dynamic=False)
with mock.patch.object(ip_lib.IpAddrCommand, 'add') as add_fn,\
mock.patch.object(ip_lib.IpAddrCommand, 'delete') as del_fn:
self.lbm.update_interface_ip_details("br0", "eth0",
[ipdict], None)
self.assertTrue(add_fn.called)
self.assertTrue(del_fn.called)
with mock.patch.object(ip_lib.IpRouteCommand,
'add_gateway') as addgw_fn,\
mock.patch.object(ip_lib.IpRouteCommand,
'delete_gateway') as delgw_fn:
self.lbm.update_interface_ip_details("br0", "eth0",
None, gwdict)
self.assertTrue(addgw_fn.called)
self.assertTrue(delgw_fn.called)
def test_bridge_exists_and_ensure_up(self):
ip_lib_mock = mock.Mock()
with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
# device exists
self.assertTrue(self.lbm._bridge_exists_and_ensure_up("br0"))
self.assertTrue(ip_lib_mock.link.set_up.called)
# device doesn't exists
ip_lib_mock.link.set_up.side_effect = RuntimeError
self.assertFalse(self.lbm._bridge_exists_and_ensure_up("br0"))
def test_ensure_bridge(self):
bridge_device = mock.Mock()
bridge_device_old = mock.Mock()
with mock.patch.object(self.lbm,
'_bridge_exists_and_ensure_up') as de_fn,\
mock.patch.object(bridge_lib, "BridgeDevice",
return_value=bridge_device_old) as br_fn, \
mock.patch.object(self.lbm,
'update_interface_ip_details') as upd_fn,\
mock.patch.object(self.lbm,
'interface_exists_on_bridge') as ie_fn,\
mock.patch.object(self.lbm, 'is_device_on_bridge'),\
mock.patch.object(self.lbm,
'get_bridge_for_tap_device') as get_if_br_fn:
de_fn.return_value = False
br_fn.addbr.return_value = bridge_device
bridge_device.setfd.return_value = False
bridge_device.disable_stp.return_value = False
bridge_device.link.set_up.return_value = False
self.assertEqual(self.lbm.ensure_bridge("br0", None), "br0")
            ie_fn.return_value = False
self.lbm.ensure_bridge("br0", "eth0")
upd_fn.assert_called_with("br0", "eth0", None, None)
ie_fn.assert_called_with("br0", "eth0")
self.lbm.ensure_bridge("br0", "eth0", "ips", "gateway")
upd_fn.assert_called_with("br0", "eth0", "ips", "gateway")
ie_fn.assert_called_with("br0", "eth0")
de_fn.return_value = True
bridge_device.delif.side_effect = Exception()
self.lbm.ensure_bridge("br0", "eth0")
ie_fn.assert_called_with("br0", "eth0")
de_fn.return_value = True
ie_fn.return_value = False
get_if_br_fn.return_value = "br1"
self.lbm.ensure_bridge("br0", "eth0")
bridge_device_old.delif.assert_called_once_with('eth0')
br_fn.return_value.addif.assert_called_once_with('eth0')
def test_ensure_physical_in_bridge(self):
self.assertFalse(
self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN,
"phys", "1")
)
with mock.patch.object(self.lbm, "ensure_flat_bridge") as flbr_fn:
self.assertTrue(
self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_FLAT,
"physnet1", None)
)
self.assertTrue(flbr_fn.called)
with mock.patch.object(self.lbm, "ensure_vlan_bridge") as vlbr_fn:
self.assertTrue(
self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN,
"physnet1", "1")
)
self.assertTrue(vlbr_fn.called)
with mock.patch.object(self.lbm, "ensure_vxlan_bridge") as vlbr_fn:
self.lbm.vxlan_mode = lconst.VXLAN_MCAST
self.assertTrue(
self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VXLAN,
"physnet1", "1")
)
self.assertTrue(vlbr_fn.called)
def test_add_tap_interface(self):
with mock.patch.object(ip_lib, "device_exists") as de_fn:
de_fn.return_value = False
self.assertFalse(
self.lbm.add_tap_interface("123", p_const.TYPE_VLAN,
"physnet1", "1", "tap1")
)
de_fn.return_value = True
bridge_device = mock.Mock()
with mock.patch.object(self.lbm, "ensure_local_bridge") as en_fn,\
mock.patch.object(bridge_lib, "BridgeDevice",
return_value=bridge_device), \
mock.patch.object(self.lbm,
"get_bridge_for_tap_device") as get_br:
                bridge_device.addif.return_value = False
get_br.return_value = True
self.assertTrue(self.lbm.add_tap_interface("123",
p_const.TYPE_LOCAL,
"physnet1", None,
"tap1"))
en_fn.assert_called_with("123")
get_br.return_value = False
                bridge_device.addif.return_value = True
self.assertFalse(self.lbm.add_tap_interface("123",
p_const.TYPE_LOCAL,
"physnet1", None,
"tap1"))
with mock.patch.object(self.lbm,
"ensure_physical_in_bridge") as ens_fn,\
mock.patch.object(self.lbm,
"ensure_tap_mtu") as en_mtu_fn,\
mock.patch.object(self.lbm,
"get_bridge_for_tap_device") as get_br:
ens_fn.return_value = False
self.assertFalse(self.lbm.add_tap_interface("123",
p_const.TYPE_VLAN,
"physnet1", "1",
"tap1"))
ens_fn.return_value = "eth0.1"
get_br.return_value = "brq123"
self.lbm.add_tap_interface("123", p_const.TYPE_VLAN,
"physnet1", "1", "tap1")
en_mtu_fn.assert_called_once_with("tap1", "eth0.1")
def test_add_interface(self):
with mock.patch.object(self.lbm, "add_tap_interface") as add_tap:
self.lbm.add_interface("123", p_const.TYPE_VLAN, "physnet-1",
"1", "234")
add_tap.assert_called_with("123", p_const.TYPE_VLAN, "physnet-1",
"1", "tap234")
def test_delete_vlan_bridge(self):
bridge_device = mock.Mock()
with mock.patch.object(ip_lib, "device_exists") as de_fn,\
mock.patch.object(self.lbm,
"get_interfaces_on_bridge") as getif_fn,\
mock.patch.object(self.lbm, "remove_interface"),\
mock.patch.object(self.lbm,
"get_interface_details") as if_det_fn,\
mock.patch.object(self.lbm,
"update_interface_ip_details") as updif_fn,\
mock.patch.object(self.lbm, "delete_vxlan") as del_vxlan,\
mock.patch.object(bridge_lib, "BridgeDevice",
return_value=bridge_device):
de_fn.return_value = False
self.lbm.delete_vlan_bridge("br0")
self.assertFalse(getif_fn.called)
de_fn.return_value = True
getif_fn.return_value = ["eth0", "eth1", "vxlan-1002"]
if_det_fn.return_value = ("ips", "gateway")
bridge_device.link.set_down.return_value = False
self.lbm.delete_vlan_bridge("br0")
updif_fn.assert_called_with("eth1", "br0", "ips", "gateway")
del_vxlan.assert_called_with("vxlan-1002")
def test_delete_vlan_bridge_with_ip(self):
bridge_device = mock.Mock()
with mock.patch.object(ip_lib, "device_exists") as de_fn,\
mock.patch.object(self.lbm,
"get_interfaces_on_bridge") as getif_fn,\
mock.patch.object(self.lbm, "remove_interface"),\
mock.patch.object(self.lbm,
"get_interface_details") as if_det_fn,\
mock.patch.object(self.lbm,
"update_interface_ip_details") as updif_fn,\
mock.patch.object(self.lbm, "delete_vlan") as del_vlan,\
mock.patch.object(bridge_lib, "BridgeDevice",
return_value=bridge_device):
de_fn.return_value = True
getif_fn.return_value = ["eth0", "eth1.1"]
if_det_fn.return_value = ("ips", "gateway")
bridge_device.link.set_down.return_value = False
self.lbm.delete_vlan_bridge("br0")
updif_fn.assert_called_with("eth1.1", "br0", "ips", "gateway")
self.assertFalse(del_vlan.called)
def test_delete_vlan_bridge_no_ip(self):
bridge_device = mock.Mock()
with mock.patch.object(ip_lib, "device_exists") as de_fn,\
mock.patch.object(self.lbm,
"get_interfaces_on_bridge") as getif_fn,\
mock.patch.object(self.lbm, "remove_interface"),\
mock.patch.object(self.lbm,
"get_interface_details") as if_det_fn,\
mock.patch.object(self.lbm,
"update_interface_ip_details") as updif_fn,\
mock.patch.object(self.lbm, "delete_vlan") as del_vlan,\
mock.patch.object(bridge_lib, "BridgeDevice",
return_value=bridge_device):
de_fn.return_value = True
getif_fn.return_value = ["eth0", "eth1.1"]
bridge_device.link.set_down.return_value = False
if_det_fn.return_value = ([], None)
self.lbm.delete_vlan_bridge("br0")
del_vlan.assert_called_with("eth1.1")
self.assertFalse(updif_fn.called)
def test_delete_vxlan_bridge_no_int_mappings(self):
interface_mappings = {}
with mock.patch.object(ip_lib.IPWrapper,
'get_device_by_ip', return_value=None):
lbm = linuxbridge_neutron_agent.LinuxBridgeManager(
interface_mappings)
bridge_device = mock.Mock()
with mock.patch.object(ip_lib, "device_exists") as de_fn,\
mock.patch.object(lbm,
"get_interfaces_on_bridge") as getif_fn,\
mock.patch.object(lbm, "remove_interface"),\
mock.patch.object(lbm, "delete_vxlan") as del_vxlan,\
mock.patch.object(bridge_lib, "BridgeDevice",
return_value=bridge_device):
de_fn.return_value = False
lbm.delete_vlan_bridge("br0")
self.assertFalse(getif_fn.called)
de_fn.return_value = True
getif_fn.return_value = ["vxlan-1002"]
bridge_device.link.set_down.return_value = False
lbm.delete_vlan_bridge("br0")
del_vxlan.assert_called_with("vxlan-1002")
def test_remove_empty_bridges(self):
self.lbm.network_map = {'net1': mock.Mock(), 'net2': mock.Mock()}
def tap_count_side_effect(*args):
return 0 if args[0] == 'brqnet1' else 1
with mock.patch.object(self.lbm, "delete_vlan_bridge") as del_br_fn,\
mock.patch.object(self.lbm,
"get_tap_devices_count",
side_effect=tap_count_side_effect):
self.lbm.remove_empty_bridges()
del_br_fn.assert_called_once_with('brqnet1')
def test_remove_interface(self):
bridge_device = mock.Mock()
with mock.patch.object(ip_lib, "device_exists") as de_fn,\
mock.patch.object(self.lbm,
"is_device_on_bridge") as isdev_fn,\
mock.patch.object(bridge_lib, "BridgeDevice",
return_value=bridge_device):
de_fn.return_value = False
self.assertFalse(self.lbm.remove_interface("br0", "eth0"))
self.assertFalse(isdev_fn.called)
de_fn.return_value = True
isdev_fn.return_value = False
self.assertTrue(self.lbm.remove_interface("br0", "eth0"))
isdev_fn.return_value = True
bridge_device.delif.return_value = True
self.assertFalse(self.lbm.remove_interface("br0", "eth0"))
bridge_device.delif.return_value = False
self.assertTrue(self.lbm.remove_interface("br0", "eth0"))
def test_delete_vlan(self):
with mock.patch.object(ip_lib, "device_exists") as de_fn,\
mock.patch.object(utils, "execute") as exec_fn:
de_fn.return_value = False
self.lbm.delete_vlan("eth1.1")
self.assertFalse(exec_fn.called)
de_fn.return_value = True
exec_fn.return_value = True
self.lbm.delete_vlan("eth1.1")
self.assertTrue(exec_fn.called)
def _check_vxlan_support(self, expected, vxlan_ucast_supported,
vxlan_mcast_supported):
with mock.patch.object(self.lbm,
'vxlan_ucast_supported',
return_value=vxlan_ucast_supported),\
mock.patch.object(self.lbm,
'vxlan_mcast_supported',
return_value=vxlan_mcast_supported):
if expected == lconst.VXLAN_NONE:
self.assertRaises(exceptions.VxlanNetworkUnsupported,
self.lbm.check_vxlan_support)
self.assertEqual(expected, self.lbm.vxlan_mode)
else:
self.lbm.check_vxlan_support()
self.assertEqual(expected, self.lbm.vxlan_mode)
def test_check_vxlan_support(self):
self._check_vxlan_support(expected=lconst.VXLAN_UCAST,
vxlan_ucast_supported=True,
vxlan_mcast_supported=True)
self._check_vxlan_support(expected=lconst.VXLAN_MCAST,
vxlan_ucast_supported=False,
vxlan_mcast_supported=True)
self._check_vxlan_support(expected=lconst.VXLAN_NONE,
vxlan_ucast_supported=False,
vxlan_mcast_supported=False)
self._check_vxlan_support(expected=lconst.VXLAN_NONE,
vxlan_ucast_supported=False,
vxlan_mcast_supported=False)
def _check_vxlan_ucast_supported(
self, expected, l2_population, iproute_arg_supported, fdb_append):
cfg.CONF.set_override('l2_population', l2_population, 'VXLAN')
with mock.patch.object(ip_lib, 'device_exists', return_value=False),\
mock.patch.object(ip_lib, 'vxlan_in_use', return_value=False),\
mock.patch.object(self.lbm,
'delete_vxlan',
return_value=None),\
mock.patch.object(self.lbm,
'ensure_vxlan',
return_value=None),\
mock.patch.object(
utils,
'execute',
side_effect=None if fdb_append else RuntimeError()),\
mock.patch.object(ip_lib,
'iproute_arg_supported',
return_value=iproute_arg_supported):
self.assertEqual(expected, self.lbm.vxlan_ucast_supported())
def test_vxlan_ucast_supported(self):
self._check_vxlan_ucast_supported(
expected=False,
l2_population=False, iproute_arg_supported=True, fdb_append=True)
self._check_vxlan_ucast_supported(
expected=False,
l2_population=True, iproute_arg_supported=False, fdb_append=True)
self._check_vxlan_ucast_supported(
expected=False,
l2_population=True, iproute_arg_supported=True, fdb_append=False)
self._check_vxlan_ucast_supported(
expected=True,
l2_population=True, iproute_arg_supported=True, fdb_append=True)
def _check_vxlan_mcast_supported(
self, expected, vxlan_group, iproute_arg_supported):
cfg.CONF.set_override('vxlan_group', vxlan_group, 'VXLAN')
with mock.patch.object(
ip_lib, 'iproute_arg_supported',
return_value=iproute_arg_supported):
self.assertEqual(expected, self.lbm.vxlan_mcast_supported())
def test_vxlan_mcast_supported(self):
self._check_vxlan_mcast_supported(
expected=False,
vxlan_group='',
iproute_arg_supported=True)
self._check_vxlan_mcast_supported(
expected=False,
vxlan_group='224.0.0.1',
iproute_arg_supported=False)
self._check_vxlan_mcast_supported(
expected=True,
vxlan_group='224.0.0.1',
iproute_arg_supported=True)
class TestLinuxBridgeRpcCallbacks(base.BaseTestCase):
def setUp(self):
cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN')
super(TestLinuxBridgeRpcCallbacks, self).setUp()
class FakeLBAgent(object):
def __init__(self):
self.agent_id = 1
with mock.patch.object(
ip_lib.IPWrapper,
'get_device_by_ip', return_value=None),\
mock.patch.object(ip_lib, 'device_exists',
return_value=True):
self.br_mgr = (linuxbridge_neutron_agent.
LinuxBridgeManager({'physnet1': 'eth1'}))
self.br_mgr.vxlan_mode = lconst.VXLAN_UCAST
segment = mock.Mock()
segment.network_type = 'vxlan'
segment.segmentation_id = 1
self.br_mgr.network_map['net_id'] = segment
self.lb_rpc = linuxbridge_neutron_agent.LinuxBridgeRpcCallbacks(
object(),
FakeLBAgent(),
object()
)
def test_network_delete(self):
with mock.patch.object(self.lb_rpc.agent.br_mgr,
"get_bridge_name") as get_br_fn,\
mock.patch.object(self.lb_rpc.agent.br_mgr,
"delete_vlan_bridge") as del_fn:
get_br_fn.return_value = "br0"
self.lb_rpc.network_delete("anycontext", network_id="123")
get_br_fn.assert_called_with("123")
del_fn.assert_called_with("br0")
def test_fdb_add(self):
fdb_entries = {'net_id':
{'ports':
{'agent_ip': [constants.FLOODING_ENTRY,
['port_mac', 'port_ip']]},
'network_type': 'vxlan',
'segment_id': 1}}
with mock.patch.object(utils, 'execute',
return_value='') as execute_fn:
self.lb_rpc.fdb_add(None, fdb_entries)
expected = [
mock.call(['bridge', 'fdb', 'show', 'dev', 'vxlan-1'],
run_as_root=True),
mock.call(['bridge', 'fdb', 'add',
constants.FLOODING_ENTRY[0],
'dev', 'vxlan-1', 'dst', 'agent_ip'],
run_as_root=True,
check_exit_code=False),
mock.call(['ip', 'neigh', 'replace', 'port_ip', 'lladdr',
'port_mac', 'dev', 'vxlan-1', 'nud', 'permanent'],
run_as_root=True,
check_exit_code=False),
mock.call(['bridge', 'fdb', 'replace', 'port_mac', 'dev',
'vxlan-1', 'dst', 'agent_ip'],
run_as_root=True,
check_exit_code=False),
]
execute_fn.assert_has_calls(expected)
def test_fdb_ignore(self):
fdb_entries = {'net_id':
{'ports':
{LOCAL_IP: [constants.FLOODING_ENTRY,
['port_mac', 'port_ip']]},
'network_type': 'vxlan',
'segment_id': 1}}
with mock.patch.object(utils, 'execute',
return_value='') as execute_fn:
self.lb_rpc.fdb_add(None, fdb_entries)
self.lb_rpc.fdb_remove(None, fdb_entries)
self.assertFalse(execute_fn.called)
fdb_entries = {'other_net_id':
{'ports':
{'192.168.0.67': [constants.FLOODING_ENTRY,
['port_mac', 'port_ip']]},
'network_type': 'vxlan',
'segment_id': 1}}
with mock.patch.object(utils, 'execute',
return_value='') as execute_fn:
self.lb_rpc.fdb_add(None, fdb_entries)
self.lb_rpc.fdb_remove(None, fdb_entries)
self.assertFalse(execute_fn.called)
def test_fdb_remove(self):
fdb_entries = {'net_id':
{'ports':
{'agent_ip': [constants.FLOODING_ENTRY,
['port_mac', 'port_ip']]},
'network_type': 'vxlan',
'segment_id': 1}}
with mock.patch.object(utils, 'execute',
return_value='') as execute_fn:
self.lb_rpc.fdb_remove(None, fdb_entries)
expected = [
mock.call(['bridge', 'fdb', 'del',
constants.FLOODING_ENTRY[0],
'dev', 'vxlan-1', 'dst', 'agent_ip'],
run_as_root=True,
check_exit_code=False),
mock.call(['ip', 'neigh', 'del', 'port_ip', 'lladdr',
'port_mac', 'dev', 'vxlan-1'],
run_as_root=True,
check_exit_code=False),
mock.call(['bridge', 'fdb', 'del', 'port_mac',
'dev', 'vxlan-1', 'dst', 'agent_ip'],
run_as_root=True,
check_exit_code=False),
]
execute_fn.assert_has_calls(expected)
def test_fdb_update_chg_ip(self):
fdb_entries = {'chg_ip':
{'net_id':
{'agent_ip':
{'before': [['port_mac', 'port_ip_1']],
'after': [['port_mac', 'port_ip_2']]}}}}
with mock.patch.object(utils, 'execute',
return_value='') as execute_fn:
self.lb_rpc.fdb_update(None, fdb_entries)
expected = [
mock.call(['ip', 'neigh', 'replace', 'port_ip_2', 'lladdr',
'port_mac', 'dev', 'vxlan-1', 'nud', 'permanent'],
run_as_root=True,
check_exit_code=False),
mock.call(['ip', 'neigh', 'del', 'port_ip_1', 'lladdr',
'port_mac', 'dev', 'vxlan-1'],
run_as_root=True,
check_exit_code=False)
]
execute_fn.assert_has_calls(expected)
def test_fdb_update_chg_ip_empty_lists(self):
fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {}}}}
self.lb_rpc.fdb_update(None, fdb_entries)
|
|
import logging
from collections import Counter
logger = logging.getLogger()
storage = {'parquet': 'STORED AS PARQUET',
'com.databricks.spark.csv': ("ROW FORMAT SERDE"
"'org.apache.hadoop.hive.serde2.OpenCSVSerde'\n"
"STORED AS TEXTFILE")}
# key we can ignore when comparing if two fields are equal
METADATA = 'metadata'
# key where type information or nested fields are stored
TYPE = 'type'
# key where array type information of nested fields is stored
ARRAYTYPE = 'elementType'
# key where nullability is stored
NULLABLE = 'nullable'
# key where the fields are stored
FIELDS = 'fields'
# key where the name of the fields are stored
NAME = 'name'
STRUCT = 'struct'
ARRAY = 'array'
class SchemaError(Exception):
pass
def sanitize(key):
"""
Sanitize column names (they cannot begin with '_') by surrounding them with backticks (`)
"""
if key[0] == "_":
return "`%s`" % key
else:
return key
def are_schemas_equal(new_df, old_df, *, partition_col=None):
"""
Check if two dataframe schemas are exactly the same, modulo the partition_col
:param new_df: The new Spark DataFrame
:param old_df: The old Spark DataFrame, that can contain `partition_col`
:param str partition_col: The name of a column that might be only in `old_df`
:return: A boolean indicating if the schemas are equal
"""
if not partition_col:
partition_col_set = set()
else:
partition_col_set = {partition_col}
old_dtypes = dict(old_df.dtypes)
new_dtypes = dict(new_df.dtypes)
new_keys = new_dtypes.keys() - old_dtypes.keys() - partition_col_set
if new_keys:
return False
else:
return all(value == old_dtypes[key]
for key, value in new_dtypes.items() if key != partition_col)
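# Illustrative usage (a sketch, not part of the original module): the check only
# relies on the `.dtypes` attribute, so any object exposing a list of
# (name, type) pairs can stand in for a Spark DataFrame, e.g.
#
#     from collections import namedtuple
#     Fake = namedtuple('Fake', ['dtypes'])
#     old = Fake(dtypes=[('id', 'bigint'), ('name', 'string'), ('dt', 'string')])
#     new = Fake(dtypes=[('id', 'bigint'), ('name', 'string')])
#     are_schemas_equal(new, old, partition_col='dt')  # -> True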
def create_schema(df, database, table, partition_col='dt',
format_output='parquet', output_path=None, external=False, **kwargs):
"""
Create the schema (as a SQL string) for the dataframe in question
The `format_output` is needed as this has to be specified in the create statement
:param df: The dataframe that has been written partitioned on "disk"
:type df: A Spark dataframe
    :param str database: The database to which the table belongs
    :param str table: The table to which the data has been written
    :param str partition_col: The column on which the table is partitioned
    :param str format_output: The storage format the table should use.
    :param output_path: Where the table data lives (if not in the metastore-managed folder).
    :param bool external: Whether to create the table as an EXTERNAL table
"""
if format_output and format_output not in storage:
raise KeyError(
"Unrecognized format_output %s. Available values are %s" % (format_output,
list(storage.keys())))
external = "EXTERNAL" if external else ""
init_string = ("CREATE {external} TABLE "
"IF NOT EXISTS {database}.{table} ".format(external=external,
database=database,
table=table))
fields_string = "(\n" + ",\n".join([sanitize(key) + " " + value
for key, value in df.dtypes
if key != partition_col]) + "\n)"
if partition_col:
partition_string = "\nPARTITIONED BY (%s STRING)" % partition_col
else:
partition_string = ""
format_string = "\n%s" % storage.get(format_output, "")
if output_path:
location = "\nLOCATION '%s'" % output_path
else:
location = ""
return init_string + fields_string + partition_string + format_string + location
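# Illustrative output (a sketch; the database/table/path names are invented): for a
# dataframe whose dtypes are [('id', 'bigint'), ('name', 'string'), ('dt', 'string')],
#
#     create_schema(df, 'analytics', 'events', partition_col='dt',
#                   format_output='parquet', output_path='/data/events', external=True)
#
# returns roughly:
#
#     CREATE EXTERNAL TABLE IF NOT EXISTS analytics.events (
#     id bigint,
#     name string
#     )
#     PARTITIONED BY (dt STRING)
#     STORED AS PARQUET
#     LOCATION '/data/events'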
def list_to_dict(lst, attr):
"""
Convert a list of dictionaries into a dictionary
:param list[dict] lst: A list of dictionaries, all containing the `attr` key
:param attr: The key to indicate the element in the resulting dict
:return: A dictionary of dictionaries
"""
if Counter(elem[attr] for elem in lst).most_common(1)[0][1] > 1:
raise ValueError("""
        The dictionary can't be created unambiguously.
More than one element contains the same {}""".format(attr))
return {elem[attr]: elem for elem in lst}
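# Illustrative usage (a sketch): this is the helper that turns the `fields` list of
# a Spark schema into a name-indexed mapping, e.g.
#
#     fields = [{'name': 'id', 'type': 'long'}, {'name': 'name', 'type': 'string'}]
#     list_to_dict(fields, 'name')
#     # -> {'id': {'name': 'id', 'type': 'long'},
#     #     'name': {'name': 'name', 'type': 'string'}}
#
# Duplicate values for `attr` raise a ValueError instead of silently overwriting.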
def are_fields_complex(new_field, old_field):
"""
Check if the fields are complex (and if the old one exists)
:param any new_field: The new field to check. Can't be None
:param any old_field: The old field to check. Can be None
:return: A boolean
"""
return type(new_field[TYPE]) == dict and old_field and type(old_field[TYPE]) == dict
def _compare_fields(new_field, old_field):
"""
    Check that all elements, besides METADATA, are equal
:param dict new_field: The new field to check
:param dict old_field: The old field to check
:return: A boolean indicating if they are compatible
"""
return all(new_value == old_field.get(new_key)
for new_key, new_value in new_field.items() if new_key != METADATA)
def compare_complex_fields(new_field, old_field):
"""
Compare if two complex fields are compatible
:param dict new_field: The new field to check
:param dict old_field: The old field to check
:return: A boolean indicating if they are compatible
"""
# The complex fields are nested
complex_new_field = new_field[TYPE]
complex_old_field = old_field[TYPE]
if complex_new_field[TYPE] == complex_old_field[TYPE] == STRUCT:
new_schema = complex_new_field
old_schema = complex_old_field
elif complex_new_field[TYPE] == complex_old_field[TYPE] == ARRAY:
# somehow, for array, the fields are stored in ARRAYTYPE
new_schema = complex_new_field[ARRAYTYPE]
old_schema = complex_old_field[ARRAYTYPE]
# the next happens for json sometimes:
# old data: [(a: 1), (a: 5)] <-- array of structs
# new data: [] <-- array of string, but it's empty! thank you json
if ((old_schema and type(old_schema) != str) and type(new_schema) == str):
logger.warning("New schema is backward incompatible. "
"Old schema is {}, new is {}".format(old_schema, new_schema))
raise SchemaError("Found array of strings instead of array of structs")
elif (old_schema and type(old_schema) == type(new_schema) == str):
return old_schema == new_schema
elif (old_schema and type(old_schema) == str and type(new_schema) != str):
            return True  # this should not be True in general, but for data like the empty-array case above, it is
else:
# When the new one is a STRUCT, and the old one an ARRAY, or vice versa
return False
return are_schemas_compatible(new_schema, old_schema)
def compare_fields(new_field, old_field):
"""
Compare two schema fields
:param dict new_field: The new field to check
:param dict old_field: The old field to check
:return: A boolean indicating if they are compatible
"""
if are_fields_complex(new_field, old_field):
return compare_complex_fields(new_field, old_field)
elif old_field and new_field[TYPE] != old_field[TYPE]:
        # this could be more accurate, as some numeric types are compatible (int -> float)
return False
elif old_field and new_field[TYPE] == old_field[TYPE]:
return _compare_fields(new_field, old_field)
else:
# this happens when old_field is None. In that case the new field should be NULLABLE
return new_field.get(NULLABLE)
def are_schemas_compatible(new_schema, old_schema, remove_from_old=None):
"""
Check for schema compatibility
    The schemas should be dicts as returned by df.schema.jsonValue()
    :param dict new_schema: The new schema to check
    :param dict old_schema: The old schema to check
:param remove_from_old: The (optional) field to remove from the old_schema
:return: A boolean indicating if they are compatible
"""
new_schema = list_to_dict(new_schema[FIELDS], NAME)
old_schema = list_to_dict(old_schema[FIELDS], NAME)
if (isinstance(remove_from_old, str) # this fails when remove_from_old=None
and old_schema.get(remove_from_old)):
old_schema.pop(remove_from_old)
    elif (isinstance(remove_from_old, str)
          and not old_schema.get(remove_from_old)):
logger.warning(
'The `remove_from_old`={} key was not found in `old_schema`'.format(remove_from_old))
logger.warning("Available keys are {}".format(old_schema.keys()))
return all(compare_fields(new_value, old_schema.get(new_key))
for new_key, new_value in new_schema.items())
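# A minimal, illustrative sketch (not part of the original module): the field names
# below are invented, and the dicts mimic the shape of df.schema.jsonValue().
def _example_are_schemas_compatible():
    old = {'fields': [
        {'name': 'id', 'type': 'long', 'nullable': False, 'metadata': {}},
        {'name': 'dt', 'type': 'string', 'nullable': True, 'metadata': {}},
    ]}
    new = {'fields': [
        {'name': 'id', 'type': 'long', 'nullable': False, 'metadata': {}},
        {'name': 'extra', 'type': 'string', 'nullable': True, 'metadata': {}},
    ]}
    # 'dt' only exists in the old schema (e.g. a partition column), so drop it;
    # 'extra' is new but nullable, so the schemas are still compatible.
    return are_schemas_compatible(new, old, remove_from_old='dt')  # -> True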
|
|
# -*- coding: utf-8 -*-
"""
equip.analysis.python.types
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Knowledge of Python type system and builtin types.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
from __future__ import absolute_import
import types
import _ast
from ..ast.utils import serialize_name_attr
BOOLEAN_TYPE = (bool,)
NUMERIC_TYPES = (int, long, float, complex)
SEQUENCE_TYPES = (str, unicode, list, tuple, bytearray, buffer, xrange, basestring)
SET_TYPES = (set, frozenset)
DICT_TYPE = (dict,)
BUILTIN_TYPES = NUMERIC_TYPES + SEQUENCE_TYPES + SET_TYPES + DICT_TYPE
ITERATOR_REQUIRED_METHODS = ('__iter__', 'next')
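# Illustrative note (not part of the original module): an object is considered an
# iterator under the Python 2 protocol targeted here when it exposes every name in
# ITERATOR_REQUIRED_METHODS, i.e.
#
#     all(hasattr(obj, method) for method in ITERATOR_REQUIRED_METHODS)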
class GenericType(object):
pass
class UnknownType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'unknown'
class NumericType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'numeric'
class NoneType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'none'
class BooleanType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'boolean'
class IntType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'int'
class LongType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'long'
class FloatType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'float'
class ComplexType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'complex'
class StringType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'string'
class TupleType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'tuple'
class ListType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'list'
class DictType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'dict'
class FunctionType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'function'
class LambdaType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'lambda'
class GeneratorType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'generator'
class ObjectType(GenericType):
def __init__(self):
GenericType.__init__(self)
self._attributes = None
@property
def attributes(self):
return self._attributes
@attributes.setter
def attributes(self, value):
self._attributes = value
def __repr__(self):
return 'object{%s}' % (', '.join(self._attributes) if self._attributes else '')
class MethodType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'method'
class FileType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'file'
class XRangeType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'xrange'
class TracebackType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'traceback'
class SequenceType(GenericType):
def __init__(self):
GenericType.__init__(self)
def __repr__(self):
return 'sequence'
class UnionType(GenericType):
def __init__(self):
GenericType.__init__(self)
self._types = set()
@property
def types(self):
return self._types
def add(self, _type):
self._types.add(_type)
def __repr__(self):
return 'Union{%s}' % repr(self.types)
def numeric_typeof(ast_node):
assert type(ast_node) == _ast.Num
value = ast_node.n
if value is None:
return NoneType()
if isinstance(value, int):
return IntType()
elif isinstance(value, long):
return LongType()
elif isinstance(value, float):
return FloatType()
elif isinstance(value, complex):
return ComplexType()
return NumericType()
def is_numeric(ast_node):
return isinstance(ast_node, _ast.Num)
def sequence_typeof(ast_node):
if isinstance(ast_node, _ast.Str):
return StringType()
elif isinstance(ast_node, _ast.Tuple):
return TupleType()
elif isinstance(ast_node, _ast.List):
return ListType()
return SequenceType()
def is_sequence(ast_node):
return isinstance(ast_node, _ast.Str) \
or isinstance(ast_node, _ast.Tuple) \
or isinstance(ast_node, _ast.List)
def is_dict(ast_node):
  return isinstance(ast_node, _ast.Dict)
def dict_typeof(ast_node=None):
  return DictType()
class SetType(GenericType):
  def __init__(self):
    GenericType.__init__(self)
  def __repr__(self):
    return 'set'
def is_set(ast_node):
  return isinstance(ast_node, _ast.Set)
def set_typeof(ast_node=None):
  return SetType()
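# A small illustrative dispatcher (a sketch, not part of the original equip API)
# tying the is_* predicates and *_typeof helpers above together.
def literal_typeof(ast_node):
  """Return the GenericType wrapper for a literal AST node, e.g.
       node = compile('3.14', '<expr>', 'eval', _ast.PyCF_ONLY_AST).body
       literal_typeof(node)  # -> FloatType()
  """
  if is_numeric(ast_node):
    return numeric_typeof(ast_node)
  if is_sequence(ast_node):
    return sequence_typeof(ast_node)
  if is_dict(ast_node):
    return dict_typeof(ast_node)
  if is_set(ast_node):
    return set_typeof(ast_node)
  return UnknownType()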
|
|
import decimal
import json
import unittest
import uuid
from django import forms
from django.core import exceptions, serializers, validators
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=['hello', 'goodbye'])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=['1'])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertEqual(loaded.field, None)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
class TestQuerying(PostgreSQLTestCase):
def setUp(self):
self.objs = [
NullableIntegerArrayModel.objects.create(field=[1]),
NullableIntegerArrayModel.objects.create(field=[2]),
NullableIntegerArrayModel.objects.create(field=[2, 3]),
NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
NullableIntegerArrayModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]),
self.objs[:1]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True),
self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]),
self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]),
self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2]
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2]
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=['text']),
[]
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=['text']),
[]
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=['text']),
[]
)
def test_index(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2),
self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3),
self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1),
[instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
[instance]
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3]
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2),
self.objs[0:3]
)
def test_slice(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
self.objs[2:3]
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
[instance]
)
class TestChecks(PostgreSQLTestCase):
def test_field_checks(self):
field = ArrayField(models.CharField())
field.set_attributes_from_name('field')
errors = field.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E001')
def test_invalid_base_fields(self):
field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
field.set_attributes_from_name('field')
errors = field.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E002')
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ['postgres_tests']
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
})
def test_adding_field_with_default(self):
# See #22962
table_name = 'postgres_tests_integerarraydefaultmodel'
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLTestCase):
test_data = '[{"fields": {"field": "[\\"1\\", \\"2\\"]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, [1, 2])
class TestValidation(PostgreSQLTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, 'item_invalid')
self.assertEqual(cm.exception.message % cm.exception.params, 'Item 1 in the array did not validate: This field cannot be null.')
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, 'nested_array_mismatch')
self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')
def test_with_validators(self):
field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(cm.exception.code, 'item_invalid')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.')
class TestSimpleFormField(PostgreSQLTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean('a,b,c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,9')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,')
self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,bc,de')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter='|')
value = field.clean('a|b|c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
value = field.clean('a,b|c,d')
self.assertEqual(value, [['a', 'b'], ['c', 'd']])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(['a', 'b', 'c'])
self.assertEqual(value, 'a,b,c')
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('')
self.assertEqual(cm.exception.messages[0], 'This field is required.')
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
class TestSplitFormField(PostgreSQLTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {'array_0': '', 'array_1': '', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['This field is required.']})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(str(SplitForm()), '''
<tr>
<th><label for="id_array_0">Array:</label></th>
<td>
<input id="id_array_0" name="array_0" type="text" />
<input id="id_array_1" name="array_1" type="text" />
<input id="id_array_2" name="array_2" type="text" />
</td>
</tr>
''')
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
from contextlib import contextmanager
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
def normalize_text(s):
return "\n".join(map(str.strip, s.strip().split("\n")))
class CsvTest(PandasOnSparkTestCase, TestUtils):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(prefix=CsvTest.__name__)
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
@property
def csv_text(self):
return normalize_text(
"""
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
Alice,200
Frank,-200
Bob,600
Alice,400
Frank,200
Alice,300
Edith,600
"""
)
@property
def csv_text_2(self):
return normalize_text(
"""
A,B
item1,1
item2,1,2
item3,1,2,3,4
item4,1
"""
)
@property
def csv_text_with_comments(self):
return normalize_text(
"""
# header
%s
# comment
Alice,400
Edith,600
# footer
"""
% self.csv_text
)
@property
def tab_delimited_csv_text(self):
return normalize_text(
"""
name\tamount
Alice\t100
Bob\t-200
Charlie\t300
"""
)
@property
def q_quoted_csv_text(self):
return normalize_text(
"""
QnameQ,QamountQ
QA,liceQ,Q100Q
QB,obQ,Q-200Q
QC,harlieQ,Q300Q
"""
)
@property
def e_escapeted_csv_text(self):
return normalize_text(
"""
name,amount
"AE"lice",100
"BE"ob",-200
"CE"harlie",300
"""
)
@contextmanager
def csv_file(self, csv):
with self.temp_file() as tmp:
with open(tmp, "w") as f:
f.write(csv)
yield tmp
def test_read_csv(self):
with self.csv_file(self.csv_text) as fn:
def check(header="infer", names=None, usecols=None, index_col=None):
expected = pd.read_csv(
fn, header=header, names=names, usecols=usecols, index_col=index_col
)
actual = ps.read_csv(
fn, header=header, names=names, usecols=usecols, index_col=index_col
)
self.assert_eq(expected, actual, almost=True)
check()
check(header=0)
check(header=None)
check(names=["n", "a"])
check(names=[("x", "n"), ("y", "a")])
check(names=[10, 20])
check(header=0, names=["n", "a"])
check(usecols=[1])
check(usecols=[1, 0])
check(usecols=["amount"])
check(usecols=["amount", "name"])
check(usecols=[])
check(usecols=[1, 1])
check(usecols=["amount", "amount"])
check(header=None, usecols=[1])
check(names=["n", "a"], usecols=["a"])
check(header=None, names=["n", "a"], usecols=["a"])
check(index_col=["amount"])
check(header=None, index_col=[1])
check(names=["n", "a"], index_col=["a"])
# check with pyspark patch.
expected = pd.read_csv(fn)
actual = ps.read_csv(fn)
self.assert_eq(expected, actual, almost=True)
self.assertRaisesRegex(
ValueError, "non-unique", lambda: ps.read_csv(fn, names=["n", "n"])
)
self.assertRaisesRegex(
ValueError,
"does not match the number.*3",
lambda: ps.read_csv(fn, names=["n", "a", "b"]),
)
self.assertRaisesRegex(
ValueError,
"does not match the number.*3",
lambda: ps.read_csv(fn, header=0, names=["n", "a", "b"]),
)
self.assertRaisesRegex(
ValueError, "Usecols do not match.*3", lambda: ps.read_csv(fn, usecols=[1, 3])
)
self.assertRaisesRegex(
ValueError,
"Usecols do not match.*col",
lambda: ps.read_csv(fn, usecols=["amount", "col"]),
)
self.assertRaisesRegex(
ValueError, "Unknown header argument 1", lambda: ps.read_csv(fn, header="1")
)
expected_error_message = (
"'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable."
)
self.assertRaisesRegex(
ValueError, expected_error_message, lambda: ps.read_csv(fn, usecols=[1, "amount"])
)
# check with index_col
expected = pd.read_csv(fn).set_index("name")
actual = ps.read_csv(fn, index_col="name")
self.assert_eq(expected, actual, almost=True)
def test_read_with_spark_schema(self):
with self.csv_file(self.csv_text_2) as fn:
actual = ps.read_csv(fn, names="A string, B string, C long, D long, E long")
expected = pd.read_csv(fn, names=["A", "B", "C", "D", "E"])
self.assert_eq(expected, actual)
def test_read_csv_with_comment(self):
with self.csv_file(self.csv_text_with_comments) as fn:
expected = pd.read_csv(fn, comment="#")
actual = ps.read_csv(fn, comment="#")
self.assert_eq(expected, actual, almost=True)
self.assertRaisesRegex(
ValueError,
"Only length-1 comment characters supported",
lambda: ps.read_csv(fn, comment="").show(),
)
self.assertRaisesRegex(
ValueError,
"Only length-1 comment characters supported",
lambda: ps.read_csv(fn, comment="##").show(),
)
self.assertRaisesRegex(
ValueError,
"Only length-1 comment characters supported",
lambda: ps.read_csv(fn, comment=1),
)
self.assertRaisesRegex(
ValueError,
"Only length-1 comment characters supported",
lambda: ps.read_csv(fn, comment=[1]),
)
def test_read_csv_with_limit(self):
with self.csv_file(self.csv_text_with_comments) as fn:
expected = pd.read_csv(fn, comment="#", nrows=2)
actual = ps.read_csv(fn, comment="#", nrows=2)
self.assert_eq(expected, actual, almost=True)
def test_read_csv_with_encoding(self):
# SPARK-37181: Read csv supporting latin-1 encoding.
with self.csv_file(self.csv_text) as fn:
expected = pd.read_csv(fn, encoding="latin-1")
actual = ps.read_csv(fn, encoding="latin-1")
self.assert_eq(expected, actual, almost=True)
def test_read_csv_with_sep(self):
with self.csv_file(self.tab_delimited_csv_text) as fn:
expected = pd.read_csv(fn, sep="\t")
actual = ps.read_csv(fn, sep="\t")
self.assert_eq(expected, actual, almost=True)
def test_read_csv_with_squeeze(self):
with self.csv_file(self.csv_text) as fn:
expected = pd.read_csv(fn, squeeze=True, usecols=["name"])
actual = ps.read_csv(fn, squeeze=True, usecols=["name"])
self.assert_eq(expected, actual, almost=True)
expected = pd.read_csv(fn, squeeze=True, usecols=["name", "amount"])
actual = ps.read_csv(fn, squeeze=True, usecols=["name", "amount"])
self.assert_eq(expected, actual, almost=True)
expected = pd.read_csv(fn, squeeze=True, usecols=["name", "amount"], index_col=["name"])
actual = ps.read_csv(fn, squeeze=True, usecols=["name", "amount"], index_col=["name"])
self.assert_eq(expected, actual, almost=True)
def test_read_csv_with_mangle_dupe_cols(self):
self.assertRaisesRegex(
ValueError, "mangle_dupe_cols", lambda: ps.read_csv("path", mangle_dupe_cols=False)
)
def test_read_csv_with_parse_dates(self):
self.assertRaisesRegex(
ValueError, "parse_dates", lambda: ps.read_csv("path", parse_dates=True)
)
def test_read_csv_with_dtype(self):
with self.csv_file(self.csv_text) as fn:
self.assert_eq(ps.read_csv(fn), pd.read_csv(fn), almost=True)
self.assert_eq(ps.read_csv(fn, dtype=str), pd.read_csv(fn, dtype=str))
self.assert_eq(
ps.read_csv(fn, dtype={"amount": "int64"}),
pd.read_csv(fn, dtype={"amount": "int64"}),
)
def test_read_csv_with_quotechar(self):
with self.csv_file(self.q_quoted_csv_text) as fn:
self.assert_eq(
ps.read_csv(fn, quotechar="Q"), pd.read_csv(fn, quotechar="Q"), almost=True
)
def test_read_csv_with_escapechar(self):
with self.csv_file(self.e_escapeted_csv_text) as fn:
self.assert_eq(
ps.read_csv(fn, escapechar="E"), pd.read_csv(fn, escapechar="E"), almost=True
)
self.assert_eq(
ps.read_csv(fn, escapechar="ABC", escape="E"),
pd.read_csv(fn, escapechar="E"),
almost=True,
)
def test_to_csv(self):
pdf = pd.DataFrame({"aa": [1, 2, 3], "bb": [4, 5, 6]}, index=[0, 1, 3])
psdf = ps.DataFrame(pdf)
self.assert_eq(psdf.to_csv(), pdf.to_csv(index=False))
self.assert_eq(psdf.to_csv(columns=["aa"]), pdf.to_csv(columns=["aa"], index=False))
self.assert_eq(psdf.aa.to_csv(), pdf.aa.to_csv(index=False, header=True))
pdf = pd.DataFrame({"a": [1, np.nan, 3], "b": ["one", "two", None]}, index=[0, 1, 3])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_csv(na_rep="null"), pdf.to_csv(na_rep="null", index=False))
self.assert_eq(
psdf.a.to_csv(na_rep="null"), pdf.a.to_csv(na_rep="null", index=False, header=True)
)
self.assertRaises(KeyError, lambda: psdf.to_csv(columns=["ab"]))
pdf = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]}, index=[0, 1, 3])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_csv(), pdf.to_csv(index=False))
self.assert_eq(psdf.to_csv(header=False), pdf.to_csv(header=False, index=False))
self.assert_eq(psdf.to_csv(), pdf.to_csv(index=False))
# non-string names
pdf = pd.DataFrame({10: [1, 2, 3], 20: [4, 5, 6]}, index=[0, 1, 3])
psdf = ps.DataFrame(pdf)
self.assert_eq(psdf.to_csv(), pdf.to_csv(index=False))
self.assert_eq(psdf.to_csv(columns=[10]), pdf.to_csv(columns=[10], index=False))
self.assertRaises(TypeError, lambda: psdf.to_csv(columns=10))
def _check_output(self, dir, expected):
output_paths = [path for path in os.listdir(dir) if path.startswith("part-")]
assert len(output_paths) > 0
output_path = "%s/%s" % (dir, output_paths[0])
with open(output_path) as f:
self.assertEqual(f.read(), expected)
def test_to_csv_with_path(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
tmp_dir = "{}/tmp1".format(self.tmp_dir)
psdf.to_csv(tmp_dir, num_files=1)
self._check_output(tmp_dir, pdf.to_csv(index=False))
tmp_dir = "{}/tmp2".format(self.tmp_dir)
self.assertRaises(KeyError, lambda: psdf.to_csv(tmp_dir, columns=["c"], num_files=1))
# non-string names
pdf = pd.DataFrame({10: [1, 2, 3], 20: ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
tmp_dir = "{}/tmp3".format(self.tmp_dir)
psdf.to_csv(tmp_dir, num_files=1)
self._check_output(tmp_dir, pdf.to_csv(index=False))
tmp_dir = "{}/tmp4".format(self.tmp_dir)
psdf.to_csv(tmp_dir, columns=[10], num_files=1)
self._check_output(tmp_dir, pdf.to_csv(columns=[10], index=False))
tmp_dir = "{}/tmp5".format(self.tmp_dir)
self.assertRaises(TypeError, lambda: psdf.to_csv(tmp_dir, columns=10, num_files=1))
def test_to_csv_with_path_and_basic_options(self):
pdf = pd.DataFrame({"aa": [1, 2, 3], "bb": ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
psdf.to_csv(self.tmp_dir, num_files=1, sep="|", header=False, columns=["aa"])
expected = pdf.to_csv(index=False, sep="|", header=False, columns=["aa"])
self._check_output(self.tmp_dir, expected)
def test_to_csv_with_path_and_basic_options_multiindex_columns(self):
pdf = pd.DataFrame({("x", "a"): [1, 2, 3], ("y", "b"): ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
with self.assertRaises(ValueError):
psdf.to_csv(self.tmp_dir, num_files=1, sep="|", columns=[("x", "a")])
psdf.to_csv(self.tmp_dir, num_files=1, sep="|", header=["a"], columns=[("x", "a")])
pdf.columns = ["a", "b"]
expected = pdf.to_csv(index=False, sep="|", columns=["a"])
self._check_output(self.tmp_dir, expected)
def test_to_csv_with_path_and_pyspark_options(self):
pdf = pd.DataFrame({"a": [1, 2, 3, None], "b": ["a", "b", "c", None]})
psdf = ps.DataFrame(pdf)
psdf.to_csv(self.tmp_dir, nullValue="null", num_files=1)
expected = pdf.to_csv(index=False, na_rep="null")
self._check_output(self.tmp_dir, expected)
def test_to_csv_with_partition_cols(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
psdf.to_csv(self.tmp_dir, partition_cols="b", num_files=1)
partition_paths = [path for path in os.listdir(self.tmp_dir) if path.startswith("b=")]
assert len(partition_paths) > 0
for partition_path in partition_paths:
column, value = partition_path.split("=")
expected = pdf[pdf[column] == value].drop("b", axis=1).to_csv(index=False)
output_paths = [
path
for path in os.listdir("%s/%s" % (self.tmp_dir, partition_path))
if path.startswith("part-")
]
assert len(output_paths) > 0
output_path = "%s/%s/%s" % (self.tmp_dir, partition_path, output_paths[0])
with open(output_path) as f:
self.assertEqual(f.read(), expected)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_csv import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
import re
from binary_tree_with_parent_prototype import BinaryTreeNode
from list_node import ListNode, list_size
from test_framework.binary_tree_utils import (binary_tree_height,
binary_tree_size)
class SerializationTrait:
def name(self):
raise NotImplementedError('Unsupported type')
def parse(self, arg):
raise NotImplementedError('Unsupported type')
def get_metric_names(self, arg_name):
raise NotImplementedError('Unsupported type')
def get_metrics(self, x):
raise NotImplementedError('Unsupported type')
def is_void(self):
return False
class VoidTrait(SerializationTrait):
def name(self):
return 'void'
def parse(self, arg):
raise RuntimeError('Can\'t parse void')
def get_metric_names(self, arg_name):
return []
def get_metrics(self, x):
return []
def is_void(self):
return True
class StringTrait(SerializationTrait):
def name(self):
return 'string'
def parse(self, json_object):
return str(json_object)
def get_metric_names(self, arg_name):
return ['size({})'.format(arg_name)]
def get_metrics(self, x):
return [len(x)]
class IntegerTrait(SerializationTrait):
def name(self):
return 'int'
def parse(self, json_object):
return int(json_object)
def get_metric_names(self, arg_name):
return [str(arg_name)]
def get_metrics(self, x):
return [abs(int(x))]
class FloatTrait(SerializationTrait):
def name(self):
return 'float'
def parse(self, json_object):
return float(json_object)
def get_metric_names(self, arg_name):
return [str(arg_name)]
def get_metrics(self, x):
        return [min(2 ** 32 - 1, abs(float(x)))]
class BooleanTrait(SerializationTrait):
def name(self):
return 'bool'
def parse(self, json_object):
return bool(json_object)
def get_metric_names(self, arg_name):
return []
def get_metrics(self, x):
return []
class ListTrait(SerializationTrait):
def __init__(self, inner_type_trait):
super().__init__()
self._inner_type_trait = inner_type_trait
def name(self):
return 'array({})'.format(self._inner_type_trait.name())
def parse(self, json_object):
return [self._inner_type_trait.parse(inner) for inner in json_object]
def get_metric_names(self, arg_name):
return ['size({})'.format(arg_name)]
def get_metrics(self, x):
if isinstance(x, list):
return [len(x)]
raise RuntimeError('Expected list')
def get_inner_trait(self):
return self._inner_type_trait
class BinaryTreeTrait(SerializationTrait):
def __init__(self, node_type, inner_type_trait):
super().__init__()
self._node_type = node_type
self._inner_type_trait = inner_type_trait
def name(self):
return 'binary_tree({})'.format(self._inner_type_trait.name())
def parse(self, json_object):
def build_binary_tree(data):
"""A helper function for binary tree parser.
            Constructs a binary tree from a list of keys (and None values).
:param data - a list of serialized keys.
"""
nodes = [
None if node is None else BinaryTreeNode(
self._inner_type_trait.parse(node)) for node in data
]
candidate_children = nodes[::-1]
root = candidate_children.pop()
for node in nodes:
if node:
if candidate_children:
node.left = candidate_children.pop()
if node.left is not None:
node.left.parent = node
if candidate_children:
node.right = candidate_children.pop()
if node.right is not None:
node.right.parent = node
return root
return build_binary_tree(json_object)
def get_metric_names(self, arg_name):
return ['size({})'.format(arg_name), 'height({})'.format(arg_name)]
def get_metrics(self, x):
return [binary_tree_size(x), binary_tree_height(x)]
class LinkedListTrait(SerializationTrait):
def __init__(self, inner_type_trait):
super().__init__()
self._list_trait = ListTrait(inner_type_trait)
def name(self):
return 'linked_list({})'.format(
self._list_trait.get_inner_trait().name())
def parse(self, json_object):
parsed = self._list_trait.parse(json_object)
head = None
for value in reversed(parsed):
head = ListNode(value, head)
return head
def get_metric_names(self, arg_name):
return ['size({})'.format(arg_name)]
def get_metrics(self, x):
if x is None:
return [0]
elif isinstance(x, ListNode):
return [list_size(x)]
raise RuntimeError('Expected ListNode')
class SetTrait(SerializationTrait):
def __init__(self, inner_type_trait):
super().__init__()
self._list_trait = ListTrait(inner_type_trait)
def name(self):
return 'set({})'.format(self._list_trait.get_inner_trait().name())
def parse(self, json_object):
return set(self._list_trait.parse(json_object))
def get_metric_names(self, arg_name):
return ['size({})'.format(arg_name)]
def get_metrics(self, x):
if isinstance(x, set):
return [len(x)]
raise RuntimeError('Expected set')
# TODO: Custom parser that throws with mismatch info.
def get_inner_trait(self):
return self._list_trait.get_inner_trait()
class TupleTrait(SerializationTrait):
def __init__(self, inner_type_traits):
super().__init__()
self._inner_type_traits = inner_type_traits
def name(self):
return 'tuple({})'.format(','.join(t.name()
for t in self._inner_type_traits))
def parse(self, json_object):
if len(json_object) != len(self._inner_type_traits):
raise RuntimeError(
                'Tuple parser: expected {} values, provided {}'.format(
len(self._inner_type_traits), len(json_object)))
return tuple(
inner_type_trait.parse(p) for inner_type_trait, p in zip(
self._inner_type_traits, json_object))
def get_metric_names(self, arg_name):
# TODO: Find how to provide custom metrics.
return []
def get_metrics(self, x):
return []
PRIMITIVE_TYPES_MAPPINGS = {
'void': VoidTrait,
'string': StringTrait,
'int': IntegerTrait,
'long': IntegerTrait,
'float': FloatTrait,
'bool': BooleanTrait
}
def get_trait(typename):
if typename in PRIMITIVE_TYPES_MAPPINGS:
return PRIMITIVE_TYPES_MAPPINGS[typename]()
list_regex = re.compile(r'^array\((.*)\)$')
m = list_regex.match(typename)
if m and len(m.groups()) == 1:
return ListTrait(get_trait(m.group(1)))
binary_tree_regex = re.compile(r'^binary_tree\((.*)\)$')
m = binary_tree_regex.match(typename)
if m and len(m.groups()) == 1:
return BinaryTreeTrait(BinaryTreeNode, get_trait(m.group(1)))
linked_list_regex = re.compile(r'^linked_list\((.*)\)$')
m = linked_list_regex.match(typename)
if m and len(m.groups()) == 1:
return LinkedListTrait(get_trait(m.group(1)))
set_regex = re.compile(r'^set\((.*)\)$')
m = set_regex.match(typename)
if m and len(m.groups()) == 1:
return SetTrait(get_trait(m.group(1)))
tuple_regex = re.compile(r'^tuple\((.*)\)$')
m = tuple_regex.match(typename)
if m and len(m.groups()) == 1:
return TupleTrait([get_trait(x) for x in m.group(1).split(',')])
raise NotImplementedError("Unsupported type " + typename)
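# Illustrative usage (a sketch, not part of the original framework API):
#
#     get_trait('int').parse('42')                     # -> 42
#     get_trait('array(int)').parse(['1', '2', '3'])   # -> [1, 2, 3]
#     get_trait('tuple(string,int)').parse(['ab', 7])  # -> ('ab', 7)
#     get_trait('linked_list(int)').name()             # -> 'linked_list(int)'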
|
|
# ===============================================================================
# Copyright 2017 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import json
import struct
import time
from numpy import append as npappend
from traits.api import List
from pychron.hardware.quadera_spectrometer_controller import QuaderaController
from pychron.processing.isotope import Isotope
from pychron.processing.isotope_group import IsotopeGroup
from pychron.pychron_constants import (
ISOTOPX_DEFAULT_INTEGRATION_TIME,
ISOTOPX_INTEGRATION_TIMES,
NULL_STR,
QUADERA_DEFAULT_INTEGRATION_TIME,
QUADERA_INTEGRATION_TIMES,
)
from pychron.spectrometer.base_spectrometer import BaseSpectrometer
# from pychron.spectrometer.isotopx import SOURCE_CONTROL_PARAMETERS, IsotopxMixin
from pychron.spectrometer.pfeiffer import PfeifferMixin
from pychron.spectrometer.pfeiffer.detector.quadera import QuaderaDetector
from pychron.spectrometer.pfeiffer.magnet.quadera import QuaderaMagnet
from pychron.spectrometer.pfeiffer.source.quadera import QuaderaSource
class QuaderaSpectrometer(BaseSpectrometer, PfeifferMixin):
# integration_time = Int
integration_times = List(QUADERA_INTEGRATION_TIMES)
magnet_klass = QuaderaMagnet
detector_klass = QuaderaDetector
source_klass = QuaderaSource
microcontroller_klass = QuaderaController
# _test_connect_command = 'GETMASS'
# _read_enabled = True
use_deflection_correction = False
use_hv_correction = False
def _microcontroller_default(self):
# service = 'pychron.hardware.quadera_spectrometer_controller.QuaderaController'
# s = self.application.get_service(service)
s = QuaderaController(name="spectrometer_microcontroller")
s.bootstrap()
s.communicator.simulation = True
return s
def set_data_pump_mode(self, mode):
pass
def sink_data(self, writer, n, delay):
client = self.microcontroller.communicator
handle = client.get_handler()
sock = handle.sock
# get the data
header = None
cnt = 1
start_time = st = time.time()
isotopes = {}
while 1:
if cnt > n:
break
et = time.time() - st
if et < delay:
time.sleep(delay - et)
st = time.time()
size = sock.recv(4)
size = struct.unpack("i", size)[0]
str_data = sock.recv(size)
# self.debug(str_data)
s = str_data.decode("ascii")
self.debug(s)
s = s.replace("False", '"False"')
s = s.replace("True", '"True"')
obj = json.loads(s)
# if not i:
# construct and write the header
keys = list(obj.keys())
if "amuNames" not in keys:
continue
if not header:
masses = ["mass({})".format(m) for m in obj["amuNames"]]
header = (
[
"count",
"time",
]
+ masses
+ keys
)
writer.writerow(header)
raw = [obj[h] for h in keys]
intensities = obj["intensity"]
ct = time.time()
for m, si in zip(obj["amuNames"], intensities):
if m not in isotopes:
iso = Isotope(m, "Detector")
iso.name = m
isotopes[m] = iso
else:
iso = isotopes[m]
iso.xs = npappend(iso.xs, ct - start_time)
iso.ys = npappend(iso.ys, si)
row = (
[
cnt,
ct,
]
+ intensities
+ raw
)
self.debug("sinking row: {}".format(row))
writer.writerow(row)
cnt += 1
return IsotopeGroup(isotopes=isotopes)
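    # Note on the framing handled above (an informal sketch inferred from this code,
    # not from Quadera documentation): each message is a 4-byte struct-packed length
    # prefix followed by that many bytes of ASCII JSON, i.e.
    #
    #     size = struct.unpack("i", sock.recv(4))[0]
    #     payload = sock.recv(size)
    #
    # sock.recv(size) may return fewer than `size` bytes; a stricter reader would
    # loop until the full payload has arrived.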
# def set_data_pump_mode(self, mode):
# resp = self.microcontroller.ask('General.DataPump.Mode {}'.format(mode))
#
# def halted(self):
# """
# General.Cycle.Status
# 1= halt, 5=run multi
# """
# resp = self.microcontroller.ask('General.Cycle.Status')
# if resp:
# resp = resp.strip()
# return int(resp) == 1
#
# def sink_data(self):
# packet = self.microcontroller.ask('General.DataPump.Data')
#
# def get_bytes(n):
# i = 0
# while 1:
# yield packet[i:i+n]
# i+=n
#
# channel = get_bytes(1)
# datatype = get_bytes(1)
# status = get_bytes(1)
# ndata = get_bytes(1)
#
# timestamp = get_bytes(8)
# max_data_tuples = get_bytes(2)
# first_mass = get_bytes(2)
# last_mass = get_bytes(2)
# dwell_speed = get_bytes(1)
# measure_unit_mass_resol = get_bytes(1)
# ndata_tuples = int(get_bytes(1), 16)
#
# for j in range(ndata_tuples):
# intensity = get_bytes(4)
# mass = get_bytes(2)
# status = get_bytes(1)
# adjust_mode = get_bytes(1)
#
# return timestamp, channel, intensity
def make_configuration_dict(self):
return {}
def make_gains_dict(self):
return {}
def make_deflection_dict(self):
return {}
# def start(self):
# self.set_integration_time(1, force=True)
# def finish_loading(self):
# super(QuaderaSpectrometer, self).finish_loading()
# config = self._get_cached_config()
# if config is not None:
# magnet = config['magnet']
# # specparams, defl, trap, magnet = ret
# mftable_name = magnet.get('mftable')
# if mftable_name:
# self.debug('updating mftable name {}'.format(mftable_name))
# self.magnet.field_table.path = mftable_name
# self.magnet.field_table.load_table(load_items=True)
def _send_configuration(self, **kw):
pass
def _get_cached_config(self):
return {}
# def get_update_period(self, it=None, is_scan=False):
# return self.integration_time * 0.95
# def cancel(self):
# self.debug('canceling')
# def read_intensities(self, *args, **kw):
# def read_intensities(self, timeout=60, trigger=False, target='ACQ.B', verbose=False):
# self._read_enabled = True
# if verbose:
# self.debug('read intensities')
# resp = True
# if trigger:
# resp = self.trigger_acq()
# if resp is not None:
# # if verbose:
# # self.debug(f'waiting {self.integration_time * 0.95} before trying to get data')
# # time.sleep(self.integration_time * 0.95)
# time.sleep(0.95)
# # if verbose:
# # self.debug('trigger wait finished')
#
# keys = []
# signals = []
# collection_time = None
#
# # self.microcontroller.lock.acquire()
# # self.debug(f'acquired mcir lock {self.microcontroller.lock}')
# target = '#EVENT:{},{}'.format(target, self.rcs_id)
# if resp is not None:
# keys = self.detector_names[::-1]
# while 1:
# line = self.readline()
# if line is None:
# break
#
# if verbose:
# self.debug('raw: {}'.format(line))
# if line and line.startswith(target):
# args = line[:-1].split(',')
# ct = datetime.strptime(args[4], '%H:%M:%S.%f')
#
# collection_time = datetime.now()
#
# # copy to collection time
# collection_time.replace(hour=ct.hour, minute=ct.minute, second=ct.second,
# microsecond=ct.microsecond)
# try:
# signals = [float(i) for i in args[5:]]
# except ValueError as e:
# self.warning('Failed getting data. error={}'.format(e))
#
# if verbose:
# self.debug('line: {}'.format(line[:15]))
# break
#
# # self.microcontroller.lock.release()
# if len(signals) != len(keys):
# keys, signals = [], []
# return keys, signals, collection_time
def read_integration_time(self):
return self.integration_time
def set_integration_time(self, it, force=False):
"""
:param it: float, integration time in seconds
        :param force: set the integration time even if "it" is not different from self.integration_time
:return: float, integration time
"""
self.debug(
"acquisition period set to 1 second. integration time set to {}".format(it)
)
# self.ask('SetAcqPeriod 1000')
self.integration_time = it
# if self.integration_time != it or force:
# self.ask('StopAcq')
# self.debug('setting integration time = {}'.format(it))
#
# self.ask('SetAcqPeriod {}'.format(int(it * 1000)))
# self.trait_setq(integration_time=it)
return it
# def read_parameter_word(self, keys):
# self.debug('read parameter word. keys={}'.format(keys))
# values = []
# for kk in keys:
# try:
# key = SOURCE_CONTROL_PARAMETERS[kk]
# except KeyError:
# values.append(NULL_STR)
# continue
#
# resp = self.ask('GetSourceOutput {}'.format(key))
# if resp is not None:
# try:
# last_set, readback = resp.split(',')
# values.append(float(readback))
# except ValueError:
# values.append(NULL_STR)
# return values
def _get_simulation_data(self):
signals = [1, 100, 3, 0.01, 0.01, 0.01] # + random(6)
keys = ["H2", "H1", "AX", "L1", "L2", "CDD"]
return keys, signals, None
def _integration_time_default(self):
self.default_integration_time = QUADERA_DEFAULT_INTEGRATION_TIME
return QUADERA_DEFAULT_INTEGRATION_TIME
# ============= EOF =============================================
|
|
#!/bin/env python
'''This script illustrates how to download and access the `local
politics` subcorpus within TREC DD
This particular corpus is a selection of the TREC KBA 2014
StreamCorpus that has already been tagged with Serif NER, and is
organized into hourly directories based on the origination time stamp
on each document.
The full list of files is available at
https://aws-publicdatasets.s3.amazonaws.com/trec/dd/local-politics-streamcorpus-v0_3_0-s3-paths.txt.xz
and that file must be downloaded to your local directory in order for
this script to work.
The filtering process that generated this data set used these
substrings:
https://aws-publicdatasets.s3.amazonaws.com/trec/dd/local-politics-domain-substrings-filtering.txt
and this streamcorpus_pipeline configuration file:
https://aws-publicdatasets.s3.amazonaws.com/trec/dd/local-politics-streamcorpus-pipeline-filter-config.yaml
https://aws-publicdatasets.s3.amazonaws.com/trec/dd/local-politics-streamcorpus-pipeline-filter-domains.txt.xz.gpg
and this command:
streamcorpus_pipeline -c local-politics-streamcorpus-pipeline-filter-config.yaml -i <path to input S3 file>
'''
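## Example invocation (illustrative only; the script name and the date-hour
## value below are placeholders, the flags are defined in main() further down):
##
##   python fetch_local_politics.py \
##       --s3-paths-fname local-politics-streamcorpus-v0_3_0-s3-paths.txt.xz \
##       --date-hour 2012-01-01-00 \
##       --output-dir output \
##       --max-chunk-size 500 \
##       --compress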
## python standard library components
import argparse
from cStringIO import StringIO
import gzip
from hashlib import md5
from itertools import chain
import logging
from operator import attrgetter
import os
import sys
import traceback
import time
## these python packages require installation; consider using a
## virtualenv, which you can install on Ubuntu like this:
# sudo apt-get install python-virtualenv liblzma-dev python-dev
# virtualenv ve
# source ve/bin/activate
# pip install requests backports.lzma streamcorpus
## installation on CentOS/RHEL is similar using yum instead of apt-get
from backports import lzma
import cbor
import requests
from streamcorpus import Chunk, decrypt_and_uncompress, compress_and_encrypt
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
class trec_xml_file_roller(object):
'''provides a context manager for rolling files
'''
def __init__(self, output_dir, max_chunk_size=500, compress=False):
self.output_dir = output_dir
self.max_chunk_size = max_chunk_size
self.compress = compress
def __enter__(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
self.current_file_path = None
self.current_file = None
self.tmp_file_path = os.path.join(self.output_dir, 'tmp.xml')
self.total_written = 0
return self
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
self.roll()
def roll(self):
'''close `current_file` and rename it to `current_file_path`
'''
if self.current_file is not None:
self.current_file.close()
## atomic rename to final location
os.rename(self.tmp_file_path, self.current_file_path)
self.current_file = None
def add(self, item):
'''add `item` to `current_file`, opening it as temporary file if not
already open. This also constructs the `current_file_path`
when it opens the temporary file.
'''
if self.current_file is None:
            ## construct a final path to which this file will be moved
## when it rolls
self.current_file_path = os.path.join(
self.output_dir,
'trec-dd-local-politics-%d.xml' % self.total_written)
if self.compress:
self.current_file = gzip.open(self.tmp_file_path, 'wb')
self.current_file_path += '.gz'
else:
self.current_file = open(self.tmp_file_path, 'wb')
## write the data
self.current_file.write('<DOC>\n')
self.current_file.write('<DOCNO>%s</DOCNO>\n' % item['key'])
self.current_file.write('<TIMESTAMP>%s</TIMESTAMP>\n' % item['timestamp'])
self.current_file.write('<URL>%s</URL>\n' % item['url'])
self.current_file.write('<TEXT>\n%s\n</TEXT>\n' % item['response']['body'])
self.current_file.write('</DOC>\n')
## roll the files each time we reach max_chunk_size
self.total_written += 1
if self.total_written % self.max_chunk_size == 0:
self.roll()
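    ## Illustrative only: each call to add() appends one record shaped like
    ## this to the current chunk file (the values shown are made up):
    ##
    ##   <DOC>
    ##   <DOCNO>1325376000-abcdef0123456789abcdef0123456789</DOCNO>
    ##   <TIMESTAMP>1325376000</TIMESTAMP>
    ##   <URL>http://example.com/city-council-meeting</URL>
    ##   <TEXT>
    ##   ...clean HTML body...
    ##   </TEXT>
    ##   </DOC>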
class cbor_file_roller(trec_xml_file_roller):
def __init__(self, output_dir, max_chunk_size=500, compress=False):
super(cbor_file_roller, self).__init__(output_dir, max_chunk_size, compress)
def add(self, item):
'''add `item` to `current_file`, opening it as temporary file if not
already open. This also constructs the `current_file_path`
when it opens the temporary file.
'''
if self.current_file is None:
            ## construct a final path to which this file will be moved
## when it rolls
self.current_file_path = os.path.join(
self.output_dir,
'trec-dd-local-politics-%d.cbor' % self.total_written)
if self.compress:
self.current_file = gzip.open(self.tmp_file_path, 'wb')
self.current_file_path += '.gz'
else:
self.current_file = open(self.tmp_file_path, 'wb')
## write the data
cbor.dump(item, self.current_file)
## roll the files each time we reach max_chunk_size
self.total_written += 1
if self.total_written % self.max_chunk_size == 0:
self.roll()
def cca_items(args):
'''This generator takes an s3_paths_fname file, fetches the data,
constructs a CCA record, and yields it.
'''
for path in lzma.open(args.s3_paths_fname):
if args.date_hour is not None:
if not path.startswith(args.date_hour):
continue
s3_path = args.s3_path_prefix + path.strip()
url = args.s3_http_host + s3_path
logger.info( url )
retries = 0
max_retries = 10
while retries < max_retries:
retries += 1
sys.stderr.flush()
try:
resp = requests.get(url)
errors, data = decrypt_and_uncompress(resp.content, gpg_private='trec-kba-rsa')
logger.info( '\n'.join(errors) )
for si in Chunk(file_obj=StringIO(data)):
item = {
'key': si.stream_id,
'url': si.abs_url,
'timestamp': si.stream_time.epoch_ticks,
'request': None, ## not part of this data set
'response': {
'headers': [
['Content-Type', 'text/html'],
],
'body': si.body.clean_html,
## alternatively, could use si.body.raw and
## si.body.media_type for the Content-Type
## header, but that would cause the Serif NER
## to be useless to teams...
},
'imported': None,
}
yield item
#print cbor.dumps(rec)
## do something with the data
logger.info(
'%d bytes of html, or %d bytes of tag-stripped clean_visible, and %d sentences with %d tokens' % (
len(si.body.clean_html), len(si.body.clean_visible),
len(si.body.sentences['serif']),
len(list(chain(*map(attrgetter('tokens'), si.body.sentences['serif'])))),
))
break # break out of retry loop
except Exception, exc:
logger.critical( traceback.format_exc(exc) )
logger.critical( 'retrying %d of %d times to fetch and access: %s' % (retries, max_retries, url) )
time.sleep(1)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--date-hour', help='If specified, then `date-hour` is *added* to `output-dir`'
' and this process will filter through the lines in `s3-paths-fname` to select only'
' those files that start with this date-hour. There are 11,129 date hours that you can'
' find by `xzcat local-politics-streamcorpus-v0_3_0-s3-paths.txt.xz | cut -d/ -f 1 | uniq | sort -u > date-hours.txt`')
parser.add_argument('--output-dir', help='directory for writing output files in trec/xml format')
parser.add_argument('--max-chunk-size', default=500, type=int, help='size at which to roll chunk files')
parser.add_argument('--compress', default=False, action='store_true', help='compress output files with gzip')
parser.add_argument('--s3-paths-fname', default='local-politics-streamcorpus-v0_3_0-s3-paths.txt.xz')
parser.add_argument('--s3-http-host', default='https://aws-publicdatasets.s3.amazonaws.com/')
parser.add_argument('--s3-path-prefix', default='trec/dd/local-politics-streamcorpus-v0_3_0/')
args = parser.parse_args()
if not os.path.exists(args.s3_paths_fname):
        sys.exit('please download %strec/dd/%s' % (args.s3_http_host, args.s3_paths_fname))
if args.date_hour:
args.output_dir += '/' + args.date_hour
with cbor_file_roller(args.output_dir, max_chunk_size=args.max_chunk_size, compress=args.compress) as roller:
for item in cca_items(args):
roller.add(item)
logger.critical('added %r %r %s' % (item['key'], item['url'], md5(item['response']['body']).hexdigest()))
if __name__ == '__main__':
main()
|
|
"""
Author: Justin Cappos
Start Date: July 1st, 2008
Description:
Handles exiting and killing all threads, tracking CPU / Mem usage, etc.
"""
import threading
import os
import time
# needed for sys.stderr and windows Popen hackery
import sys
# needed for signal numbers
import signal
# needed for harshexit
import harshexit
# print useful info when exiting...
import tracebackrepy
# used to query status, etc.
# This may fail on Windows CE
try:
import subprocess
mobile_no_subprocess = False
except ImportError:
# Set flag to avoid using subprocess
mobile_no_subprocess = True
# used for socket.error
import socket
# need for status retrieval
import statusstorage
# Get constants
import repy_constants
# Get access to the status interface so we can start it
import nmstatusinterface
# This allows us to meter resource use
import nanny
# This is used for IPC
import marshal
# This will fail on non-windows systems
try:
import windows_api as windows_api
except:
windows_api = None
# Armon: This is a place holder for the module that will be imported later
os_api = None
# Armon: See additional imports at the bottom of the file
class UnsupportedSystemException(Exception):
pass
################### Publicly visible functions #######################
# check the disk space used by a dir.
def compute_disk_use(dirname):
# Convert path to absolute
dirname = os.path.abspath(dirname)
diskused = 0
for filename in os.listdir(dirname):
try:
diskused = diskused + os.path.getsize(os.path.join(dirname, filename))
except IOError: # They likely deleted the file in the meantime...
pass
except OSError: # They likely deleted the file in the meantime...
pass
# charge an extra 4K for each file to prevent lots of little files from
    # using up the disk. I'm doing this outside of the except clause in case
    # the failure to get the size wasn't related to deletion.
diskused = diskused + 4096
return diskused
# prepare a socket so it behaves how we want
def preparesocket(socketobject):
if ostype == 'Windows':
# we need to set a timeout because on rare occasions Windows will block
# on recvmess with a bad socket. This prevents it from locking the system.
# We use select, so the timeout should never be actually used.
# The actual value doesn't seem to matter, so I'll use 100 years
socketobject.settimeout(60*60*24*365*100)
elif ostype == 'Linux' or ostype == 'Darwin':
# Linux seems not to care if we set the timeout, Mac goes nuts and refuses
# to let you send from a socket you're receiving on (why?)
pass
else:
raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")"
# Armon: Also launches the nmstatusinterface thread.
# This will result in an internal thread on Windows
# and a thread on the external process for *NIX
def monitor_cpu_disk_and_mem():
if ostype == 'Linux' or ostype == 'Darwin':
# Startup a CPU monitoring thread/process
do_forked_resource_monitor()
elif ostype == 'Windows':
# Now we set up a cpu nanny...
WinCPUNannyThread().start()
# Launch mem./disk resource nanny
WindowsNannyThread().start()
# Start the nmstatusinterface. Windows means repy isn't run in an external
# process, so pass None instead of a process id.
nmstatusinterface.launch(None)
else:
raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")"
# Elapsed time
elapsedtime = 0
# Store the uptime of the system when we first get loaded
starttime = 0
last_uptime = 0
# Timestamp from our starting point
last_timestamp = time.time()
# This is our uptime granularity
granularity = 1
# This ensures only one thread calling getruntime at any given time
runtimelock = threading.Lock()
def getruntime():
"""
<Purpose>
Return the amount of time the program has been running. This is in
wall clock time. This function is not guaranteed to always return
increasing values due to NTP, etc.
<Arguments>
None
<Exceptions>
None.
<Side Effects>
None
<Remarks>
By default this will have the same granularity as the system clock. However, if time
goes backward due to NTP or other issues, getruntime falls back to system uptime.
This has much lower granularity, and varies by each system.
<Returns>
The elapsed time as float
"""
global starttime, last_uptime, last_timestamp, elapsedtime, granularity, runtimelock
# Get the lock
runtimelock.acquire()
# Check if Linux or BSD/Mac
if ostype in ["Linux", "Darwin"]:
uptime = os_api.get_system_uptime()
# Check if time is going backward
if uptime < last_uptime:
      # If the difference is less than 1 second, that is okay, since
      # the boot time is only precise to 1 second.
if (last_uptime - uptime) > 1:
raise EnvironmentError, "Uptime is going backwards!"
else:
# Use the last uptime
uptime = last_uptime
# No change in uptime
diff_uptime = 0
else:
# Current uptime, minus the last uptime
diff_uptime = uptime - last_uptime
# Update last uptime
last_uptime = uptime
# Check for windows
elif ostype in ["Windows"]:
# Release the lock
runtimelock.release()
# Time.clock returns elapsedtime since the first call to it, so this works for us
return time.clock()
# Who knows...
else:
raise EnvironmentError, "Unsupported Platform!"
# Current uptime minus start time
runtime = uptime - starttime
# Get runtime from time.time
current_time = time.time()
# Current time, minus the last time
diff_time = current_time - last_timestamp
# Update the last_timestamp
last_timestamp = current_time
# Is time going backward?
if diff_time < 0.0:
# Add in the change in uptime
elapsedtime += diff_uptime
# Lets check if time.time is too skewed
else:
skew = abs(elapsedtime + diff_time - runtime)
# If the skew is too great, use uptime instead of time.time()
if skew < granularity:
elapsedtime += diff_time
else:
elapsedtime += diff_uptime
# Release the lock
runtimelock.release()
# Return the new elapsedtime
return elapsedtime
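# Illustrative only (not in the original): a worked example of the fallback
# logic above.  Suppose one polling interval really covers 1.0s of uptime.
# If NTP steps the wall clock back so that diff_time == -29.0, the
# "diff_time < 0.0" branch is taken and elapsedtime grows by diff_uptime
# (1.0) instead of going backwards.  If the clock only drifts slightly, the
# skew stays under `granularity` and the higher-resolution diff_time is used.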
# This lock is used to serialize calls to get_resources
get_resources_lock = threading.Lock()
# Cache the disk used from the external process
cached_disk_used = 0L
# This array holds the times that repy was stopped.
# It is an array of tuples, of the form (time, amount)
# where time is when repy was stopped (from getruntime()) and amount
# is the stop time in seconds. The last process_stopped_max_entries are retained
process_stopped_timeline = []
process_stopped_max_entries = 100
# Method to expose resource limits and usage
def get_resources():
"""
<Purpose>
Returns the resource utilization limits as well
as the current resource utilization.
<Arguments>
None.
<Returns>
A tuple of dictionaries and an array (limits, usage, stoptimes).
Limits is the dictionary which maps the resource name
to its maximum limit.
Usage is the dictionary which maps the resource name
to its current usage.
Stoptimes is an array of tuples with the times which the Repy process
was stopped and for how long, due to CPU over-use.
Each entry in the array is a tuple (TOS, Sleep Time) where TOS is the
time of stop (respective to getruntime()) and Sleep Time is how long the
repy process was suspended.
The stop times array holds a fixed number of the last stop times.
Currently, it holds the last 100 stop times.
"""
# Acquire the lock...
get_resources_lock.acquire()
# ...but always release it
try:
# Construct the dictionaries as copies from nanny
(limits,usage) = nanny.get_resource_information()
    # Calculate all the usage values
pid = os.getpid()
# Get CPU and memory, this is thread specific
if ostype in ["Linux", "Darwin"]:
# Get CPU first, then memory
usage["cpu"] = os_api.get_process_cpu_time(pid)
# This uses the cached PID data from the CPU check
usage["memory"] = os_api.get_process_rss()
# Get the thread specific CPU usage
usage["threadcpu"] = os_api.get_current_thread_cpu_time()
# Windows Specific versions
elif ostype in ["Windows"]:
# Get the CPU time
usage["cpu"] = windows_api.get_process_cpu_time(pid)
# Get the memory, use the resident set size
usage["memory"] = windows_api.process_memory_info(pid)['WorkingSetSize']
# Get thread-level CPU
usage["threadcpu"] = windows_api.get_current_thread_cpu_time()
# Unknown OS
else:
raise EnvironmentError("Unsupported Platform!")
# Use the cached disk used amount
usage["diskused"] = cached_disk_used
finally:
# Release the lock
get_resources_lock.release()
# Copy the stop times
stoptimes = process_stopped_timeline[:]
# Return the dictionaries and the stoptimes
return (limits,usage,stoptimes)
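# Illustrative usage sketch (not part of the original module); the keys and
# tuple layout follow the docstring above:
#
#   limits, usage, stoptimes = get_resources()
#   if usage["memory"] > 0.9 * limits["memory"]:
#     print "warning: within 10% of the memory limit"
#   for stop_time, duration in stoptimes:
#     print "stopped at", stop_time, "for", duration, "seconds"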
################### Windows specific functions #######################
class WindowsNannyThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self,name="NannyThread")
def run(self):
# How often the memory will be checked (seconds)
memory_check_interval = repy_constants.CPU_POLLING_FREQ_WIN
# The ratio of the disk polling time to memory polling time.
disk_to_memory_ratio = int(repy_constants.DISK_POLLING_HDD / memory_check_interval)
# Which cycle number we're on
counter = 0
# Elevate our priority, above normal is higher than the usercode, and is enough for disk/mem
windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)
# need my pid to get a process handle...
mypid = os.getpid()
# run forever (only exit if an error occurs)
while True:
try:
# Increment the interval counter
counter += 1
# Check memory use, get the WorkingSetSize or RSS
memused = windows_api.process_memory_info(mypid)['WorkingSetSize']
if memused > nanny.get_resource_limit("memory"):
# We will be killed by the other thread...
raise Exception, "Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'"
# Check if we should check the disk
if (counter % disk_to_memory_ratio) == 0:
# Check diskused
diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)
if diskused > nanny.get_resource_limit("diskused"):
raise Exception, "Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'"
# Sleep until the next iteration of checking the memory
time.sleep(memory_check_interval)
except windows_api.DeadProcess:
# Process may be dead, or die while checking memory use
# In any case, there is no reason to continue running, just exit
harshexit.harshexit(99)
except:
tracebackrepy.handle_exception()
print >> sys.stderr, "Nanny died! Trying to kill everything else"
harshexit.harshexit(20)
# Windows specific CPU Nanny Stuff
winlastcpuinfo = [0,0]
# Enforces CPU limit on Windows and Windows CE
def win_check_cpu_use(cpulim, pid):
global winlastcpuinfo
# get use information and time...
now = getruntime()
# Get the total cpu time
usertime = windows_api.get_process_cpu_time(pid)
useinfo = [usertime, now]
# get the previous time and cpu so we can compute the percentage
oldusertime = winlastcpuinfo[0]
oldnow = winlastcpuinfo[1]
if winlastcpuinfo == [0,0]:
winlastcpuinfo = useinfo
# give them a free pass if it's their first time...
return 0
# save this data for next time...
winlastcpuinfo = useinfo
# Get the elapsed time...
elapsedtime = now - oldnow
# This is a problem
if elapsedtime == 0:
return -1 # Error condition
# percent used is the amount of change divided by the time...
percentused = (usertime - oldusertime) / elapsedtime
# Calculate amount of time to sleep for
stoptime = nanny.calculate_cpu_sleep_interval(cpulim, percentused,elapsedtime)
if stoptime > 0.0:
# Try to timeout the process
if windows_api.timeout_process(pid, stoptime):
# Log the stoptime
process_stopped_timeline.append((now, stoptime))
# Drop the first element if the length is greater than the maximum entries
if len(process_stopped_timeline) > process_stopped_max_entries:
process_stopped_timeline.pop(0)
# Return how long we slept so parent knows whether it should sleep
return stoptime
else:
# Process must have been making system call, try again next time
return -1
# If the stop time is 0, then avoid calling timeout_process
else:
return 0.0
# Dedicated Thread for monitoring CPU, this is run as a part of repy
class WinCPUNannyThread(threading.Thread):
# Thread variables
pid = 0 # Process pid
def __init__(self):
self.pid = os.getpid()
threading.Thread.__init__(self,name="CPUNannyThread")
def run(self):
# Elevate our priority, set us to the highest so that we can more effectively throttle
success = windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_HIGHEST)
# If we failed to get HIGHEST priority, try above normal, else we're still at default
if not success:
windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)
# Run while the process is running
while True:
try:
# Get the frequency
frequency = repy_constants.CPU_POLLING_FREQ_WIN
# Base amount of sleeping on return value of
# win_check_cpu_use to prevent under/over sleeping
slept = win_check_cpu_use(nanny.get_resource_limit("cpu"), self.pid)
if slept == -1:
# Something went wrong, try again
pass
elif (slept < frequency):
time.sleep(frequency-slept)
except windows_api.DeadProcess:
# Process may be dead
harshexit.harshexit(97)
except:
tracebackrepy.handle_exception()
print >> sys.stderr, "CPU Nanny died! Trying to kill everything else"
harshexit.harshexit(25)
############## *nix specific functions (may include Mac) ###############
# This method handles messages on the "diskused" channel from
# the external process. When the external process measures disk used,
# it is piped in and cached for calls to getresources.
def IPC_handle_diskused(bytes):
  # Update the module-level cache so get_resources() reports the new value.
  global cached_disk_used
  cached_disk_used = bytes
# This method handles messages on the "repystopped" channel from
# the external process. When the external process stops repy, it sends
# a tuple with (TOS, amount) where TOS is time of stop (getruntime()) and
# amount is the amount of time execution was suspended.
def IPC_handle_stoptime(info):
# Push this onto the timeline
process_stopped_timeline.append(info)
# Drop the first element if the length is greater than the max
if len(process_stopped_timeline) > process_stopped_max_entries:
process_stopped_timeline.pop(0)
# Use a special class of exception for when
# resource limits are exceeded
class ResourceException(Exception):
pass
# Armon: Method to write a message to the pipe, used for IPC.
# This allows the pipe to be multiplexed by sending simple dictionaries
def write_message_to_pipe(writehandle, channel, data):
"""
<Purpose>
Writes a message to the pipe
<Arguments>
writehandle:
A handle to a pipe which can be written to.
channel:
The channel used to describe the data. Used for multiplexing.
data:
The data to send.
<Exceptions>
As with os.write()
EnvironmentError will be thrown if os.write() sends 0 bytes, indicating the
pipe is broken.
"""
# Construct the dictionary
mesg_dict = {"ch":channel,"d":data}
# Convert to a string
mesg_dict_str = marshal.dumps(mesg_dict)
# Make a full string
mesg = str(len(mesg_dict_str)) + ":" + mesg_dict_str
# Send this
index = 0
while index < len(mesg):
bytes = os.write(writehandle, mesg[index:])
if bytes == 0:
      raise EnvironmentError, "Write sent 0 bytes! Pipe broken!"
index += bytes
# Armon: Method to read a message from the pipe, used for IPC.
# This allows the pipe to be multiplexed by sending simple dictionaries
def read_message_from_pipe(readhandle):
"""
<Purpose>
Reads a message from a pipe.
<Arguments>
readhandle:
A handle to a pipe which can be read from
<Exceptions>
As with os.read().
EnvironmentError will be thrown if os.read() returns a 0-length string, indicating
the pipe is broken.
<Returns>
A tuple (Channel, Data) where Channel is used to multiplex the pipe.
"""
# Read until we get to a colon
data = ""
index = 0
# Loop until we get a message
while True:
# Read in data if the buffer is empty
if index >= len(data):
# Read 8 bytes at a time
mesg = os.read(readhandle,8)
if len(mesg) == 0:
raise EnvironmentError, "Read returned empty string! Pipe broken!"
data += mesg
# Increment the index while there is data and we have not found a colon
while index < len(data) and data[index] != ":":
index += 1
# Check if we've found a colon
if len(data) > index and data[index] == ":":
# Get the message length
mesg_length = int(data[:index])
# Determine how much more data we need
more_data = mesg_length - len(data) + index + 1
# Read in the rest of the message
while more_data > 0:
mesg = os.read(readhandle, more_data)
if len(mesg) == 0:
raise EnvironmentError, "Read returned empty string! Pipe broken!"
data += mesg
more_data -= len(mesg)
# Done, convert the message to a dict
whole_mesg = data[index+1:]
mesg_dict = marshal.loads(whole_mesg)
# Return a tuple (Channel, Data)
return (mesg_dict["ch"],mesg_dict["d"])
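# Illustrative only (not part of the original module): a minimal round trip
# showing the length-prefixed framing used by write_message_to_pipe() and
# read_message_from_pipe().  The helper below is never called at import time;
# invoke it manually to see the framing in action.
def _example_pipe_round_trip():
  # Create an anonymous pipe; what is written to writehandle is read back
  # from readhandle.
  (readhandle, writehandle) = os.pipe()
  try:
    # Send a fake disk usage report on the "diskused" channel.
    write_message_to_pipe(writehandle, "diskused", 4096)
    # The reader recovers the channel name and the payload.
    (channel, data) = read_message_from_pipe(readhandle)
    assert channel == "diskused" and data == 4096
  finally:
    os.close(readhandle)
    os.close(writehandle)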
# This dictionary defines the functions that handle messages
# on each channel. E.g. when a message arrives on the "repystopped" channel,
# the IPC_handle_stoptime function should be invoked to handle it.
IPC_HANDLER_FUNCTIONS = {"repystopped":IPC_handle_stoptime,
"diskused":IPC_handle_diskused }
# This thread checks that the parent process is alive and invokes
# delegate methods when messages arrive on the pipe.
class parent_process_checker(threading.Thread):
def __init__(self, readhandle):
"""
<Purpose>
Terminates harshly if our parent dies before we do.
<Arguments>
readhandle: A file descriptor to the handle of a pipe to our parent.
"""
# Name our self
threading.Thread.__init__(self, name="ParentProcessChecker")
# Store the handle
self.readhandle = readhandle
def run(self):
# Run forever
while True:
# Read a message
try:
mesg = read_message_from_pipe(self.readhandle)
except Exception, e:
break
# Check for a handler function
if mesg[0] in IPC_HANDLER_FUNCTIONS:
# Invoke the handler function with the data
handler = IPC_HANDLER_FUNCTIONS[mesg[0]]
handler(mesg[1])
# Print a message if there is a message on an unknown channel
else:
print "[WARN] Message on unknown channel from parent process:", mesg[0]
### We only leave the loop on a fatal error, so we need to exit now
    # Write out status information; our parent would do this, but it's dead.
statusstorage.write_status("Terminated")
print >> sys.stderr, "Monitor process died! Terminating!"
harshexit.harshexit(70)
# For *NIX systems, there is an external process, and the
# pid for the actual repy process is stored here
repy_process_id = None
# Forks Repy. The child will continue execution, and the parent
# will become a resource monitor
def do_forked_resource_monitor():
global repy_process_id
# Get a pipe
(readhandle, writehandle) = os.pipe()
# I'll fork a copy of myself
childpid = os.fork()
if childpid == 0:
# We are the child, close the write end of the pipe
os.close(writehandle)
# Start a thread to check on the survival of the parent
parent_process_checker(readhandle).start()
return
else:
# We are the parent, close the read end
os.close(readhandle)
# Store the childpid
repy_process_id = childpid
# Start the nmstatusinterface
nmstatusinterface.launch(repy_process_id)
# Small internal error handler function
def _internal_error(message):
try:
print >> sys.stderr, message
sys.stderr.flush()
except:
pass
# Stop the nmstatusinterface, we don't want any more status updates
nmstatusinterface.stop()
# Kill repy
harshexit.portablekill(childpid)
try:
# Write out status information, repy was Stopped
statusstorage.write_status("Terminated")
except:
pass
try:
# Some OS's require that you wait on the pid at least once
# before they do any accounting
(pid, status) = os.waitpid(childpid,os.WNOHANG)
# Launch the resource monitor, if it fails determine why and restart if necessary
resource_monitor(childpid, writehandle)
except ResourceException, exp:
# Repy exceeded its resource limit, kill it
_internal_error(str(exp)+" Impolitely killing child!")
harshexit.harshexit(98)
except Exception, exp:
# There is some general error...
try:
(pid, status) = os.waitpid(childpid,os.WNOHANG)
except:
# This means that the process is dead
pass
# Check if this is repy exiting
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
sys.exit(0)
else:
_internal_error(str(exp)+" Monitor death! Impolitely killing child!")
raise
def resource_monitor(childpid, pipe_handle):
"""
<Purpose>
Function runs in a loop forever, checking resource usage and throttling CPU.
Checks CPU, memory, and disk.
<Arguments>
childpid:
The child pid, e.g. the pid of repy
pipe_handle:
A handle to the pipe to the repy process. Allows sending resource use information.
"""
# Get our pid
ourpid = os.getpid()
# Calculate how often disk should be checked
disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_LINUX / repy_constants.CPU_POLLING_FREQ_LINUX)
current_interval = -1 # What cycle are we on
# Store time of the last interval
last_time = getruntime()
last_CPU_time = 0
resume_time = 0
# Run forever...
while True:
########### Check CPU ###########
# Get elapsed time
currenttime = getruntime()
elapsedtime1 = currenttime - last_time # Calculate against last run
elapsedtime2 = currenttime - resume_time # Calculate since we last resumed repy
elapsedtime = min(elapsedtime1, elapsedtime2) # Take the minimum interval
last_time = currenttime # Save the current time
# Safety check, prevent ZeroDivisionError
if elapsedtime == 0.0:
continue
# Get the total cpu at this point
totalCPU = os_api.get_process_cpu_time(ourpid) # Our own usage
totalCPU += os_api.get_process_cpu_time(childpid) # Repy's usage
# Calculate percentage of CPU used
percentused = (totalCPU - last_CPU_time) / elapsedtime
# Do not throttle for the first interval, wrap around
# Store the totalCPU for the next cycle
if last_CPU_time == 0:
last_CPU_time = totalCPU
continue
else:
last_CPU_time = totalCPU
# Calculate stop time
stoptime = nanny.calculate_cpu_sleep_interval(nanny.get_resource_limit("cpu"), percentused, elapsedtime)
# If we are supposed to stop repy, then suspend, sleep and resume
if stoptime > 0.0:
# They must be punished by stopping
os.kill(childpid, signal.SIGSTOP)
# Sleep until time to resume
time.sleep(stoptime)
# And now they can start back up!
os.kill(childpid, signal.SIGCONT)
# Save the resume time
resume_time = getruntime()
# Send this information as a tuple containing the time repy was stopped and
# for how long it was stopped
write_message_to_pipe(pipe_handle, "repystopped", (currenttime, stoptime))
########### End Check CPU ###########
#
########### Check Memory ###########
# Get how much memory repy is using
memused = os_api.get_process_rss()
# Check if it is using too much memory
if memused > nanny.get_resource_limit("memory"):
raise ResourceException, "Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'."
########### End Check Memory ###########
#
########### Check Disk Usage ###########
# Increment our current cycle
    current_interval += 1
# Check if it is time to check the disk usage
if (current_interval % disk_interval) == 0:
# Reset the interval
current_interval = 0
# Calculate disk used
diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)
# Raise exception if we are over limit
if diskused > nanny.get_resource_limit("diskused"):
raise ResourceException, "Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'."
# Send the disk usage information, raw bytes used
write_message_to_pipe(pipe_handle, "diskused", diskused)
########### End Check Disk ###########
# Sleep before the next iteration
time.sleep(repy_constants.CPU_POLLING_FREQ_LINUX)
########### functions that help me figure out the os type ###########
# Calculates the system granularity
def calculate_granularity():
global granularity
if ostype in ["Windows"]:
# The Granularity of getTickCount is 1 millisecond
granularity = pow(10,-3)
elif ostype == "Linux":
# We don't know if the granularity is correct yet
correct_granularity = False
# How many times have we tested
tests = 0
# Loop while the granularity is incorrect, up to 10 times
while not correct_granularity and tests <= 10:
current_granularity = os_api.get_uptime_granularity()
uptime_pre = os_api.get_system_uptime()
time.sleep(current_granularity / 10)
uptime_post = os_api.get_system_uptime()
diff = uptime_post - uptime_pre
correct_granularity = int(diff / current_granularity) == (diff / current_granularity)
tests += 1
granularity = current_granularity
elif ostype == "Darwin":
granularity = os_api.get_uptime_granularity()
# Call init_ostype!!!
harshexit.init_ostype()
ostype = harshexit.ostype
osrealtype = harshexit.osrealtype
# Import the proper system wide API
if osrealtype == "Linux":
import linux_api as os_api
elif osrealtype == "Darwin":
import darwin_api as os_api
elif osrealtype == "FreeBSD":
import freebsd_api as os_api
elif ostype == "Windows":
# There is no real reason to do this, since windows is imported separately
import windows_api as os_api
else:
# This is a non-supported OS
raise UnsupportedSystemException, "The current Operating System is not supported! Fatal Error."
# Set granularity
calculate_granularity()
# For Windows, we need to initialize time.clock()
if ostype in ["Windows"]:
time.clock()
# Initialize getruntime for other platforms
else:
# Set the starttime to the initial uptime
starttime = getruntime()
last_uptime = starttime
# Reset elapsed time
elapsedtime = 0
|
|
"""
Interval datatypes
"""
import pkg_resources
pkg_resources.require( "bx-python" )
import logging, os, sys, time, sets, tempfile, shutil
import data
from galaxy import util
from galaxy.datatypes.sniff import *
from galaxy.web import url_for
from cgi import escape
import urllib
from bx.intervals.io import *
from galaxy.datatypes import metadata
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.tabular import Tabular
import math
log = logging.getLogger(__name__)
#
# contains the metadata columns and the header words that map to them;
# list aliases on the right side of the : in decreasing order of priority
#
alias_spec = {
'chromCol' : [ 'chrom' , 'CHROMOSOME' , 'CHROM', 'Chromosome Name' ],
'startCol' : [ 'start' , 'START', 'chromStart', 'txStart', 'Start Position (bp)' ],
'endCol' : [ 'end' , 'END' , 'STOP', 'chromEnd', 'txEnd', 'End Position (bp)' ],
'strandCol' : [ 'strand', 'STRAND', 'Strand' ],
'nameCol' : [ 'name', 'NAME', 'Name', 'name2', 'NAME2', 'Name2', 'Ensembl Gene ID', 'Ensembl Transcript ID', 'Ensembl Peptide ID' ]
}
# a little faster lookup
alias_helper = {}
for key, value in alias_spec.items():
for elem in value:
alias_helper[elem] = key
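# Illustrative only: after the loop above, a column header maps directly to
# its metadata slot, e.g.
#
#   >>> alias_helper['chromStart']
#   'startCol'
#   >>> alias_helper['Strand']
#   'strandCol'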
class Interval( Tabular ):
"""Tab delimited data containing interval information"""
file_ext = "interval"
"""Add metadata elements"""
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=3, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="nameCol", desc="Name/Identifier column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display apps"""
Tabular.__init__(self, **kwd)
self.add_display_app ( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
def init_meta( self, dataset, copy_from=None ):
Tabular.init_meta( self, dataset, copy_from=copy_from )
def set_peek( self, dataset, line_count=None ):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek( dataset.file_name )
if line_count is None:
dataset.blurb = "%s regions" % util.commaify( str( data.get_line_count( dataset.file_name ) ) )
else:
dataset.blurb = "%s regions" % util.commaify( str( line_count ) )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def set_meta( self, dataset, overwrite = True, first_line_is_header = False, **kwd ):
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = 0 )
"""Tries to guess from the line the location number of the column for the chromosome, region start-end and strand"""
if dataset.has_data():
for i, line in enumerate( file( dataset.file_name ) ):
line = line.rstrip( '\r\n' )
if line:
if ( first_line_is_header or line[0] == '#' ):
self.init_meta( dataset )
line = line.strip( '#' )
elems = line.split( '\t' )
valid = dict( alias_helper ) # shrinks
for index, col_name in enumerate( elems ):
if col_name in valid:
meta_name = valid[col_name]
if overwrite or not dataset.metadata.element_is_set( meta_name ):
setattr( dataset.metadata, meta_name, index+1 )
values = alias_spec[ meta_name ]
start = values.index( col_name )
for lower in values[ start: ]:
del valid[ lower ] # removes lower priority keys
break # Our metadata is set, so break out of the outer loop
else:
# Header lines in Interval files are optional. For example, BED is Interval but has no header.
# We'll make a best guess at the location of the metadata columns.
metadata_is_set = False
elems = line.split( '\t' )
if len( elems ) > 2:
for str in data.col1_startswith:
if line.lower().startswith( str ):
if overwrite or not dataset.metadata.element_is_set( 'chromCol' ):
dataset.metadata.chromCol = 1
try:
int( elems[1] )
if overwrite or not dataset.metadata.element_is_set( 'startCol' ):
dataset.metadata.startCol = 2
except:
pass # Metadata default will be used
try:
int( elems[2] )
if overwrite or not dataset.metadata.element_is_set( 'endCol' ):
dataset.metadata.endCol = 3
except:
pass # Metadata default will be used
if len( elems ) > 3:
try:
int( elems[3] )
except:
if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
dataset.metadata.nameCol = 4
if len( elems ) < 6 or elems[5] not in data.valid_strand:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 6
metadata_is_set = True
break
if metadata_is_set:
break # Our metadata is set, so break out of the outer loop
def get_estimated_display_viewport( self, dataset ):
"""Return a chrom, start, stop tuple for viewing a file."""
if dataset.has_data() and dataset.state == dataset.states.OK:
try:
c, s, e = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol
c, s, e = int(c)-1, int(s)-1, int(e)-1
peek = []
for idx, line in enumerate(file(dataset.file_name)):
if line[0] != '#':
peek.append( line.split() )
if idx > 10:
break
chr, start, stop = peek[0][c], int( peek[0][s] ), int( peek[0][e] )
for p in peek[1:]:
if p[0] == chr:
start = min( start, int( p[s] ) )
stop = max( stop, int( p[e] ) )
except Exception, exc:
#log.error( 'Viewport generation error -> %s ' % str(exc) )
(chr, start, stop) = 'chr1', 1, 1000
return (chr, str( start ), str( stop ))
else:
return ('', '', '')
def as_ucsc_display_file( self, dataset, **kwd ):
"""Returns file contents with only the bed data"""
fd, temp_name = tempfile.mkstemp()
c, s, e, t, n = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol or 0, dataset.metadata.nameCol or 0
c, s, e, t, n = int(c)-1, int(s)-1, int(e)-1, int(t)-1, int(n)-1
if t >= 0: # strand column (should) exists
for i, elems in enumerate( util.file_iter(dataset.file_name) ):
strand = "+"
name = "region_%i" % i
if n >= 0 and n < len( elems ): name = elems[n]
if t<len(elems): strand = elems[t]
tmp = [ elems[c], elems[s], elems[e], name, '0', strand ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
elif n >= 0: # name column (should) exists
for i, elems in enumerate( util.file_iter(dataset.file_name) ):
name = "region_%i" % i
if n >= 0 and n < len( elems ): name = elems[n]
tmp = [ elems[c], elems[s], elems[e], name ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
else:
for elems in util.file_iter(dataset.file_name):
tmp = [ elems[c], elems[s], elems[e] ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
os.close(fd)
return open(temp_name)
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append('<tr>')
for i in range( 1, dataset.metadata.columns+1 ):
if i == dataset.metadata.chromCol:
out.append( '<th>%s.Chrom</th>' % i )
elif i == dataset.metadata.startCol:
out.append( '<th>%s.Start</th>' % i )
elif i == dataset.metadata.endCol:
out.append( '<th>%s.End</th>' % i )
elif dataset.metadata.strandCol and i == dataset.metadata.strandCol:
out.append( '<th>%s.Strand</th>' % i )
elif dataset.metadata.nameCol and i == dataset.metadata.nameCol:
out.append( '<th>%s.Name</th>' % i )
else:
out.append( '<th>%s</th>' % i )
out.append('</tr>')
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % str( exc )
return out
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data:
viewport_tuple = self.get_estimated_display_viewport(dataset)
if viewport_tuple:
chrom = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
for site_name, site_url in util.get_ucsc_by_build(dataset.dbkey):
if site_name in app.config.ucsc_display_sites:
# HACK: UCSC doesn't support https, so force http even
# if our URL scheme is https. Making this work
# requires additional hackery in your upstream proxy.
# If UCSC ever supports https, remove this hack.
if base_url.startswith( 'https://' ):
base_url = base_url.replace( 'https', 'http', 1 )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s" % (base_url, url_for( controller='root' ), dataset.id, type) )
link = "%sdb=%s&position=%s:%s-%s&hgt.customText=%s" % (site_url, dataset.dbkey, chrom, start, stop, display_url )
ret_val.append( (site_name, link) )
return ret_val
def validate( self, dataset ):
"""Validate an interval file using the bx GenomicIntervalReader"""
errors = list()
c, s, e, t = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol
c, s, e, t = int(c)-1, int(s)-1, int(e)-1, int(t)-1
infile = open(dataset.file_name, "r")
reader = GenomicIntervalReader(
infile,
chrom_col = c,
start_col = s,
end_col = e,
strand_col = t)
while True:
try:
reader.next()
except ParseError, e:
errors.append(e)
except StopIteration:
infile.close()
return errors
def repair_methods( self, dataset ):
"""Return options for removing errors along with a description"""
return [("lines","Remove erroneous lines")]
def sniff( self, filename ):
"""
Checks for 'intervalness'
This format is mostly used by galaxy itself. Valid interval files should include
a valid header comment, but this seems to be loosely regulated.
>>> fname = get_test_fname( 'test_space.txt' )
>>> Interval().sniff( fname )
False
>>> fname = get_test_fname( 'interval.interval' )
>>> Interval().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
"""
If we got here, we already know the file is_column_based and is not bed,
so we'll just look for some valid data.
"""
for hdr in headers:
if hdr and not hdr[0].startswith( '#' ):
if len(hdr) < 3:
return False
try:
# Assume chrom start and end are in column positions 1 and 2
# respectively ( for 0 based columns )
check = int( hdr[1] )
check = int( hdr[2] )
except:
return False
return True
except:
return False
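    # Illustrative only: a minimal file that the sniff() above accepts -- an
    # optional '#' header followed by at least three tab-separated columns
    # with integer start/end values in columns 2 and 3:
    #
    #   #chrom  start           end             strand
    #   chr1    147962192       147962580       -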
def get_track_window(self, dataset, data, start, end):
"""
Assumes the incoming track data is sorted already.
"""
window = list()
for record in data:
fields = record.rstrip("\n\r").split("\t")
record_chrom = fields[dataset.metadata.chromCol-1]
record_start = int(fields[dataset.metadata.startCol-1])
record_end = int(fields[dataset.metadata.endCol-1])
if record_start < end and record_end > start:
window.append( (record_chrom, record_start, record_end) ) #Yes I did want to use a generator here, but it doesn't work downstream
return window
def get_track_resolution( self, dataset, start, end):
return None
def get_track_type( self ):
return "FeatureTrack"
class Bed( Interval ):
"""Tab delimited data in BED format"""
file_ext = "bed"
"""Add metadata elements"""
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=3, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
###do we need to repeat these? they are the same as should be inherited from interval type
def set_meta( self, dataset, overwrite = True, **kwd ):
"""Sets the metadata information for datasets previously determined to be in bed format."""
i = 0
if dataset.has_data():
for i, line in enumerate( file(dataset.file_name) ):
metadata_set = False
line = line.rstrip('\r\n')
if line and not line.startswith('#'):
elems = line.split('\t')
if len(elems) > 2:
for startswith in data.col1_startswith:
if line.lower().startswith( startswith ):
if len( elems ) > 3:
if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
dataset.metadata.nameCol = 4
if len(elems) < 6:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 6
metadata_set = True
break
if metadata_set: break
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = i )
def as_ucsc_display_file( self, dataset, **kwd ):
"""Returns file contents with only the bed data. If bed 6+, treat as interval."""
for line in open(dataset.file_name):
line = line.strip()
if line == "" or line.startswith("#"):
continue
fields = line.split('\t')
"""check to see if this file doesn't conform to strict genome browser accepted bed"""
try:
if len(fields) > 12:
return Interval.as_ucsc_display_file(self, dataset) #too many fields
if len(fields) > 6:
int(fields[6])
if len(fields) > 7:
int(fields[7])
if len(fields) > 8:
if int(fields[8]) != 0:
return Interval.as_ucsc_display_file(self, dataset)
if len(fields) > 9:
int(fields[9])
if len(fields) > 10:
fields2 = fields[10].rstrip(",").split(",") #remove trailing comma and split on comma
for field in fields2:
int(field)
if len(fields) > 11:
fields2 = fields[11].rstrip(",").split(",") #remove trailing comma and split on comma
for field in fields2:
int(field)
except: return Interval.as_ucsc_display_file(self, dataset)
#only check first line for proper form
break
try: return open(dataset.file_name)
except: return "This item contains no content"
def sniff( self, filename ):
"""
Checks for 'bedness'
BED lines have three required fields and nine additional optional fields.
The number of fields per line must be consistent throughout any single set of data in
an annotation track. The order of the optional fields is binding: lower-numbered
fields must always be populated if higher-numbered fields are used. The data type of
all 12 columns is:
1-str, 2-int, 3-int, 4-str, 5-int, 6-str, 7-int, 8-int, 9-int or list, 10-int, 11-list, 12-list
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format1
>>> fname = get_test_fname( 'test_tab.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'interval1.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'complete.bed' )
>>> Bed().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if not headers: return False
for hdr in headers:
if (hdr[0] == '' or hdr[0].startswith( '#' )):
continue
valid_col1 = False
if len(hdr) < 3 or len(hdr) > 12:
return False
for str in data.col1_startswith:
if hdr[0].lower().startswith(str):
valid_col1 = True
break
if valid_col1:
try:
int( hdr[1] )
int( hdr[2] )
except:
return False
if len( hdr ) > 4:
#hdr[3] is a string, 'name', which defines the name of the BED line - difficult to test for this.
#hdr[4] is an int, 'score', a score between 0 and 1000.
try:
if int( hdr[4] ) < 0 or int( hdr[4] ) > 1000: return False
except:
return False
if len( hdr ) > 5:
#hdr[5] is strand
if hdr[5] not in data.valid_strand: return False
if len( hdr ) > 6:
#hdr[6] is thickStart, the starting position at which the feature is drawn thickly.
try: int( hdr[6] )
except: return False
if len( hdr ) > 7:
#hdr[7] is thickEnd, the ending position at which the feature is drawn thickly
try: int( hdr[7] )
except: return False
if len( hdr ) > 8:
#hdr[8] is itemRgb, an RGB value of the form R,G,B (e.g. 255,0,0). However, this could also be an int (e.g., 0)
try: int( hdr[8] )
except:
try: hdr[8].split(',')
except: return False
if len( hdr ) > 9:
#hdr[9] is blockCount, the number of blocks (exons) in the BED line.
try: block_count = int( hdr[9] )
except: return False
if len( hdr ) > 10:
#hdr[10] is blockSizes - A comma-separated list of the block sizes.
                        #Sometimes the block_sizes and block_starts lists end in extra commas
try: block_sizes = hdr[10].rstrip(',').split(',')
except: return False
if len( hdr ) > 11:
#hdr[11] is blockStarts - A comma-separated list of block starts.
try: block_starts = hdr[11].rstrip(',').split(',')
except: return False
if len(block_sizes) != block_count or len(block_starts) != block_count: return False
else: return False
return True
except: return False
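    # Illustrative only: a minimal 6-column line that satisfies the checks in
    # sniff() above (chrom prefix, integer start/end, score in 0-1000, valid
    # strand), assuming 'chr' is in data.col1_startswith and '+' is in
    # data.valid_strand:
    #
    #   chr1    1000    5000    feature_1       960     +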
class Gff( Tabular ):
"""Tab delimited data in Gff format"""
file_ext = "gff"
column_names = [ 'Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Group' ]
"""Add metadata elements"""
MetadataElement( name="columns", default=9, desc="Number of columns", readonly=True, visible=False )
MetadataElement( name="column_types", default=['str','str','str','int','int','int','str','str','str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Tabular.__init__(self, **kwd)
self.add_display_app ( 'c_elegans', 'display in Wormbase', 'as_gbrowse_display_file', 'gbrowse_links' )
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
if len(elems) == 9:
try:
int( elems[3] )
int( elems[4] )
break
except:
pass
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = i )
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append( '<tr>' )
for i, name in enumerate( self.column_names ):
out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
def as_gbrowse_display_file( self, dataset, **kwd ):
"""Returns file contents that can be displayed in GBrowse apps."""
return open( dataset.file_name )
def get_estimated_display_viewport( self, dataset ):
"""
Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
formats. This function should correctly handle both...
"""
if dataset.has_data() and dataset.state == dataset.states.OK:
try:
seqid = ''
start = 2147483647 # Maximum value of a signed 32 bit integer ( 2**31 - 1 )
stop = 0
for i, line in enumerate( file( dataset.file_name ) ):
line = line.rstrip( '\r\n' )
if not line:
continue
if line.startswith( '##sequence-region' ): # ##sequence-region IV 6000000 6030000
elems = line.split()
seqid = elems[1] # IV
start = elems[2] # 6000000
stop = elems[3] # 6030000
break
if not line.startswith( '#' ):
elems = line.split( '\t' )
if not seqid:
# We can only set the viewport for a single chromosome
seqid = elems[0]
if seqid == elems[0]:
# Make sure we have not spanned chromosomes
start = min( start, int( elems[3] ) )
stop = max( stop, int( elems[4] ) )
else:
# We've spanned a chromosome
break
if i > 10:
break
except:
seqid, start, stop = ( '', '', '' )
return ( seqid, str( start ), str( stop ) )
else:
return ( '', '', '' )
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data:
viewport_tuple = self.get_estimated_display_viewport( dataset )
seqid = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
if seqid and start and stop:
for site_name, site_url in util.get_gbrowse_sites_by_build( dataset.dbkey ):
if site_name in app.config.gbrowse_display_sites:
link = "%s?start=%s&stop=%s&ref=%s&dbkey=%s" % ( site_url, start, stop, seqid, dataset.dbkey )
ret_val.append( ( site_name, link ) )
return ret_val
def sniff( self, filename ):
"""
Determines whether the file is in gff format
GFF lines have nine required fields that must be tab-separated.
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format3
>>> fname = get_test_fname( 'gff_version_3.gff' )
>>> Gff().sniff( fname )
False
>>> fname = get_test_fname( 'test.gff' )
>>> Gff().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '2' ) < 0:
return False
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
int( hdr[4] )
except:
return False
if hdr[5] != '.':
try:
score = int(hdr[5])
except:
return False
if (score < 0 or score > 1000):
return False
if hdr[6] not in data.valid_strand:
return False
return True
except:
return False
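    # Illustrative only: a nine-column line of the kind sniff() above accepts
    # (integer start/end in columns 4 and 5, '.' or a 0-1000 score, and a
    # strand value assumed to be in data.valid_strand):
    #
    #   chrIV   curated exon    5506900 5506996 .       +       .       Transcript "B0273.1"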
class Gff3( Gff ):
"""Tab delimited data in Gff3 format"""
file_ext = "gff3"
valid_gff3_strand = ['+', '-', '.', '?']
valid_gff3_phase = ['.', '0', '1', '2']
column_names = [ 'Seqid', 'Source', 'Type', 'Start', 'End', 'Score', 'Strand', 'Phase', 'Attributes' ]
"""Add metadata elements"""
MetadataElement( name="column_types", default=['str','str','str','int','int','float','str','int','list'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Gff.__init__(self, **kwd)
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
valid_start = False
valid_end = False
if len( elems ) == 9:
try:
start = int( elems[3] )
valid_start = True
except:
if elems[3] == '.':
valid_start = True
try:
end = int( elems[4] )
valid_end = True
except:
if elems[4] == '.':
valid_end = True
strand = elems[6]
phase = elems[7]
if valid_start and valid_end and start < end and strand in self.valid_gff3_strand and phase in self.valid_gff3_phase:
break
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = i )
def sniff( self, filename ):
"""
Determines whether the file is in gff version 3 format
GFF 3 format:
1) adds a mechanism for representing more than one level
of hierarchical grouping of features and subfeatures.
2) separates the ideas of group membership and feature name/id
3) constrains the feature type field to be taken from a controlled
vocabulary.
4) allows a single feature, such as an exon, to belong to more than
one group at a time.
5) provides an explicit convention for pairwise alignments
6) provides an explicit convention for features that occupy disjunct regions
The format consists of 9 columns, separated by tabs (NOT spaces).
Undefined fields are replaced with the "." character, as described in the original GFF spec.
For complete details see http://song.sourceforge.net/gff3.shtml
>>> fname = get_test_fname( 'test.gff' )
>>> Gff3().sniff( fname )
False
>>> fname = get_test_fname('gff_version_3.gff')
>>> Gff3().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '3' ) >= 0:
return True
elif hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '3' ) < 0:
return False
# Header comments may have been stripped, so inspect the data
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
except:
if hdr[3] != '.':
return False
try:
int( hdr[4] )
except:
if hdr[4] != '.':
return False
if hdr[5] != '.':
try:
score = int(hdr[5])
except:
return False
if (score < 0 or score > 1000):
return False
if hdr[6] not in self.valid_gff3_strand:
return False
if hdr[7] not in self.valid_gff3_phase:
return False
return True
except:
return False
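    # Illustrative only: a GFF3 line of the kind sniff() above accepts, with
    # '.' for an undefined score and a phase taken from valid_gff3_phase:
    #
    #   ctg123  .       exon    1300    1500    .       +       .       ID=exon00001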
class Wiggle( Tabular ):
"""Tab delimited data in wiggle format"""
file_ext = "wig"
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
def make_html_table( self, dataset ):
return Tabular.make_html_table( self, dataset, skipchars=['track', '#'] )
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
try:
float( elems[0] ) #"Wiggle track data values can be integer or real, positive or negative values"
break
except:
do_break = False
                    for col_prefix in data.col1_startswith:
                        if elems[0].lower().startswith(col_prefix):
                            do_break = True
                            break
if do_break:
break
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = i )
def sniff( self, filename ):
"""
        Determines whether the file is in wiggle format
        The .wig format is line-oriented. Wiggle data is preceded by a track definition line,
which adds a number of options for controlling the default display of this track.
Following the track definition line is the track data, which can be entered in several
different formats.
The track definition line begins with the word 'track' followed by the track type.
The track type with version is REQUIRED, and it currently must be wiggle_0. For example,
track type=wiggle_0...
For complete details see http://genome.ucsc.edu/goldenPath/help/wiggle.html
>>> fname = get_test_fname( 'interval1.bed' )
>>> Wiggle().sniff( fname )
False
>>> fname = get_test_fname( 'wiggle.wig' )
>>> Wiggle().sniff( fname )
True
"""
headers = get_headers( filename, None )
try:
for hdr in headers:
if len(hdr) > 1 and hdr[0] == 'track' and hdr[1].startswith('type=wiggle'):
return True
return False
except:
return False
def get_track_window(self, dataset, data, start, end):
"""
Assumes we have a numpy file.
"""
# Maybe if we import here people will still be able to use Galaxy when numpy kills it
pkg_resources.require("numpy>=1.2.1")
#from numpy.lib import format
import numpy
range = end - start
# Determine appropriate resolution to plot ~1000 points
resolution = ( 10 ** math.ceil( math.log10( range / 1000 ) ) )
# Restrict to valid range
resolution = min( resolution, 100000 )
resolution = max( resolution, 1 )
# Memory map the array (don't load all the data)
data = numpy.load( data )
# Grab just what we need
t_start = math.floor( start / resolution )
t_end = math.ceil( end / resolution )
x = numpy.arange( t_start, t_end ) * resolution
y = data[ t_start : t_end ]
return zip(x.tolist(), y.tolist())
def get_track_resolution( self, dataset, start, end):
range = end - start
# Determine appropriate resolution to plot ~1000 points
resolution = math.ceil( 10 ** math.ceil( math.log10( range / 1000 ) ) )
# Restrict to valid range
resolution = min( resolution, 100000 )
resolution = max( resolution, 1 )
return resolution
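    # Worked example for get_track_resolution (illustrative numbers): with
    # start=0 and end=1234567, range / 1000 is about 1234, so the resolution is
    # 10 ** ceil(log10(1234)) == 10000, already inside the [1, 100000] clamp;
    # each plotted point then summarises 10000 bases (~123 points in total).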
def get_track_type( self ):
return "LineTrack"
class CustomTrack ( Tabular ):
"""UCSC CustomTrack"""
file_ext = "customtrack"
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display app"""
Tabular.__init__(self, **kwd)
self.add_display_app ( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
def set_readonly_meta( self, dataset, skip=1, **kwd ):
"""Resets the values of readonly metadata elements."""
Tabular.set_readonly_meta( self, dataset, skip = skip, **kwd )
def set_meta( self, dataset, overwrite = True, **kwd ):
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = 1 )
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return Tabular.make_html_table( self, dataset, skipchars=['track', '#'] )
def get_estimated_display_viewport( self, dataset ):
try:
wiggle_format = False
for line in open(dataset.file_name):
if (line.startswith("chr") or line.startswith("scaffold")):
start = line.split("\t")[1].replace(",","")
end = line.split("\t")[2].replace(",","")
if int(start) < int(end):
value = ( line.split("\t")[0], start, end )
else:
value = ( line.split("\t")[0], end, start )
break
elif (line.startswith('variableStep')):
# wiggle format
wiggle_format = True
wig_chr = line.split()[1].split('=')[1]
if not wig_chr.startswith("chr"):
value = ('', '', '')
break
elif wiggle_format:
# wiggle format
if line.split("\t")[0].isdigit():
start = line.split("\t")[0]
end = str(int(start) + 1)
value = (wig_chr, start, end)
else:
value = (wig_chr, '', '')
break
return value #returns the co-ordinates of the 1st track/dataset
except:
#return "."
return ('', '', '')
def as_ucsc_display_file( self, dataset ):
return open(dataset.file_name)
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data:
viewport_tuple = self.get_estimated_display_viewport(dataset)
if viewport_tuple:
chrom = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
for site_name, site_url in util.get_ucsc_by_build(dataset.dbkey):
if site_name in app.config.ucsc_display_sites:
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s" % (base_url, url_for( controller='root' ), dataset.id, type) )
link = "%sdb=%s&position=%s:%s-%s&hgt.customText=%s" % (site_url, dataset.dbkey, chrom, start, stop, display_url )
ret_val.append( (site_name, link) )
return ret_val
def sniff( self, filename ):
"""
Determines whether the file is in customtrack format.
CustomTrack files are built within Galaxy and are basically bed or interval files with the first line looking
something like this.
track name="User Track" description="User Supplied Track (from Galaxy)" color=0,0,0 visibility=1
>>> fname = get_test_fname( 'complete.bed' )
>>> CustomTrack().sniff( fname )
False
>>> fname = get_test_fname( 'ucsc.customtrack' )
>>> CustomTrack().sniff( fname )
True
"""
headers = get_headers( filename, None )
first_line = True
for hdr in headers:
if first_line:
first_line = False
try:
if hdr[0].startswith('track'):
color_found = False
visibility_found = False
for elem in hdr[1:]:
if elem.startswith('color'): color_found = True
if elem.startswith('visibility'): visibility_found = True
if color_found and visibility_found: break
if not color_found or not visibility_found: return False
else: return False
except: return False
else:
try:
if hdr[0] and not hdr[0].startswith( '#' ):
if len( hdr ) < 3:
return False
try:
int( hdr[1] )
int( hdr[2] )
except:
return False
except:
return False
return True
class GBrowseTrack ( Tabular ):
"""GMOD GBrowseTrack"""
file_ext = "gbrowsetrack"
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Tabular.__init__(self, **kwd)
self.add_display_app ('c_elegans', 'display in Wormbase', 'as_gbrowse_display_file', 'gbrowse_links' )
def set_readonly_meta( self, dataset, skip=1, **kwd ):
"""Resets the values of readonly metadata elements."""
Tabular.set_readonly_meta( self, dataset, skip = skip, **kwd )
def set_meta( self, dataset, overwrite = True, **kwd ):
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = 1 )
def make_html_table( self, dataset ):
return Tabular.make_html_table( self, dataset, skipchars=['track', '#'] )
def get_estimated_display_viewport( self, dataset ):
#TODO: fix me...
return ('', '', '')
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data:
viewport_tuple = self.get_estimated_display_viewport(dataset)
if viewport_tuple:
chrom = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
for site_name, site_url in util.get_gbrowse_sites_by_build(dataset.dbkey):
if site_name in app.config.gbrowse_display_sites:
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s" % (base_url, url_for( controller='root' ), dataset.id, type) )
link = "%sname=%s&ref=%s:%s..%s&eurl=%s" % (site_url, dataset.dbkey, chrom, start, stop, display_url )
ret_val.append( (site_name, link) )
return ret_val
def as_gbrowse_display_file( self, dataset, **kwd ):
"""Returns file contents that can be displayed in GBrowse apps."""
#TODO: fix me...
return open(dataset.file_name)
def sniff( self, filename ):
"""
Determines whether the file is in gbrowsetrack format.
GBrowseTrack files are built within Galaxy.
TODO: Not yet sure what this file will look like. Fix this sniffer and add some unit tests here as soon as we know.
"""
return False
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Gandi Live driver base classes
"""
import json
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.common.types import ProviderError
from libcloud.utils.py3 import httplib
__all__ = [
"API_HOST",
"GandiLiveBaseError",
"JsonParseError",
"ResourceNotFoundError",
"InvalidRequestError",
"ResourceConflictError",
"GandiLiveResponse",
"GandiLiveConnection",
"BaseGandiLiveDriver",
]
API_HOST = "dns.api.gandi.net"
class GandiLiveBaseError(ProviderError):
"""
Exception class for Gandi Live driver
"""
pass
class JsonParseError(GandiLiveBaseError):
pass
# Example:
# {
# "code": 404,
# "message": "Unknown zone",
# "object": "LocalizedHTTPNotFound",
# "cause": "Not Found"
# }
class ResourceNotFoundError(GandiLiveBaseError):
pass
# Example:
# {
# "code": 400,
# "message": "zone or zone_uuid must be set",
# "object": "HTTPBadRequest",
# "cause": "No zone set.",
# "errors": [
# {
# "location": "body",
# "name": "zone_uuid",
# "description": "\"FAKEUUID\" is not a UUID"
# }
# ]
# }
class InvalidRequestError(GandiLiveBaseError):
pass
# Examples:
# {
# "code": 409,
# "message": "Zone Testing already exists",
# "object": "HTTPConflict",
# "cause": "Duplicate Entry"
# }
# {
# "code": 409,
# "message": "The domain example.org already exists",
# "object": "HTTPConflict",
# "cause": "Duplicate Entry"
# }
# {
# "code": 409,
# "message": "This zone is still used by 1 domains",
# "object": "HTTPConflict",
# "cause": "In use"
# }
class ResourceConflictError(GandiLiveBaseError):
pass
class GandiLiveResponse(JsonResponse):
"""
A Base Gandi Live Response class to derive from.
"""
def success(self):
"""
Determine if our request was successful.
For the Gandi Live response class, tag all responses as successful and
raise appropriate Exceptions from parse_body.
:return: C{True}
"""
return True
def parse_body(self):
"""
Parse the JSON response body, or raise exceptions as appropriate.
:return: JSON dictionary
:rtype: ``dict``
"""
json_error = False
try:
body = json.loads(self.body)
except Exception:
# If there is both a JSON parsing error and an unsuccessful http
# response (like a 404), we want to raise the http error and not
# the JSON one, so don't raise JsonParseError here.
body = self.body
json_error = True
# Service does not appear to return HTTP 202 Accepted for anything.
valid_http_codes = [
httplib.OK,
httplib.CREATED,
]
if self.status in valid_http_codes:
if json_error:
raise JsonParseError(body, self.status)
else:
return body
elif self.status == httplib.NO_CONTENT:
# Parse error for empty body is acceptable, but a non-empty body
# is not.
if len(body) > 0:
msg = '"No Content" response contained content'
raise GandiLiveBaseError(msg, self.status)
else:
return {}
elif self.status == httplib.NOT_FOUND:
message = self._get_error(body, json_error)
raise ResourceNotFoundError(message, self.status)
elif self.status == httplib.BAD_REQUEST:
message = self._get_error(body, json_error)
raise InvalidRequestError(message, self.status)
elif self.status == httplib.CONFLICT:
message = self._get_error(body, json_error)
raise ResourceConflictError(message, self.status)
else:
message = self._get_error(body, json_error)
raise GandiLiveBaseError(message, self.status)
# Errors are not described at all in Gandi's official documentation.
# It appears when an error arises, a JSON object is returned along with
# an HTTP 4xx class code. The object is structured as:
# {
# code: <code>,
# object: <object>,
# message: <message>,
# cause: <cause>,
# errors: [
# {
# location: <error-location>,
# name: <error-name>,
# description: <error-description>
# }
# ]
# }
# where
# <code> is a number equal to the HTTP response status code
# <object> is a string with some internal name for the status code
# <message> is a string detailing what the problem is
# <cause> is a string that comes from a set of succinct problem summaries
# errors is optional; if present:
# <error-location> is a string for which part of the request to look in
# <error-name> is a string naming the parameter
# <error-description> is a string detailing what the problem is
# Here we ignore object and combine message and cause along with an error
# if one or more exists.
def _get_error(self, body, json_error):
"""
Get the error code and message from a JSON response.
Incorporate the first error if there are multiple errors.
:param body: The body of the JSON response dictionary
:type body: ``dict``
:return: String containing error message
:rtype: ``str``
"""
if not json_error and "cause" in body:
message = "%s: %s" % (body["cause"], body["message"])
if "errors" in body:
err = body["errors"][0]
message = "%s (%s in %s: %s)" % (
message,
err.get("location"),
err.get("name"),
err.get("description"),
)
else:
message = body
return message
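    # Illustrative formatting example: for the HTTP 400 response shown in the
    # comment block above, _get_error would return roughly
    #   'No zone set.: zone or zone_uuid must be set (body in zone_uuid: "FAKEUUID" is not a UUID)'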
class GandiLiveConnection(ConnectionKey):
"""
Connection class for the Gandi Live driver
"""
responseCls = GandiLiveResponse
host = API_HOST
def add_default_headers(self, headers):
"""
Returns default headers as a dictionary.
"""
headers["Content-Type"] = "application/json"
headers["X-Api-Key"] = self.key
return headers
def encode_data(self, data):
"""Encode data to JSON"""
return json.dumps(data)
class BaseGandiLiveDriver(object):
"""
Gandi Live base driver
"""
connectionCls = GandiLiveConnection
name = "GandiLive"
|
|
#www.stuffaboutcode.com
#Raspberry Pi, Minecraft Twitter
#import the minecraft.py module from the minecraft directory
import minecraft.minecraft as minecraft
#import minecraft block module
import minecraft.block as block
#import time, so delays can be used
import time
#import oauth for twitter security
from oauth.oauth import OAuthRequest, OAuthSignatureMethod_HMAC_SHA1
from hashlib import md5
#required by twitter stream class
import json, random, math, urllib, urllib2, pycurl
#Letters used in the program, hashes are turned into blocks
letters = {"a":
"###" + "\n" +
"# #" + "\n" +
"###" + "\n" +
"# #" + "\n" +
"# #" + "\n",
"b":
"###" + "\n" +
"# #" + "\n" +
"###" + "\n" +
"# #" + "\n" +
"###" + "\n",
"c":
"###" + "\n" +
"#" + "\n" +
"#" + "\n" +
"#" + "\n" +
"###" + "\n",
"d":
"##" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"##" + "\n",
"e":
"###" + "\n" +
"#" + "\n" +
"###" + "\n" +
"#" + "\n" +
"###" + "\n",
"f":
"###" + "\n" +
"#" + "\n" +
"###" + "\n" +
"#" + "\n" +
"#" + "\n",
"g":
"###" + "\n" +
"# #" + "\n" +
"###" + "\n" +
" #" + "\n" +
"###" + "\n",
"h":
"# #" + "\n" +
"# #" + "\n" +
"###" + "\n" +
"# #" + "\n" +
"# #" + "\n",
"i":
"###" + "\n" +
" #" + "\n" +
" #" + "\n" +
" #" + "\n" +
"###" + "\n",
"j":
"###" + "\n" +
" #" + "\n" +
" #" + "\n" +
" #" + "\n" +
"##" + "\n",
"k":
"# #" + "\n" +
"##" + "\n" +
"#" + "\n" +
"##" + "\n" +
"# #" + "\n",
"l":
"#" + "\n" +
"#" + "\n" +
"#" + "\n" +
"#" + "\n" +
"###" + "\n",
"m":
"# #" + "\n" +
"###" + "\n" +
"###" + "\n" +
"# #" + "\n" +
"# #" + "\n",
"n":
"###" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n",
"o":
"###" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"###" + "\n",
"p":
"###" + "\n" +
"# #" + "\n" +
"###" + "\n" +
"#" + "\n" +
"#" + "\n",
"q":
"###" + "\n" +
"# #" + "\n" +
"###" + "\n" +
" #" + "\n" +
" #" + "\n",
"r":
"###" + "\n" +
"# #" + "\n" +
"##" + "\n" +
"# #" + "\n" +
"# #" + "\n",
"s":
"###" + "\n" +
"#" + "\n" +
"###" + "\n" +
" #" + "\n" +
"###" + "\n",
"t":
"###" + "\n" +
" #" + "\n" +
" #" + "\n" +
" #" + "\n" +
" #" + "\n",
"u":
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"###" + "\n",
"v":
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
" #" + "\n",
"w":
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"###" + "\n" +
"###" + "\n",
"x":
"# #" + "\n" +
" #" + "\n" +
" #" + "\n" +
" #" + "\n" +
"# #" + "\n",
"y":
"# #" + "\n" +
"# #" + "\n" +
"###" + "\n" +
" #" + "\n" +
"###" + "\n",
"z":
"###" + "\n" +
" #" + "\n" +
" #" + "\n" +
"#" + "\n" +
"###" + "\n",
" ":
" ",
"1":
" #" + "\n" +
"##" + "\n" +
" #" + "\n" +
" #" + "\n" +
"###" + "\n",
"2":
"###" + "\n" +
" #" + "\n" +
"###" + "\n" +
"#" + "\n" +
"###" + "\n",
"3":
"###" + "\n" +
" #" + "\n" +
"###" + "\n" +
" #" + "\n" +
"###" + "\n",
"4":
"#" + "\n" +
"#" + "\n" +
"# #" + "\n" +
"###" + "\n" +
" #" + "\n",
"5":
"###" + "\n" +
"#" + "\n" +
"###" + "\n" +
" #" + "\n" +
"###" + "\n",
"6":
"###" + "\n" +
"#" + "\n" +
"###" + "\n" +
"# #" + "\n" +
"###" + "\n",
"7":
"###" + "\n" +
" # " + "\n" +
" #" + "\n" +
" #" + "\n" +
"#" + "\n",
"8":
"###" + "\n" +
"# #" + "\n" +
"###" + "\n" +
"# #" + "\n" +
"###" + "\n",
"9":
"###" + "\n" +
"# #" + "\n" +
"###" + "\n" +
" #" + "\n" +
"###" + "\n",
"0":
"###" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"# #" + "\n" +
"###" + "\n",
"!":
" # " + "\n" +
" # " + "\n" +
" # " + "\n" +
" " + "\n" +
" # " + "\n",
"?":
"###" + "\n" +
" #" + "\n" +
" ##" + "\n" +
" " + "\n" +
" # " + "\n",
".":
" " + "\n" +
" " + "\n" +
" " + "\n" +
" " + "\n" +
" # " + "\n",
",":
" " + "\n" +
" " + "\n" +
" " + "\n" +
" #" + "\n" +
" # " + "\n",
"/":
" #" + "\n" +
" #" + "\n" +
" # " + "\n" +
"# " + "\n" +
"# " + "\n",
":":
" " + "\n" +
" # " + "\n" +
" " + "\n" +
" # " + "\n" +
" " + "\n",
"@":
"###" + "\n" +
"# #" + "\n" +
"## " + "\n" +
"# " + "\n" +
"###" + "\n",
"'":
" # " + "\n" +
" # " + "\n" +
" " + "\n" +
" " + "\n" +
" " + "\n",
"#":
" # " + "\n" +
"###" + "\n" +
" # " + "\n" +
"###" + "\n" +
" # " + "\n"
}
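# Illustrative note: each bitmap above is scanned character by character by
# writeLetterToMC below -- '#' places a LETTERBLOCKID block, ' ' places air,
# and '\n' moves the drawing position down one block and back to the letter's
# left edge.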
# twitter oauth keys, get yours from dev.twitter.com
CONSUMER_KEY = '#############'
CONSUMER_SECRET = '###############'
ACCESS_TOKEN = '###############'
ACCESS_TOKEN_SECRET = '###############'
# constants to position the text lines in minecraft
LETTERBLOCKID = block.COBBLESTONE.id
LETTERBLOCKDATA = 0
#These are the lines where the tweets will be written
TEXTLINES = {0:[[minecraft.Vec3(-95, 55, -95), minecraft.Vec3(+95, 55, -95)],
[minecraft.Vec3(+95, 55, -95), minecraft.Vec3(+95, 55, +95)],
[minecraft.Vec3(+95, 55, +95), minecraft.Vec3(-95, 55, +95)],
[minecraft.Vec3(-95, 55, +95), minecraft.Vec3(-95, 55, -95)]],
1:[[minecraft.Vec3(-95, 47, -95), minecraft.Vec3(+95, 47, -95)],
[minecraft.Vec3(+95, 47, -95), minecraft.Vec3(+95, 47, +95)],
[minecraft.Vec3(+95, 47, +95), minecraft.Vec3(-95, 47, +95)],
[minecraft.Vec3(-95, 47, +95), minecraft.Vec3(-95, 47, -95)]],
2:[[minecraft.Vec3(-95, 39, -95), minecraft.Vec3(+95, 39, -95)],
[minecraft.Vec3(+95, 39, -95), minecraft.Vec3(+95, 39, +95)],
[minecraft.Vec3(+95, 39, +95), minecraft.Vec3(-95, 39, +95)],
[minecraft.Vec3(-95, 39, +95), minecraft.Vec3(-95, 39, -95)]]
}
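# Illustrative summary of the structure above: each numbered entry is one line
# of text, defined as four [start, end] wall segments that trace a square at a
# fixed height (y = 55, 47 and 39), so a long tweet wraps around all four walls.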
LINEHEIGHT = 5
LETTERWIDTH = 3
#Class for creating text in minecraft
class MinecraftText:
def __init__(self, mc):
self.mc = mc
self.currentLine = 0
        self.currentTopLeft = TEXTLINES[self.currentLine][0][0]
#writes a line to minecraft at the next position
def writeNextLine(self, line):
#Output message
self.clearLine(self.currentLine)
self.writeLineToMC(line, self.currentLine)
self.currentLine+=1
#if I have reached the top line, reset it
        if self.currentLine == len(TEXTLINES): self.currentLine = 0
#writes a line of text into minecraft
def writeLineToMC(self, line, lineNumber):
#get the textlines
textlines = TEXTLINES[lineNumber]
#current testline
currentTextLine = 0
#set the cursor position
currentCursor = minecraft.Vec3(textlines[currentTextLine][0].x,
textlines[currentTextLine][0].y,
textlines[currentTextLine][0].z)
#setup x and z directions
xDirection, zDirection = 1, 0
nextTextLine = False
#make line lower case
line = line.lower()
#write the line to minecraft
for character in line:
#create the character in minecraft
self.writeLetterToMC(character, currentCursor, xDirection, zDirection)
#move the 'cursor' on
# check if the current cursor pos is outside the textLine,
# if so move to the next text line
if currentTextLine == 0:
currentCursor.x = currentCursor.x + LETTERWIDTH + 1
if currentCursor.x > textlines[currentTextLine][1].x: nextTextLine = True
if currentTextLine == 1:
currentCursor.z = currentCursor.z + LETTERWIDTH + 14
if currentCursor.z > textlines[currentTextLine][1].z:
nextTextLine = True
if currentTextLine == 2:
currentCursor.x = currentCursor.x - LETTERWIDTH + 14
if currentCursor.x < textlines[currentTextLine][1].x: nextTextLine = True
if currentTextLine == 3:
currentCursor.z = currentCursor.z - LETTERWIDTH + 14
#if currentCursor.z < textlines[currentTextLine][1].z: nextTextLine = True
if nextTextLine == True:
nextTextLine = False
#next testline
currentTextLine+=1
#set the cursor position
currentCursor = minecraft.Vec3(textlines[currentTextLine][0].x,
textlines[currentTextLine][0].y,
textlines[currentTextLine][0].z)
#setup x and z diections
if currentTextLine == 1: xDirection, zDirection = 0, 1
if currentTextLine == 2: xDirection, zDirection = -1, 0
if currentTextLine == 3: xDirection, zDirection = 0, -1
#create a letter in minecraft
def writeLetterToMC(self, character, cursorTopLeft, xDirection, zDirection):
# the current position is where we have reached in creating the letter
currentPos = minecraft.Vec3(cursorTopLeft.x, cursorTopLeft.y, cursorTopLeft.z)
# is the character in my letter list?
if (character in letters.keys()):
# get the hashes for the character
letterString = letters[character]
#loop through all the hashes, creating block
for digit in letterString:
if digit == "#":
#print "create block x = " + str(currentPos.x) + " y = " + str(currentPos.y)
self.mc.setBlock(currentPos.x, currentPos.y, currentPos.z, LETTERBLOCKID, LETTERBLOCKDATA)
currentPos.x = currentPos.x + xDirection
currentPos.z = currentPos.z + zDirection
if digit == " ":
self.mc.setBlock(currentPos.x, currentPos.y, currentPos.z, block.AIR.id)
currentPos.x = currentPos.x + xDirection
currentPos.z = currentPos.z + zDirection
if digit == "\n":
currentPos.y = currentPos.y - 1
currentPos.x = cursorTopLeft.x
currentPos.z = cursorTopLeft.z
#clears a line of text in minecraft
def clearLine(self, lineNumber):
for textline in TEXTLINES[lineNumber]:
self.mc.setBlocks(textline[0].x,
textline[0].y,
textline[0].z,
textline[1].x,
textline[1].y - LINEHEIGHT,
textline[1].z,
block.AIR.id)
# class for managing oauth tokens
class Token(object):
def __init__(self,key,secret):
self.key = key
self.secret = secret
def _generate_nonce(self):
random_number = ''.join(str(random.randint(0, 9)) for i in range(40))
m = md5(str(time.time()) + str(random_number))
return m.hexdigest()
# twitter client
class MinecraftTwitterStreamClient:
def __init__(self, streamURL):
#Connect to minecraft by creating the minecraft object
# - minecraft needs to be running and in a game
self.mc = minecraft.Minecraft.create()
#Post a message to the minecraft chat window
self.mc.postToChat("Minecraft twitter stream active")
#create my minecraft text screen object
self.mcText = MinecraftText(self.mc)
#setup connection to twitter stream
self.streamURL = streamURL
self.buffer = ""
self.conn = pycurl.Curl()
self.conn.setopt(pycurl.URL, self.streamURL)
self.conn.setopt(pycurl.WRITEFUNCTION, self.on_receive)
self.conn.perform()
#this method is called each time some data arrives on the stream
def on_receive(self, data):
# debug - to see when this is called sys.stdout.write(".")
self.buffer += data
if data.endswith("\n") and self.buffer.strip():
content = json.loads(self.buffer)
self.buffer = ""
#debug - output json from buffer print content
#friends data - store for later
if "friends" in content:
self.friends = content["friends"]
#text (tweet) arrives
if "text" in content:
print u"{0[user][name]}: {0[text]}".format(content).encode('utf-8')
tweet = u"{0[user][name]}: {0[text]}".format(content).encode('utf-8')
self.mcText.writeNextLine(tweet)
#speakSpeechFromText(u"A tweet from {0[user][name]}".format(content))
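    # Illustrative note: the user stream delivers newline-delimited JSON, which
    # is why on_receive buffers chunks until a '\n' arrives; the first message
    # carries a 'friends' list, while tweet messages carry 'text' and 'user'.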
# get the url needed to open the twitter user stream, including signature after authentication
def getTwitterUserStreamURL():
STREAM_URL = "https://userstream.twitter.com/2/user.json"
access_token = Token(ACCESS_TOKEN,ACCESS_TOKEN_SECRET)
consumer = Token(CONSUMER_KEY,CONSUMER_SECRET)
parameters = {
'oauth_consumer_key': CONSUMER_KEY,
'oauth_token': access_token.key,
'oauth_signature_method': 'HMAC-SHA1',
'oauth_timestamp': str(int(time.time())),
'oauth_nonce': access_token._generate_nonce(),
'oauth_version': '1.0',
}
oauth_request = OAuthRequest.from_token_and_callback(access_token,
http_url=STREAM_URL,
parameters=parameters)
signature_method = OAuthSignatureMethod_HMAC_SHA1()
signature = signature_method.build_signature(oauth_request, consumer, access_token)
parameters['oauth_signature'] = signature
data = urllib.urlencode(parameters)
return "%s?%s" % (STREAM_URL,data)
if __name__ == "__main__":
#Create minecraft twitter
mcTwitter = MinecraftTwitterStreamClient(getTwitterUserStreamURL())
|
|
"""The WaveBlocks Project
This file contains the basic interface for general wavepackets.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2013, 2014, 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import array, complexfloating, cumsum, vsplit, vstack, zeros, conjugate
from scipy import sqrt
from scipy.linalg import norm
from WaveBlocksND.Wavepacket import Wavepacket
from WaveBlocksND.GradientHAWP import GradientHAWP
from functools import reduce
__all__ = ["HagedornWavepacketBase"]
class HagedornWavepacketBase(Wavepacket):
r"""This class implements the abstract :py:class:`Wavepacket` interface
and contains code common to all types of Hagedorn wavepackets.
"""
def __init__(self, parameters):
r"""Initialize a wavepacket object that represents :math:`\Psi`.
"""
raise NotImplementedError("'HagedornWavepacketBase' should not be instantiated.")
# We can handle basis shapes here as the logic is the same for
# homogeneous and inhomogeneous Hagedorn wavepackets.
def _resize_coefficient_storage(self, component, bs_old, bs_new):
r"""
"""
bso = bs_old.get_basis_size()
bsn = bs_new.get_basis_size()
# Find the intersection of K and K'
# Optimization: iterate over smaller set
if bso <= bsn:
insec = [k for k in bs_old if k in bs_new]
elif bso > bsn:
insec = [k for k in bs_new if k in bs_old]
# TODO: Consider making this part of the BasisShape interface
# TODO: Consider implementing set operations for basis shapes
# Construct the index mapping
i = array([bs_old[k] for k in insec])
j = array([bs_new[k] for k in insec])
# Copy over the data
cnew = zeros((bsn, 1), dtype=complexfloating)
cnew[j] = self._coefficients[component][i]
self._coefficients[component] = cnew
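    # Illustrative example with hypothetical basis shapes: if bs_old maps the
    # multi-indices {(0,): 0, (1,): 1, (2,): 2} and bs_new maps
    # {(0,): 0, (2,): 1, (3,): 2}, the intersection is {(0,), (2,)}, hence
    # i = [0, 2] and j = [0, 1]; the old coefficients c_(0,) and c_(2,) are
    # copied into rows 0 and 1 of the new zero-initialised column vector.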
def get_basis_shapes(self, *, component=None):
r"""Retrieve the basis shapes :math:`\mathfrak{K}_i` for each component :math:`i`.
:param component: The component :math:`i` whose basis shape we request. (Default is
                          ``None`` which means to return the basis shapes for all components.)
:type component: int
:return: The basis shape for an individual component or a list with all shapes.
"""
if component is not None:
return self._basis_shapes[component]
else:
return tuple(self._basis_shapes)
def set_basis_shapes(self, basis_shape, *, component=None):
r"""Set the basis shape :math:`\mathfrak{K}` of a given component or for all components.
:param basis_shape: The basis shape for an individual component or a list with all :math:`N` shapes.
:type basis_shape: A subclass of :py:class:`BasisShape`.
:param component: The component :math:`i` whose basis shape we want to set. (Default is
                          ``None`` which means to set the basis shapes for all components.)
:type component: int
"""
if component is not None:
# Check for valid input basis shape
if component not in range(self._number_components):
raise ValueError("Invalid component index " + str(component))
# Adapt the coefficient storage vectors
self._resize_coefficient_storage(component, self._basis_shapes[component], basis_shape)
# Set the new basis shape for the given component
self._basis_shapes[component] = basis_shape
else:
# Check for valid input basis shape
if not len(basis_shape) == self._number_components:
raise ValueError("Number of basis shape(s) does not match to number of components.")
for index, bsnew in enumerate(basis_shape):
# Adapt the coefficient storage vectors
self._resize_coefficient_storage(index, self._basis_shapes[index], bsnew)
# Set the new basis shape for the given component
self._basis_shapes[index] = bsnew
# And update the caches information
self._basis_sizes = [bs.get_basis_size() for bs in self._basis_shapes]
# We can handle coefficient set manipulation here as the logic is
# the same for homogeneous and inhomogeneous Hagedorn wavepackets.
def set_coefficient(self, component, index, value):
r"""Set a single coefficient :math:`c^i_k` of the specified component :math:`\Phi_i`
of :math:`\Psi`.
:param component: The index :math:`i` of the component :math:`\Phi_i` we want to update.
:type components: int
:param index: The multi-index :math:`k` of the coefficient :math:`c^i_k` we want to update.
:type index: A tuple of :math:`D` integers.
:param value: The new value of the coefficient :math:`c^i_k`.
:raise: :py:class:`ValueError` For invalid indices :math:`i` or :math:`k`.
"""
if component > self._number_components - 1 or component < 0:
raise ValueError("There is no component with index {}.".format(component))
if index not in self._basis_shapes[component]:
raise ValueError("There is no basis function with multi-index {}.".format(index))
# Apply linear order mapping here
key = self._basis_shapes[component][index]
self._coefficients[component][key] = value
def get_coefficient(self, component, index):
r"""Retrieve a single coefficient :math:`c^i_k` of the specified component :math:`\Phi_i`
of :math:`\Psi`.
:param component: The index :math:`i` of the component :math:`\Phi_i` we want to update.
:type components: int
:param index: The multi-index :math:`k` of the coefficient :math:`c^i_k` we want to update.
:type index: A tuple of :math:`D` integers.
:return: A single complex number.
:raise: :py:class:`ValueError` For invalid indices :math:`i` or :math:`k`.
"""
if component > self._number_components - 1 or component < 0:
raise ValueError("There is no component with index {}.".format(component))
if index not in self._basis_shapes[component]:
raise ValueError("There is no basis function with multi-index {}.".format(index))
# Apply linear order mapping here
key = self._basis_shapes[component][index]
return self._coefficients[component][key]
def set_coefficients(self, values, *, component=None):
r"""Update all the coefficients :math:`c` of :math:`\Psi` or update
the coefficients :math:`c^i` of the components :math:`\Phi_i` only.
Note: this method copies the data arrays.
:param values: The new values of the coefficients :math:`c^i` of :math:`\Phi_i`.
:type values: An ndarray of suitable shape or a list of ndarrays.
:param component: The index :math:`i` of the component we want to update with new coefficients.
:type component: int (Default is ``None`` meaning all)
:raise: :py:class:`ValueError` For invalid component indices :math:`i`.
"""
if component is None:
if len(values) != self._number_components:
raise ValueError("Too less or too many data provided.")
for index, value in enumerate(values):
bs = self._basis_sizes[index]
self._coefficients[index] = value.copy().reshape((bs, 1))
else:
if component > self._number_components - 1 or component < 0:
raise ValueError("There is no component with index {}.".format(component))
bs = self._basis_sizes[component]
self._coefficients[component] = values.copy().reshape((bs, 1))
def get_coefficients(self, *, component=None):
r"""Returns the coefficients :math:`c^i` for some component :math:`\Phi_i` of
:math:`\Psi` or all the coefficients :math:`c` of all components.
Note: this method copies the data arrays.
:param component: The index :math:`i` of the component we want to retrieve.
:type component: int (Default is ``None`` meaning all)
:return: A single ndarray with the coefficients of the given component or
a list containing the ndarrays for each component. Each ndarray
is two-dimensional with a shape of :math:`(|\mathfrak{K}_i|, 1)`.
:raise: :py:class:`ValueError` For invalid component indices :math:`i`.
"""
if component is None:
return [item.copy() for item in self._coefficients]
else:
if component > self._number_components - 1 or component < 0:
raise ValueError("There is no component with index {}.".format(component))
return self._coefficients[component].copy()
def get_coefficient_vector(self, *, component=None):
r"""Retrieve the coefficients for all components :math:`\Phi_i` simultaneously.
.. warning:: This function does *not* copy the input data!
This is for efficiency as this routine is used in the innermost loops.
:param component: The component :math:`i` whose coefficients we request. (Default is
                          ``None`` which means to return the coefficients for all components.)
:type component: int
:return: The coefficients :math:`c^i` of all components
:math:`\Phi_i` stacked into a single long column vector.
"""
if component is None:
return vstack(self._coefficients)
else:
return self._coefficients[component]
def set_coefficient_vector(self, vector):
"""Set the coefficients for all components :math:`\Phi_i` simultaneously.
.. warning:: This function does *not* copy the input data!
This is for efficiency as this routine is used in the innermost loops.
:param vector: The coefficients of all components as a single long column vector.
:type vector: A two-dimensional ndarray of appropriate shape.
"""
# Compute the partition of the block-vector from the basis sizes
partition = cumsum(self._basis_sizes)[:-1]
# Split the block-vector with the given partition and assign
self._coefficients = vsplit(vector, partition)
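        # Illustrative example: for basis sizes [4, 6, 5] the partition is
        # cumsum([4, 6, 5])[:-1] == [4, 10], so a stacked vector of length 15
        # is split back into blocks of 4, 6 and 5 rows, one per component.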
def get_eps(self):
r"""Retrieve the semi-classical scaling parameter :math:`\varepsilon` of the wavepacket.
:return: The value of :math:`\varepsilon`.
"""
return self._eps
# We can compute the norms the same way for homogeneous and inhomogeneous Hagedorn wavepackets.
def norm(self, component=None, summed=False):
r"""Calculate the :math:`L^2` norm :math:`\langle\Psi|\Psi\rangle` of the wavepacket :math:`\Psi`.
:param component: The index :math:`i` of the component :math:`\Phi_i` whose norm is calculated.
The default value is ``None`` which means to compute the norms of all :math:`N` components.
:type component: int or ``None``.
:param summed: Whether to sum up the norms :math:`\langle\Phi_i|\Phi_i\rangle` of the
individual components :math:`\Phi_i`.
        :type summed: Boolean, default is ``False``.
:return: The norm of :math:`\Psi` or the norm of :math:`\Phi_i` or a list with the :math:`N`
norms of all components. Depending on the values of ``component`` and ``summed``.
"""
if component is not None:
result = norm(self._coefficients[component])
else:
result = [norm(item) for item in self._coefficients]
if summed is True:
result = reduce(lambda x, y: x + conjugate(y) * y, result, 0.0)
result = sqrt(result)
return result
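    # Worked example for norm() (illustrative numbers): component norms
    # [3.0, 4.0] with summed=True reduce to sqrt(3.0**2 + 4.0**2) == 5.0,
    # i.e. the norm of the full wavepacket.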
# A wavepacket knows how to compute gradients
# TODO: Consider moving this inside the general codata framework
def get_gradient_operator(self):
r"""Return the :py:class:`Gradient` subclass suitable for
computing gradients of this wavepacket.
:return: A :py:class:`GradientHAWP` instance.
"""
return GradientHAWP()
# A wavepacket knows how to compute inner products
# TODO: Consider moving this inside the general codata framework
# TODO: Rethink if wavepackets should contain a QR
def set_innerproduct(self, innerproduct):
"""Set the :py:class:`InnerProduct` subclass instance used for computing
inner products and evaluating brakets.
:param innerproduct: The new :py:class:`InnerProduct` subclass instance.
"""
self._IP = innerproduct
def get_innerproduct(self):
"""Return the :py:class:`InnerProduct` subclass instance used computing
inner products and evaluating brakets.
:return: The current :py:class:`InnerProduct` subclass instance.
"""
return self._IP
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params,
compiler_path):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
# Find compiler's default include dirs.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-xc++', '-v', '-'])
proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()[1]
# Extract the list of include dirs from the output, which has this format:
# ...
# #include "..." search starts here:
# #include <...> search starts here:
# /usr/include/c++/4.6
# /usr/local/include
# End of search list.
# ...
in_include_list = False
for line in output.splitlines():
if line.startswith('#include'):
in_include_list = True
continue
if line.startswith('End of search list.'):
break
if in_include_list:
include_dir = line.strip()
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if config.has_key('include_dirs'):
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
def GetCompilerPath(target_list, data, options):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return os.path.join(options.toplevel_dir, value)
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
compiler_path):
"""Calculate the defines for a project.
Returns:
    A dict that includes explicit defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for key in sorted(defines.iterkeys()):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
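# Illustrative fragment of the XML produced by WriteMacros above (one language,
# one define):
#   <language name="GNU C++">
#     <macro><name>FOO</name><value>1</value></macro>
#   </language>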
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
out_name = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
gyp.common.EnsureDirExists(out_name)
out = open(out_name, 'w')
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
compiler_path = GetCompilerPath(target_list, data, options)
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name,
params, compiler_path)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name, params,
compiler_path)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
out.close()
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError, "--generator_output not implemented for eclipse"
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
|
|
import os
import shutil
import sys
import tempfile
import pytest
from mock import Mock, mock_open, patch
from pip._vendor import pkg_resources
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import Requirement
from pip._internal.commands.install import InstallCommand
from pip._internal.download import PipSession, path_to_url
from pip._internal.exceptions import (
HashErrors, InstallationError, InvalidWheelFilename, PreviousBuildDirError,
)
from pip._internal.index import PackageFinder
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req import InstallRequirement, RequirementSet
from pip._internal.req.req_file import process_line
from pip._internal.req.req_install import parse_editable
from pip._internal.resolve import Resolver
from pip._internal.utils.misc import read_text_file
from tests.lib import DATA_DIR, assert_raises_regexp, requirements_file
def get_processed_req_from_line(line, fname='file', lineno=1):
req = list(process_line(line, fname, lineno))[0]
req.is_direct = True
return req
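# Illustrative usage: get_processed_req_from_line('simple==1.0 --hash=sha256:abc')
# parses the string as if it were line 1 of a requirements file called 'file'
# and marks the resulting InstallRequirement as direct (user-supplied).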
class TestRequirementSet(object):
"""RequirementSet tests"""
def setup(self):
self.tempdir = tempfile.mkdtemp()
def teardown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def _basic_resolver(self, finder):
preparer = RequirementPreparer(
build_dir=os.path.join(self.tempdir, 'build'),
src_dir=os.path.join(self.tempdir, 'src'),
download_dir=None,
wheel_download_dir=None,
progress_bar="on",
build_isolation=True,
)
return Resolver(
preparer=preparer, wheel_cache=None,
session=PipSession(), finder=finder,
use_user_site=False, upgrade_strategy="to-satisfy-only",
ignore_dependencies=False, ignore_installed=False,
ignore_requires_python=False, force_reinstall=False,
isolated=False,
)
def test_no_reuse_existing_build_dir(self, data):
"""Test prepare_files raise exception with previous build dir"""
build_dir = os.path.join(self.tempdir, 'build', 'simple')
os.makedirs(build_dir)
open(os.path.join(build_dir, "setup.py"), 'w')
reqset = RequirementSet()
req = InstallRequirement.from_line('simple')
req.is_direct = True
reqset.add_requirement(req)
finder = PackageFinder([data.find_links], [], session=PipSession())
resolver = self._basic_resolver(finder)
assert_raises_regexp(
PreviousBuildDirError,
r"pip can't proceed with [\s\S]*%s[\s\S]*%s" %
(req, build_dir.replace('\\', '\\\\')),
resolver.resolve,
reqset,
)
def test_environment_marker_extras(self, data):
"""
Test that the environment marker extras are used with
non-wheel installs.
"""
reqset = RequirementSet()
req = InstallRequirement.from_editable(
data.packages.join("LocalEnvironMarker")
)
req.is_direct = True
reqset.add_requirement(req)
finder = PackageFinder([data.find_links], [], session=PipSession())
resolver = self._basic_resolver(finder)
resolver.resolve(reqset)
        # This is hacky but does test both cases in py2 and py3
if sys.version_info[:2] in ((2, 7), (3, 4)):
assert reqset.has_requirement('simple')
else:
assert not reqset.has_requirement('simple')
@pytest.mark.network
def test_missing_hash_checking(self):
"""Make sure prepare_files() raises an error when a requirement has no
hash in implicit hash-checking mode.
"""
reqset = RequirementSet()
# No flags here. This tests that detection of later flags nonetheless
# requires earlier packages to have hashes:
reqset.add_requirement(get_processed_req_from_line(
'blessings==1.0', lineno=1
))
# This flag activates --require-hashes mode:
reqset.add_requirement(get_processed_req_from_line(
'tracefront==0.1 --hash=sha256:somehash', lineno=2,
))
# This hash should be accepted because it came from the reqs file, not
# from the internet:
reqset.add_requirement(get_processed_req_from_line(
'https://pypi.python.org/packages/source/m/more-itertools/'
'more-itertools-1.0.tar.gz#md5=b21850c3cfa7efbb70fd662ab5413bdd',
lineno=3,
))
# The error text should list this as a URL and not `peep==3.1.1`:
reqset.add_requirement(get_processed_req_from_line(
'https://pypi.python.org/packages/source/p/peep/peep-3.1.1.tar.gz',
lineno=4,
))
finder = PackageFinder(
[],
['https://pypi.python.org/simple'],
session=PipSession(),
)
resolver = self._basic_resolver(finder)
assert_raises_regexp(
HashErrors,
r'Hashes are required in --require-hashes mode, but they are '
r'missing .*\n'
r' https://pypi\.python\.org/packages/source/p/peep/peep'
r'-3\.1\.1\.tar\.gz --hash=sha256:[0-9a-f]+\n'
r' blessings==1.0 --hash=sha256:[0-9a-f]+\n'
r'THESE PACKAGES DO NOT MATCH THE HASHES.*\n'
r' tracefront==0.1 .*:\n'
r' Expected sha256 somehash\n'
r' Got [0-9a-f]+$',
resolver.resolve,
reqset
)
def test_missing_hash_with_require_hashes(self, data):
"""Setting --require-hashes explicitly should raise errors if hashes
are missing.
"""
reqset = RequirementSet(require_hashes=True)
reqset.add_requirement(get_processed_req_from_line(
'simple==1.0', lineno=1
))
finder = PackageFinder([data.find_links], [], session=PipSession())
resolver = self._basic_resolver(finder)
assert_raises_regexp(
HashErrors,
r'Hashes are required in --require-hashes mode, but they are '
r'missing .*\n'
r' simple==1.0 --hash=sha256:393043e672415891885c9a2a0929b1af95'
r'fb866d6ca016b42d2e6ce53619b653$',
resolver.resolve,
reqset
)
def test_missing_hash_with_require_hashes_in_reqs_file(self, data, tmpdir):
"""--require-hashes in a requirements file should make its way to the
RequirementSet.
"""
req_set = RequirementSet(require_hashes=False)
session = PipSession()
finder = PackageFinder([data.find_links], [], session=session)
command = InstallCommand()
with requirements_file('--require-hashes', tmpdir) as reqs_file:
options, args = command.parse_args(['-r', reqs_file])
command.populate_requirement_set(
req_set, args, options, finder, session, command.name,
wheel_cache=None,
)
assert req_set.require_hashes
def test_unsupported_hashes(self, data):
"""VCS and dir links should raise errors when --require-hashes is
on.
In addition, complaints about the type of requirement (VCS or dir)
should trump the presence or absence of a hash.
"""
reqset = RequirementSet(require_hashes=True)
reqset.add_requirement(get_processed_req_from_line(
'git+git://github.com/pypa/pip-test-package --hash=sha256:123',
lineno=1,
))
dir_path = data.packages.join('FSPkg')
reqset.add_requirement(get_processed_req_from_line(
'file://%s' % (dir_path,),
lineno=2,
))
finder = PackageFinder([data.find_links], [], session=PipSession())
resolver = self._basic_resolver(finder)
sep = os.path.sep
if sep == '\\':
sep = '\\\\' # This needs to be escaped for the regex
assert_raises_regexp(
HashErrors,
r"Can't verify hashes for these requirements because we don't "
r"have a way to hash version control repositories:\n"
r" git\+git://github\.com/pypa/pip-test-package \(from -r file "
r"\(line 1\)\)\n"
r"Can't verify hashes for these file:// requirements because they "
r"point to directories:\n"
r" file://.*{sep}data{sep}packages{sep}FSPkg "
r"\(from -r file \(line 2\)\)".format(sep=sep),
resolver.resolve,
reqset)
def test_unpinned_hash_checking(self, data):
"""Make sure prepare_files() raises an error when a requirement is not
version-pinned in hash-checking mode.
"""
reqset = RequirementSet()
# Test that there must be exactly 1 specifier:
reqset.add_requirement(get_processed_req_from_line(
'simple --hash=sha256:a90427ae31f5d1d0d7ec06ee97d9fcf2d0fc9a786985'
'250c1c83fd68df5911dd', lineno=1,
))
# Test that the operator must be ==:
reqset.add_requirement(get_processed_req_from_line(
'simple2>1.0 --hash=sha256:3ad45e1e9aa48b4462af0'
'123f6a7e44a9115db1ef945d4d92c123dfe21815a06',
lineno=2,
))
finder = PackageFinder([data.find_links], [], session=PipSession())
resolver = self._basic_resolver(finder)
assert_raises_regexp(
HashErrors,
# Make sure all failing requirements are listed:
r'versions pinned with ==. These do not:\n'
r' simple .* \(from -r file \(line 1\)\)\n'
r' simple2>1.0 .* \(from -r file \(line 2\)\)',
resolver.resolve,
reqset)
def test_hash_mismatch(self, data):
"""A hash mismatch should raise an error."""
file_url = path_to_url(
(data.packages / 'simple-1.0.tar.gz').abspath)
reqset = RequirementSet(require_hashes=True)
reqset.add_requirement(get_processed_req_from_line(
'%s --hash=sha256:badbad' % file_url, lineno=1,
))
finder = PackageFinder([data.find_links], [], session=PipSession())
resolver = self._basic_resolver(finder)
assert_raises_regexp(
HashErrors,
r'THESE PACKAGES DO NOT MATCH THE HASHES.*\n'
r' file:///.*/data/packages/simple-1\.0\.tar\.gz .*:\n'
r' Expected sha256 badbad\n'
r' Got 393043e672415891885c9a2a0929b1af95fb866d'
r'6ca016b42d2e6ce53619b653$',
resolver.resolve,
reqset)
def test_unhashed_deps_on_require_hashes(self, data):
"""Make sure unhashed, unpinned, or otherwise unrepeatable
dependencies get complained about when --require-hashes is on."""
reqset = RequirementSet()
finder = PackageFinder([data.find_links], [], session=PipSession())
resolver = self._basic_resolver(finder)
reqset.add_requirement(get_processed_req_from_line(
'TopoRequires2==0.0.1 ' # requires TopoRequires
'--hash=sha256:eaf9a01242c9f2f42cf2bd82a6a848cd'
'e3591d14f7896bdbefcf48543720c970',
lineno=1
))
assert_raises_regexp(
HashErrors,
r'In --require-hashes mode, all requirements must have their '
r'versions pinned.*\n'
r' TopoRequires from .*$',
resolver.resolve,
reqset)
def test_hashed_deps_on_require_hashes(self):
"""Make sure hashed dependencies get installed when --require-hashes
is on.
(We actually just check that no "not all dependencies are hashed!"
error gets raised while preparing; there is no reason to expect
installation to then fail, as the code paths are the same as ever.)
"""
reqset = RequirementSet()
reqset.add_requirement(get_processed_req_from_line(
'TopoRequires2==0.0.1 ' # requires TopoRequires
'--hash=sha256:eaf9a01242c9f2f42cf2bd82a6a848cd'
'e3591d14f7896bdbefcf48543720c970',
lineno=1
))
reqset.add_requirement(get_processed_req_from_line(
'TopoRequires==0.0.1 '
'--hash=sha256:d6dd1e22e60df512fdcf3640ced3039b3b02a56ab2cee81ebcb'
'3d0a6d4e8bfa6',
lineno=2
))
@pytest.mark.parametrize(('file_contents', 'expected'), [
(b'\xf6\x80', b'\xc3\xb6\xe2\x82\xac'), # cp1252
(b'\xc3\xb6\xe2\x82\xac', b'\xc3\xb6\xe2\x82\xac'), # utf-8
(b'\xc3\xb6\xe2', b'\xc3\x83\xc2\xb6\xc3\xa2'), # Garbage
])
def test_egg_info_data(file_contents, expected):
om = mock_open(read_data=file_contents)
em = Mock()
em.return_value = 'cp1252'
with patch('pip._internal.utils.misc.open', om, create=True):
with patch('locale.getpreferredencoding', em):
ret = read_text_file('foo')
assert ret == expected.decode('utf-8')
class TestInstallRequirement(object):
def setup(self):
self.tempdir = tempfile.mkdtemp()
def teardown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def test_url_with_query(self):
"""InstallRequirement should strip the fragment, but not the query."""
url = 'http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz'
fragment = '#egg=bar'
req = InstallRequirement.from_line(url + fragment)
assert req.link.url == url + fragment, req.link
def test_unsupported_wheel_link_requirement_raises(self):
reqset = RequirementSet()
req = InstallRequirement.from_line(
'https://whatever.com/peppercorn-0.4-py2.py3-bogus-any.whl',
)
assert req.link is not None
assert req.link.is_wheel
assert req.link.scheme == "https"
with pytest.raises(InstallationError):
reqset.add_requirement(req)
def test_unsupported_wheel_local_file_requirement_raises(self, data):
reqset = RequirementSet()
req = InstallRequirement.from_line(
data.packages.join('simple.dist-0.1-py1-none-invalid.whl'),
)
assert req.link is not None
assert req.link.is_wheel
assert req.link.scheme == "file"
with pytest.raises(InstallationError):
reqset.add_requirement(req)
def test_installed_version_not_installed(self):
req = InstallRequirement.from_line('simple-0.1-py2.py3-none-any.whl')
assert req.installed_version is None
def test_str(self):
req = InstallRequirement.from_line('simple==0.1')
assert str(req) == 'simple==0.1'
def test_repr(self):
req = InstallRequirement.from_line('simple==0.1')
assert repr(req) == (
'<InstallRequirement object: simple==0.1 editable=False>'
)
def test_invalid_wheel_requirement_raises(self):
with pytest.raises(InvalidWheelFilename):
InstallRequirement.from_line('invalid.whl')
def test_wheel_requirement_sets_req_attribute(self):
req = InstallRequirement.from_line('simple-0.1-py2.py3-none-any.whl')
assert isinstance(req.req, Requirement)
assert str(req.req) == 'simple==0.1'
def test_url_preserved_line_req(self):
"""Confirm the url is preserved in a non-editable requirement"""
url = 'git+http://foo.com@ref#egg=foo'
req = InstallRequirement.from_line(url)
assert req.link.url == url
def test_url_preserved_editable_req(self):
"""Confirm the url is preserved in a editable requirement"""
url = 'git+http://foo.com@ref#egg=foo'
req = InstallRequirement.from_editable(url)
assert req.link.url == url
@pytest.mark.parametrize('path', (
'/path/to/foo.egg-info'.replace('/', os.path.sep),
# Tests issue fixed by https://github.com/pypa/pip/pull/2530
'/path/to/foo.egg-info/'.replace('/', os.path.sep),
))
def test_get_dist(self, path):
req = InstallRequirement.from_line('foo')
req.egg_info_path = Mock(return_value=path)
dist = req.get_dist()
assert isinstance(dist, pkg_resources.Distribution)
assert dist.project_name == 'foo'
assert dist.location == '/path/to'.replace('/', os.path.sep)
def test_markers(self):
for line in (
# recommended syntax
'mock3; python_version >= "3"',
# with more spaces
'mock3 ; python_version >= "3" ',
# without spaces
'mock3;python_version >= "3"',
):
req = InstallRequirement.from_line(line)
assert req.req.name == 'mock3'
assert str(req.req.specifier) == ''
assert str(req.markers) == 'python_version >= "3"'
def test_markers_semicolon(self):
# check that the markers can contain a semicolon
req = InstallRequirement.from_line('semicolon; os_name == "a; b"')
assert req.req.name == 'semicolon'
assert str(req.req.specifier) == ''
assert str(req.markers) == 'os_name == "a; b"'
def test_markers_url(self):
# test "URL; markers" syntax
url = 'http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz'
line = '%s; python_version >= "3"' % url
req = InstallRequirement.from_line(line)
assert req.link.url == url, req.url
assert str(req.markers) == 'python_version >= "3"'
# without space, markers are part of the URL
url = 'http://foo.com/?p=bar.git;a=snapshot;h=v0.1;sf=tgz'
line = '%s;python_version >= "3"' % url
req = InstallRequirement.from_line(line)
assert req.link.url == line, req.url
assert req.markers is None
def test_markers_match_from_line(self):
# match
for markers in (
'python_version >= "1.0"',
'sys_platform == %r' % sys.platform,
):
line = 'name; ' + markers
req = InstallRequirement.from_line(line)
assert str(req.markers) == str(Marker(markers))
assert req.match_markers()
# don't match
for markers in (
'python_version >= "5.0"',
'sys_platform != %r' % sys.platform,
):
line = 'name; ' + markers
req = InstallRequirement.from_line(line)
assert str(req.markers) == str(Marker(markers))
assert not req.match_markers()
def test_markers_match(self):
# match
for markers in (
'python_version >= "1.0"',
'sys_platform == %r' % sys.platform,
):
line = 'name; ' + markers
req = InstallRequirement.from_line(line, comes_from='')
assert str(req.markers) == str(Marker(markers))
assert req.match_markers()
# don't match
for markers in (
'python_version >= "5.0"',
'sys_platform != %r' % sys.platform,
):
line = 'name; ' + markers
req = InstallRequirement.from_line(line, comes_from='')
assert str(req.markers) == str(Marker(markers))
assert not req.match_markers()
def test_extras_for_line_path_requirement(self):
line = 'SomeProject[ex1,ex2]'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(line, comes_from=comes_from)
assert len(req.extras) == 2
assert req.extras == {'ex1', 'ex2'}
def test_extras_for_line_url_requirement(self):
line = 'git+https://url#egg=SomeProject[ex1,ex2]'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(line, comes_from=comes_from)
assert len(req.extras) == 2
assert req.extras == {'ex1', 'ex2'}
def test_extras_for_editable_path_requirement(self):
url = '.[ex1,ex2]'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_editable(url, comes_from=comes_from)
assert len(req.extras) == 2
assert req.extras == {'ex1', 'ex2'}
def test_extras_for_editable_url_requirement(self):
url = 'git+https://url#egg=SomeProject[ex1,ex2]'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_editable(url, comes_from=comes_from)
assert len(req.extras) == 2
assert req.extras == {'ex1', 'ex2'}
def test_unexisting_path(self):
with pytest.raises(InstallationError) as e:
InstallRequirement.from_line(
os.path.join('this', 'path', 'does', 'not', 'exist'))
err_msg = e.value.args[0]
assert "Invalid requirement" in err_msg
assert "It looks like a path." in err_msg
def test_single_equal_sign(self):
with pytest.raises(InstallationError) as e:
InstallRequirement.from_line('toto=42')
err_msg = e.value.args[0]
assert "Invalid requirement" in err_msg
assert "= is not a valid operator. Did you mean == ?" in err_msg
def test_traceback(self):
with pytest.raises(InstallationError) as e:
InstallRequirement.from_line('toto 42')
err_msg = e.value.args[0]
assert "Invalid requirement" in err_msg
assert "\nTraceback " in err_msg
def test_requirement_file(self):
req_file_path = os.path.join(self.tempdir, 'test.txt')
with open(req_file_path, 'w') as req_file:
req_file.write('pip\nsetuptools')
with pytest.raises(InstallationError) as e:
InstallRequirement.from_line(req_file_path)
err_msg = e.value.args[0]
assert "Invalid requirement" in err_msg
assert "It looks like a path. It does exist." in err_msg
assert "appears to be a requirements file." in err_msg
assert "If that is the case, use the '-r' flag to install" in err_msg
@patch('pip._internal.req.req_install.os.path.abspath')
@patch('pip._internal.req.req_install.os.path.exists')
@patch('pip._internal.req.req_install.os.path.isdir')
def test_parse_editable_local(
isdir_mock, exists_mock, abspath_mock):
exists_mock.return_value = isdir_mock.return_value = True
# mocks needed to support path operations on windows tests
abspath_mock.return_value = "/some/path"
assert parse_editable('.') == (None, 'file:///some/path', None)
abspath_mock.return_value = "/some/path/foo"
assert parse_editable('foo') == (
None, 'file:///some/path/foo', None,
)
def test_parse_editable_explicit_vcs():
assert parse_editable('svn+https://foo#egg=foo') == (
'foo',
'svn+https://foo#egg=foo',
None,
)
def test_parse_editable_vcs_extras():
assert parse_editable('svn+https://foo#egg=foo[extras]') == (
'foo[extras]',
'svn+https://foo#egg=foo[extras]',
None,
)
@patch('pip._internal.req.req_install.os.path.abspath')
@patch('pip._internal.req.req_install.os.path.exists')
@patch('pip._internal.req.req_install.os.path.isdir')
def test_parse_editable_local_extras(
isdir_mock, exists_mock, abspath_mock):
exists_mock.return_value = isdir_mock.return_value = True
abspath_mock.return_value = "/some/path"
assert parse_editable('.[extras]') == (
None, 'file://' + "/some/path", {'extras'},
)
abspath_mock.return_value = "/some/path/foo"
assert parse_editable('foo[bar,baz]') == (
None, 'file:///some/path/foo', {'bar', 'baz'},
)
def test_exclusive_environment_markers():
"""Make sure RequirementSet accepts several excluding env markers"""
eq26 = InstallRequirement.from_line(
"Django>=1.6.10,<1.7 ; python_version == '2.6'")
eq26.is_direct = True
ne26 = InstallRequirement.from_line(
"Django>=1.6.10,<1.8 ; python_version != '2.6'")
ne26.is_direct = True
req_set = RequirementSet()
req_set.add_requirement(eq26)
req_set.add_requirement(ne26)
assert req_set.has_requirement('Django')
def test_mismatched_versions(caplog, tmpdir):
original_source = os.path.join(DATA_DIR, 'src', 'simplewheel-1.0')
source_dir = os.path.join(tmpdir, 'simplewheel')
shutil.copytree(original_source, source_dir)
req = InstallRequirement(req=Requirement('simplewheel==2.0'),
comes_from=None, source_dir=source_dir)
req.run_egg_info()
req.assert_source_matches_version()
assert caplog.records[-1].message == (
'Requested simplewheel==2.0, '
'but installing version 1.0'
)
|
|
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for StatementVisitor."""
from __future__ import unicode_literals
import re
import subprocess
import textwrap
import unittest
from grumpy_tools.compiler import block
from grumpy_tools.compiler import imputil
from grumpy_tools.compiler import shard_test
from grumpy_tools.compiler import stmt
from grumpy_tools.compiler import util
import pythonparser
from pythonparser import ast
import pytest
class StatementVisitorTest(unittest.TestCase):
def testAssertNoMsg(self):
self.assertEqual((0, 'AssertionError()\n'), _GrumpRun(textwrap.dedent("""\
try:
assert False
except AssertionError as e:
print repr(e)""")))
def testAssertMsg(self):
want = (0, "AssertionError('foo',)\n")
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
try:
assert False, 'foo'
except AssertionError as e:
print repr(e)""")))
def testBareAssert(self):
# Assertion errors at the top level of a block should raise:
# https://github.com/google/grumpy/issues/18
want = (0, 'ok\n')
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
def foo():
assert False
try:
foo()
except AssertionError:
print 'ok'
else:
print 'bad'""")))
def testAssignAttribute(self):
self.assertEqual((0, '123\n'), _GrumpRun(textwrap.dedent("""\
e = Exception()
e.foo = 123
print e.foo""")))
def testAssignName(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
foo = 'bar'
print foo""")))
def testAssignMultiple(self):
self.assertEqual((0, 'baz baz\n'), _GrumpRun(textwrap.dedent("""\
foo = bar = 'baz'
print foo, bar""")))
def testAssignSubscript(self):
self.assertEqual((0, "{'bar': None}\n"), _GrumpRun(textwrap.dedent("""\
foo = {}
foo['bar'] = None
print foo""")))
def testAssignTuple(self):
self.assertEqual((0, 'a b\n'), _GrumpRun(textwrap.dedent("""\
baz = ('a', 'b')
foo, bar = baz
print foo, bar""")))
def testAugAssign(self):
self.assertEqual((0, '42\n'), _GrumpRun(textwrap.dedent("""\
foo = 41
foo += 1
print foo""")))
def testAugAssignBitAnd(self):
self.assertEqual((0, '3\n'), _GrumpRun(textwrap.dedent("""\
foo = 7
foo &= 3
print foo""")))
def testAugAssignPow(self):
self.assertEqual((0, '64\n'), _GrumpRun(textwrap.dedent("""\
foo = 8
foo **= 2
print foo""")))
def testClassDef(self):
self.assertEqual((0, "<type 'type'>\n"), _GrumpRun(textwrap.dedent("""\
class Foo(object):
pass
print type(Foo)""")))
def testClassDefWithVar(self):
self.assertEqual((0, 'abc\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'abc'
print Foo.bar""")))
def testDeleteAttribute(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 42
del Foo.bar
print hasattr(Foo, 'bar')""")))
def testDeleteClassLocal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'baz'
del bar
print hasattr(Foo, 'bar')""")))
def testDeleteGlobal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
foo = 42
del foo
print 'foo' in globals()""")))
def testDeleteLocal(self):
self.assertEqual((0, 'ok\n'), _GrumpRun(textwrap.dedent("""\
def foo():
bar = 123
del bar
try:
print bar
raise AssertionError
except UnboundLocalError:
print 'ok'
foo()""")))
def testDeleteNonexistentLocal(self):
self.assertRaisesRegexp(
util.ParseError, 'cannot delete nonexistent local',
_ParseAndVisit, 'def foo():\n del bar')
def testDeleteSubscript(self):
self.assertEqual((0, '{}\n'), _GrumpRun(textwrap.dedent("""\
foo = {'bar': 'baz'}
del foo['bar']
print foo""")))
def testExprCall(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
print 'bar'
foo()""")))
def testExprNameGlobal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
foo""")))
def testExprNameLocal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
def bar():
foo
bar()""")))
def testFor(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i""")))
def testForBreak(self):
self.assertEqual((0, '1\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
break""")))
def testForContinue(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
continue
raise AssertionError""")))
def testForElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
for i in (1,):
print 'foo'
else:
print 'bar'""")))
def testForElseBreakNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testForElseContinueNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testFunctionDecorator(self):
self.assertEqual((0, '<b>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def bold(fn):
return lambda: '<b>' + fn() + '</b>'
@bold
def foo():
return 'foo'
print foo()""")))
def testFunctionDecoratorWithArg(self):
self.assertEqual((0, '<b id=red>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def tag(name):
def bold(fn):
return lambda: '<b id=' + name + '>' + fn() + '</b>'
return bold
@tag('red')
def foo():
return 'foo'
print foo()""")))
def testFunctionDef(self):
self.assertEqual((0, 'bar baz\n'), _GrumpRun(textwrap.dedent("""\
def foo(a, b):
print a, b
foo('bar', 'baz')""")))
def testFunctionDefWithTupleArgs(self):
self.assertEqual((0, "('bar', 'baz')\n"), _GrumpRun(textwrap.dedent("""\
def foo((a, b)):
print(a, b)
foo(('bar', 'baz'))""")))
def testFunctionDefWithNestedTupleArgs(self):
self.assertEqual((0, "('bar', 'baz', 'qux')\n"), _GrumpRun(textwrap.dedent("""\
def foo(((a, b), c)):
print(a, b, c)
foo((('bar', 'baz'), 'qux'))""")))
def testFunctionDefWithMultipleTupleArgs(self):
self.assertEqual((0, "('bar', 'baz')\n"), _GrumpRun(textwrap.dedent("""\
def foo(((a, ), (b, ))):
print(a, b)
foo((('bar',), ('baz', )))""")))
def testFunctionDefTupleArgsInLambda(self):
self.assertEqual((0, "[(3, 2), (4, 3), (12, 1)]\n"), _GrumpRun(textwrap.dedent("""\
c = {12: 1, 3: 2, 4: 3}
top = sorted(c.items(), key=lambda (k,v): v)
print (top)""")))
def testFunctionDefGenerator(self):
self.assertEqual((0, "['foo', 'bar']\n"), _GrumpRun(textwrap.dedent("""\
def gen():
yield 'foo'
yield 'bar'
print list(gen())""")))
def testFunctionDefGeneratorReturnValue(self):
self.assertRaisesRegexp(
util.ParseError, 'returning a value in a generator function',
_ParseAndVisit, 'def foo():\n yield 1\n return 2')
def testFunctionDefLocal(self):
self.assertEqual((0, 'baz\n'), _GrumpRun(textwrap.dedent("""\
def foo():
def bar():
print 'baz'
bar()
foo()""")))
def testIf(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
if 123:
print 'foo'
if '':
print 'bar'""")))
def testIfElif(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
elif False:
print 'bar'
if False:
print 'foo'
elif True:
print 'bar'""")))
def testIfElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
else:
print 'bar'
if False:
print 'foo'
else:
print 'bar'""")))
def testImport(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
import sys
print type(sys.modules)""")))
def testImportFutureLateRaises(self):
regexp = 'from __future__ imports must occur at the beginning of the file'
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'foo = bar\nfrom __future__ import print_function')
def testFutureUnicodeLiterals(self):
want = "u'foo'\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
print repr('foo')""")))
def testImportMember(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
from sys import modules
print type(modules)""")))
def testImportConflictingPackage(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import time
from "__go__/time" import Now""")))
def testImportNative(self):
self.assertEqual((0, '1 1000000000\n'), _GrumpRun(textwrap.dedent("""\
from "__go__/time" import Nanosecond, Second
print Nanosecond, Second""")))
def testImportGrumpy(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
from "__go__/grumpy" import Assert
Assert(__frame__(), True, 'bad')""")))
def testImportNativeType(self):
self.assertEqual((0, "<type 'Duration'>\n"), _GrumpRun(textwrap.dedent("""\
from "__go__/time" import Duration
print Duration""")))
def testPrintStatement(self):
self.assertEqual((0, 'abc 123\nfoo bar\n'), _GrumpRun(textwrap.dedent("""\
print 'abc',
print '123'
print 'foo', 'bar'""")))
def testImportWildcard(self):
result = _GrumpRun(textwrap.dedent("""\
from time import *
print sleep"""))
self.assertEqual(0, result[0])
self.assertIn('<function sleep at', result[1])
def testImportTryExcept(self):
result = _GrumpRun(textwrap.dedent("""\
try:
import inexistantmodule
except ImportError:
from time import sleep as inexistantmodule
print inexistantmodule
"""))
self.assertEqual(0, result[0])
self.assertIn('<function sleep at', result[1])
def testImportFromTryExcept(self):
result = _GrumpRun(textwrap.dedent("""\
try:
from time import inexistantfunction
except ImportError:
from time import sleep
print sleep
"""))
self.assertEqual(0, result[0])
self.assertIn('<function sleep at', result[1])
def testPrintFunction(self):
want = "abc\n123\nabc 123\nabcx123\nabc 123 "
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
"module docstring is ok to proceed __future__"
from __future__ import print_function
print('abc')
print(123)
print('abc', 123)
print('abc', 123, sep='x')
print('abc', 123, end=' ')""")))
def testModuleDocstring(self):
want = "__doc__ (unicode) is module docstring\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
"module docstring"
print "__doc__ (" + type(__doc__).__name__ + ") is " + str(__doc__)"""
)))
def testModuleDocstringAbsent(self):
want = "__doc__ (NoneType) is None\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
print "__doc__ (" + type(__doc__).__name__ + ") is " + str(__doc__)"""
)))
def testClassDocstring(self):
want = "Foo.__doc__ (unicode) is class docstring\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
"module docstring"
class Foo(object):
"class docstring"
pass
print "Foo.__doc__ (" + type(Foo.__doc__).__name__ + ") is " + str(Foo.__doc__)"""
)))
@pytest.mark.xfail
def testClassDocstringAbsent(self):
want = "Foo.__doc__ (NoneType) is None\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
"module docstring"
class Foo(object):
pass
print "Foo.__doc__ (" + type(Foo.__doc__).__name__ + ") is " + str(Foo.__doc__)"""
)))
@pytest.mark.xfail
def testFunctionDocstring(self):
want = "Foo.func.__doc__ (unicode) is function docstring\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
"module docstring"
class Foo(object):
"class docstring"
def func(self):
"function docstring"
return
print "Foo.func.__doc__ (" + type(Foo.__doc__).__name__ + ") is " + str(Foo.func.__doc__)"""
)))
def testFunctionDocstringAbsent(self):
want = "Foo.func.__doc__ (NoneType) is None\n"
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
from __future__ import unicode_literals
"module docstring"
class Foo(object):
"class docstring"
def func(self):
return
print "Foo.func.__doc__ (" + type(Foo.func.__doc__).__name__ + ") is " + str(Foo.func.__doc__)"""
)))
def testRaiseExitStatus(self):
self.assertEqual(1, _GrumpRun('raise Exception')[0])
def testRaiseInstance(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise RuntimeError('foo')
print 'bad'
except RuntimeError as e:
print e""")))
def testRaiseTypeAndArg(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise KeyError('foo')
print 'bad'
except KeyError as e:
print e""")))
def testRaiseAgain(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
try:
raise AssertionError('foo')
except AssertionError:
raise
except Exception as e:
print e""")))
def testRaiseTraceback(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import sys
try:
try:
raise Exception
except:
e, _, tb = sys.exc_info()
raise e, None, tb
except:
e2, _, tb2 = sys.exc_info()
assert e is e2
assert tb is tb2""")))
def testReturn(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
return 'bar'
print foo()""")))
def testTryBareExcept(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except:
pass""")))
def testTryElse(self):
self.assertEqual((0, 'foo baz\n'), _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
except:
print 'bar'
else:
print 'baz'""")))
def testTryMultipleExcept(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except RuntimeError:
print 'foo'
except AssertionError:
print 'bar'
except:
print 'baz'""")))
def testTryFinally(self):
result = _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
finally:
print 'bar'
try:
print 'foo',
raise Exception
finally:
print 'bar'"""))
self.assertEqual(1, result[0])
self.assertIn('foo bar\nfoo bar\n', result[1])
self.assertIn('Exception\n', result[1])
def testWhile(self):
self.assertEqual((0, '2\n1\n'), _GrumpRun(textwrap.dedent("""\
i = 2
while i:
print i
i -= 1""")))
def testWhileElse(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
while False:
print 'foo'
else:
print 'bar'""")))
def testWith(self):
self.assertEqual((0, 'enter\n1\nexit\nenter\n2\nexit\n3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
print "enter"
def __exit__(self, exc_type, value, traceback):
print "exit"
a = ContextManager()
with a:
print 1
try:
with a:
print 2
raise RuntimeError
except RuntimeError:
print 3
""")))
def testWithAsMultiple(self):
self.assertEqual((0, '1 2 3\n1 2 3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
return (1, (2, 3))
def __exit__(self, *args):
pass
with ContextManager() as [x, (y, z)], ContextManager() as [x2, (y2, z2)]:
print x, y, z
print x2, y2, z2
""")))
def testWithAs(self):
self.assertEqual((0, '1 2 3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
return (1, (2, 3))
def __exit__(self, *args):
pass
with ContextManager() as [x, (y, z)]:
print x, y, z
""")))
def testWriteExceptDispatcherBareExcept(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
ast.ExceptHandler(type=None)]
self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
'exc', 'tb', handlers), [1, 2])
expected = re.compile(r'ResolveGlobal\(.*foo.*\bIsInstance\(.*'
r'goto Label1.*goto Label2', re.DOTALL)
self.assertRegexpMatches(visitor.writer.getvalue(), expected)
def testWriteExceptDispatcherBareExceptionNotLast(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=None),
ast.ExceptHandler(type=ast.Name(id='foo'))]
self.assertRaisesRegexp(util.ParseError, r"default 'except:' must be last",
visitor._write_except_dispatcher, # pylint: disable=protected-access
'exc', 'tb', handlers)
def testWriteExceptDispatcherMultipleExcept(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
ast.ExceptHandler(type=ast.Name(id='bar'))]
self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
'exc', 'tb', handlers), [1, 2])
expected = re.compile(
r'ResolveGlobal\(.*foo.*\bif .*\bIsInstance\(.*\{.*goto Label1.*'
r'ResolveGlobal\(.*bar.*\bif .*\bIsInstance\(.*\{.*goto Label2.*'
r'\bRaise\(exc\.ToObject\(\), nil, tb\.ToObject\(\)\)', re.DOTALL)
self.assertRegexpMatches(visitor.writer.getvalue(), expected)
def _MakeModuleBlock():
return block.ModuleBlock(None, '__main__', '<test>', '',
imputil.FutureFeatures())
def _ParseAndVisit(source):
mod = pythonparser.parse(source)
_, future_features = imputil.parse_future_features(mod)
importer = imputil.Importer(None, 'foo', 'foo.py', False)
b = block.ModuleBlock(importer, '__main__', '<test>',
source, future_features)
visitor = stmt.StatementVisitor(b)
visitor.visit(mod)
return visitor
def _GrumpRun(cmd):
p = subprocess.Popen(['grumpy', 'run'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = p.communicate(cmd)
return p.returncode, out
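# Hedged illustration (added note, not part of the original tests): _GrumpRun
# pipes the given source to `grumpy run` and returns (returncode, output), so
# e.g. _GrumpRun("print 'ok'") is expected to yield (0, 'ok\n').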
if __name__ == '__main__':
shard_test.main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""for_loop and pfor ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.parallel_for.pfor import PFor
from tensorflow.python.ops.parallel_for.pfor import PForConfig
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
"""Runs `loop_fn` `iters` times and stacks the outputs.
Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
stacks corresponding outputs of the different runs.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and returns a possibly nested structure of tensor
objects. The shape of these outputs should not depend on the input.
loop_fn_dtypes: dtypes for the outputs of loop_fn.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: The number of iterations that can be dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked output tensor objects with the same
nested structure as the output of `loop_fn`.
"""
flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
is_none_list = []
def while_body(i, *ta_list):
"""Body of while loop."""
fn_output = nest.flatten(loop_fn(i))
if len(fn_output) != len(flat_loop_fn_dtypes):
raise ValueError(
"Number of expected outputs, %d, does not match the number of "
"actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes),
len(fn_output)))
outputs = []
del is_none_list[:]
is_none_list.extend([x is None for x in fn_output])
for out, ta in zip(fn_output, ta_list):
# TODO(agarwal): support returning Operation objects from loop_fn.
if out is not None:
ta = ta.write(i, array_ops.expand_dims(out, 0))
outputs.append(ta)
return tuple([i + 1] + outputs)
if parallel_iterations is not None:
extra_args = {"parallel_iterations": parallel_iterations}
else:
extra_args = {}
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters,
while_body,
[0] + [tensor_array_ops.TensorArray(dtype, iters)
for dtype in flat_loop_fn_dtypes],
**extra_args)[1:]
# TODO(rachelim): enable this for sparse tensors
output = [None if is_none else ta.concat()
for ta, is_none in zip(ta_list, is_none_list)]
return nest.pack_sequence_as(loop_fn_dtypes, output)
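# --- Hedged usage sketch (added for illustration; not part of the original
# module). Assumes the imports above (dtypes, math_ops) and a graph context.
def _example_for_loop_usage():
  """Stacks the squares of 0..4 into a single float32 tensor of shape [5]."""
  return for_loop(
      lambda i: math_ops.cast(i * i, dtypes.float32),
      dtypes.float32,
      iters=5)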
def _flatten_first_two_dims(x):
"""Flattens the first two dimensions of x into a single dimension."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]],
axis=0)
return array_ops.reshape(x, new_shape)
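# For example (hedged illustration, not in the original source): an input of
# shape [2, 3, 4] is reshaped to [6, 4]. This is used below to merge the tile
# axis and the within-tile iteration axis of the tiled pfor outputs.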
PFOR_CONFIG_ARG = "pfor_config"
def pfor(loop_fn, iters, parallel_iterations=None):
"""Equivalent to running `loop_fn` `iters` times and stacking the outputs.
`pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
times, with input from 0 to `iters - 1`, and stacking corresponding output of
each iteration. However the implementation does not use a tf.while_loop.
Instead it adds new operations to the graph that collectively compute the same
value as what running `loop_fn` in a loop would compute.
This is an experimental feature and currently has a lot of limitations:
  - There should be no data dependency between the different iterations. For
example, a future iteration should not depend on a value or side-effect of
a previous iteration.
  - Stateful kernels are mostly not supported, since they often imply a
data dependency or ordering of the iterations. We do support a limited set
of such stateful kernels though (like RandomFoo, Variable operations like
reads, etc).
- Conversion works only on a limited set of kernels for which a converter
has been registered.
- loop_fn has limited support for control flow operations. tf.cond in
particular is not supported.
- `loop_fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of `loop_fn` outputs should not depend on the input
to loop_fn.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and optionally a keyword argument `pfor_config` set
to a PForConfig object. It returns a possibly nested structure of Tensor
or Operation objects. Note that if setting `parallel_iterations` argument
to something other than None, `loop_fn` may be called more than once
during graph construction. So it may need to avoid mutating global state.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None corresponds to
vectorizing all the iterations. If `parallel_iterations` is smaller than
`iters`, then chunks of at most that many iterations are dispatched in
sequence. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked tensor objects with the same nested
structure as the output of `loop_fn`.
Raises:
ValueError: If parallel_iterations is not None and not an integer > 1.
"""
def f():
return _pfor_impl(loop_fn, iters, parallel_iterations=parallel_iterations)
if context.executing_eagerly():
f = function.defun(f)
return f()
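# --- Hedged usage sketch (added for illustration; not part of the original
# module). Vectorizes a simple elementwise computation over 8 iterations.
def _example_pfor_usage():
  """Computes [0.0, 1.0, 4.0, ..., 49.0] as one stacked float32 tensor."""
  return pfor(lambda i: math_ops.square(math_ops.cast(i, dtypes.float32)), 8)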
def _loop_fn_has_config(loop_fn):
"""Test if `loop_fn` has a `pfor_config` argument."""
if tf_inspect.isfunction(loop_fn):
argspec = tf_inspect.getargspec(loop_fn)
return PFOR_CONFIG_ARG in argspec.args
elif isinstance(loop_fn, functools.partial):
fn = loop_fn.func
argspec = tf_inspect.getargspec(fn)
return (PFOR_CONFIG_ARG in argspec.args and
PFOR_CONFIG_ARG not in loop_fn.keywords)
else:
loop_class = tf_decorator.unwrap(loop_fn)[1]
if not hasattr(loop_class, "__call__"):
raise ValueError("loop_fn object did not have a __call__ method")
argspec = tf_inspect.getargspec(loop_class.__call__)
return PFOR_CONFIG_ARG in argspec.args
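# Hedged illustration of the check above (not part of the original module):
#   def body_with_config(i, pfor_config=None): ...   # -> True
#   def plain_body(i): ...                           # -> False
# functools.partial objects are inspected via their .func, and the keyword
# must not already be bound in the partial.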
def _pfor_impl(loop_fn, iters, parallel_iterations=None, pfor_config=None):
"""Implementation of pfor."""
loop_fn_has_config = _loop_fn_has_config(loop_fn)
existing_ops = set(ops.get_default_graph().get_operations())
with ops.name_scope("loop_body"):
loop_var = array_ops.placeholder(dtypes.int32, shape=[])
if loop_fn_has_config:
if pfor_config is None:
pfor_config = PForConfig()
pfor_config._set_iters(iters) # pylint: disable=protected-access
loop_fn_outputs = loop_fn(loop_var, **{PFOR_CONFIG_ARG: pfor_config})
else:
assert pfor_config is None
loop_fn_outputs = loop_fn(loop_var)
new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
iters = ops.convert_to_tensor(iters)
if parallel_iterations is not None:
if parallel_iterations < 1:
raise ValueError("parallel_iterations must be None or a positive integer")
if parallel_iterations == 1:
raise ValueError("Found parallel_iterations == 1. Use for_loop instead.")
iters_value = tensor_util.constant_value(iters)
if iters_value is not None and iters_value < parallel_iterations:
parallel_iterations = None
if parallel_iterations is None:
with ops.name_scope("pfor"):
converter = PFor(loop_var, iters, new_ops, pfor_config=pfor_config)
outputs = []
for loop_fn_output in nest.flatten(loop_fn_outputs):
outputs.append(converter.convert(loop_fn_output))
return nest.pack_sequence_as(loop_fn_outputs, outputs)
else:
if pfor_config is not None and pfor_config._has_reductions(): # pylint: disable=protected-access
raise ValueError("Setting parallel_iterations currently unsupported if"
" reductions across iterations are performed.")
num_tiled_iterations = iters // parallel_iterations
num_remaining_iterations = iters % parallel_iterations
# TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
# a tf.function and extract the graph from there to vectorize it.
with ops.name_scope("pfor_untiled"):
converter = PFor(loop_var, num_remaining_iterations, new_ops,
pfor_config=pfor_config)
remaining_outputs = []
flattened_loop_fn_outputs = nest.flatten(loop_fn_outputs)
for loop_fn_output in flattened_loop_fn_outputs:
remaining_outputs.append(converter.convert(loop_fn_output))
with ops.name_scope("pfor_tiled"):
loop_fn_dtypes = [ops.convert_to_tensor(x).dtype
for x in flattened_loop_fn_outputs]
def tiled_loop_body(j):
offset = j * parallel_iterations + num_remaining_iterations
def tiled_loop_fn(i, pfor_config=None):
if loop_fn_has_config:
return nest.flatten(loop_fn(i + offset, pfor_config=pfor_config))
else:
return nest.flatten(loop_fn(i + offset))
return _pfor_impl(
tiled_loop_fn, parallel_iterations, pfor_config=pfor_config)
tiled_outputs = for_loop(tiled_loop_body, loop_fn_dtypes,
num_tiled_iterations, parallel_iterations=1)
tiled_outputs = [_flatten_first_two_dims(y) for y in tiled_outputs]
with ops.name_scope("pfor"):
iters_value = tensor_util.constant_value(iters)
if iters_value is None or iters_value % parallel_iterations:
outputs = control_flow_ops.cond(
math_ops.equal(num_remaining_iterations, 0),
lambda: tiled_outputs,
lambda: [array_ops.concat([x, y], axis=0)
for x, y in zip(remaining_outputs, tiled_outputs)])
else:
outputs = tiled_outputs
return nest.pack_sequence_as(loop_fn_outputs, nest.flatten(outputs))
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2016 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import abc
import asyncio
import base64
import functools
import hashlib
import hmac
import http.cookies
from collections import namedtuple
from .hdrs import (
AUTHORIZATION,
COOKIE,
X_AUTH_COUCHDB_ROLES,
X_AUTH_COUCHDB_TOKEN,
X_AUTH_COUCHDB_USERNAME
)
__all__ = (
'AuthProvider',
'NoAuthProvider',
'BasicAuthProvider',
'BasicAuthCredentials',
'CookieAuthProvider',
'OAuthProvider',
'OAuthCredentials',
'ProxyAuthProvider',
'ProxyAuthCredentials'
)
#: BasicAuth credentials
BasicAuthCredentials = namedtuple('BasicAuthCredentials', [
'username', 'password'])
#: OAuth credentials
OAuthCredentials = namedtuple('OAuthCredentials', [
'consumer_key', 'consumer_secret', 'resource_key', 'resource_secret'])
#: ProxyAuth credentials
ProxyAuthCredentials = namedtuple('ProxyAuthCredentials', [
'username', 'roles', 'secret'])
class AuthProvider(object, metaclass=abc.ABCMeta):
"""Abstract authentication provider class."""
@abc.abstractmethod
def reset(self):
"""Resets provider instance to default state."""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def credentials(self):
"""Returns authentication credentials if any."""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def set_credentials(self, *args, **kwargs):
"""Sets authentication credentials."""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def apply(self, url, headers):
"""Applies authentication routines on further request. Mostly used
to set right `Authorization` header or cookies to pass the challenge.
:param str url: Request URL
:param dict headers: Request headers
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def update(self, response):
"""Updates provider routines from the HTTP response data.
:param response: :class:`aiocouchdb.client.HttpResponse` instance
"""
raise NotImplementedError # pragma: no cover
def wrap(self, request_func):
"""Wraps request coroutine function to apply the authentication context.
"""
@functools.wraps(request_func)
@asyncio.coroutine
def wrapper(method, url, headers, **kwargs):
self.apply(url, headers)
response = yield from request_func(method, url,
headers=headers, **kwargs)
self.update(response)
return response
return wrapper
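# Hedged usage sketch (illustration only; `session.request` is a hypothetical
# request coroutine): wrapping it makes every call apply the credentials and
# inspect the response for authentication updates.
#
#   auth = BasicAuthProvider('root', 'relax')
#   request = auth.wrap(session.request)
#   resp = yield from request('GET', url, headers={})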
class NoAuthProvider(AuthProvider):
"""Dummy provider to apply no authentication routines."""
def reset(self):
pass # pragma: no cover
def credentials(self):
pass # pragma: no cover
def set_credentials(self):
pass # pragma: no cover
def apply(self, url, headers):
pass # pragma: no cover
def update(self, response):
pass # pragma: no cover
def wrap(self, request_func):
return request_func
class BasicAuthProvider(AuthProvider):
"""Provides authentication via BasicAuth method."""
_auth_header = None
_credentials = None
def __init__(self, name=None, password=None):
if name or password:
self.set_credentials(name, password)
def reset(self):
"""Resets provider instance to default state."""
self._auth_header = None
self._credentials = None
def credentials(self):
"""Returns authentication credentials.
:rtype: :class:`aiocouchdb.authn.BasicAuthCredentials`
"""
return self._credentials
def set_credentials(self, name, password):
"""Sets authentication credentials.
:param str name: Username
:param str password: User's password
"""
if name and password:
self._credentials = BasicAuthCredentials(name, password)
elif not name:
raise ValueError("Basic Auth username is missing")
elif not password:
raise ValueError("Basic Auth password is missing")
def apply(self, url, headers):
"""Adds BasicAuth header to ``headers``.
:param str url: Request URL
:param dict headers: Request headers
"""
if self._auth_header is None:
if self._credentials is None:
                raise ValueError('Basic Auth credentials were not specified')
token = base64.b64encode(
('%s:%s' % self._credentials).encode('utf8'))
self._auth_header = 'Basic %s' % (token.strip().decode('utf8'))
headers[AUTHORIZATION] = self._auth_header
def update(self, response):
pass # pragma: no cover
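# --- Hedged usage sketch (added for illustration; not part of the original
# module). Shows the header value that ``apply`` injects for root:relax.
def _example_basic_auth_usage():
    headers = {}
    provider = BasicAuthProvider('root', 'relax')
    provider.apply('http://localhost:5984', headers)
    # headers[AUTHORIZATION] == 'Basic cm9vdDpyZWxheA=='
    return headers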
class CookieAuthProvider(AuthProvider):
"""Provides authentication by cookies."""
_cookies = None
def reset(self):
"""Resets provider instance to default state."""
self._cookies = None
def credentials(self):
# Reserved for future use.
pass # pragma: no cover
def set_credentials(self, name, password):
# Reserved for future use.
pass # pragma: no cover
def apply(self, url, headers):
"""Adds cookies to provided ``headers``. If ``headers`` already
contains any cookies, they would be merged with instance ones.
:param str url: Request URL
:param dict headers: Request headers
"""
if self._cookies is None:
return
cookie = http.cookies.SimpleCookie()
if COOKIE in headers:
cookie.load(headers.get(COOKIE, ''))
del headers[COOKIE]
for name, value in self._cookies.items():
if isinstance(value, http.cookies.Morsel):
# use dict method because SimpleCookie class modifies value
dict.__setitem__(cookie, name, value)
else:
cookie[name] = value
headers[COOKIE] = cookie.output(header='', sep=';').strip()
def update(self, response):
"""Updates cookies from the response.
:param response: :class:`aiocouchdb.client.HttpResponse` instance
"""
if response.cookies:
self._cookies = response.cookies
class OAuthProvider(AuthProvider):
"""Provides authentication via OAuth1. Requires ``oauthlib`` package."""
_credentials = None
def __init__(self, *, consumer_key=None, consumer_secret=None,
resource_key=None, resource_secret=None):
from oauthlib import oauth1 # pylint: disable=import-error
self._oauth1 = oauth1
self.set_credentials(consumer_key=consumer_key,
consumer_secret=consumer_secret,
resource_key=resource_key,
resource_secret=resource_secret)
def reset(self):
"""Resets provider instance to default state."""
self._credentials = None
def credentials(self):
"""Returns OAuth credentials.
:rtype: :class:`aiocouchdb.authn.OAuthCredentials`
"""
return self._credentials
def set_credentials(self, *, consumer_key=None, consumer_secret=None,
resource_key=None, resource_secret=None):
"""Sets OAuth credentials. Currently, all keyword arguments are
required for successful auth.
:param str consumer_key: Consumer key (consumer token)
:param str consumer_secret: Consumer secret
:param str resource_key: Resource key (oauth token)
:param str resource_secret: Resource secret (oauth token secret)
"""
creds = (consumer_key, consumer_secret, resource_key, resource_secret)
if not all(creds):
return
self._credentials = OAuthCredentials(*creds)
def apply(self, url, headers):
"""Adds OAuth1 signature to ``headers``.
:param str url: Request URL
:param dict headers: Request headers
"""
if self._credentials is None:
            raise ValueError('OAuth credentials were not specified')
client = self._oauth1.Client(
client_key=self._credentials.consumer_key,
client_secret=self._credentials.consumer_secret,
resource_owner_key=self._credentials.resource_key,
resource_owner_secret=self._credentials.resource_secret,
signature_type=self._oauth1.SIGNATURE_TYPE_AUTH_HEADER)
_, oauth_headers, _ = client.sign(url)
headers[AUTHORIZATION] = oauth_headers['Authorization']
def update(self, response):
pass # pragma: no cover
class ProxyAuthProvider(AuthProvider):
"""Provides CouchDB proxy authentication methods."""
_credentials = None
    #: Controls the name of the header used to specify the CouchDB username
    x_auth_username = X_AUTH_COUCHDB_USERNAME
    #: Controls the name of the header used to specify the list of CouchDB user roles
    x_auth_roles = X_AUTH_COUCHDB_ROLES
    #: Controls the name of the header used to provide the authentication token
    x_auth_token = X_AUTH_COUCHDB_TOKEN
def __init__(self, username=None, roles=None, secret=None, *,
x_auth_username=None, x_auth_roles=None, x_auth_token=None):
if x_auth_username is not None:
self.x_auth_username = x_auth_username
if x_auth_roles is not None:
self.x_auth_roles = x_auth_roles
if x_auth_token is not None:
self.x_auth_token = x_auth_token
if username or roles or secret:
self.set_credentials(username, roles, secret)
def reset(self):
"""Resets provider instance to default state."""
self._credentials = None
def credentials(self):
"""Returns three-element tuple of defined username, roles and secret."""
return self._credentials
def set_credentials(self, username, roles=None, secret=None):
"""Sets ProxyAuth credentials.
:param str username: CouchDB username
:param list roles: List of username roles
        :param str secret: ProxyAuth secret. Should match the one defined on
            the target CouchDB server.
"""
if not username:
            raise ValueError('Proxy Auth username should have a non-empty value')
self._credentials = ProxyAuthCredentials(username, roles, secret)
def apply(self, url, headers):
"""Adds ProxyAuth credentials to ``headers``.
:param str url: Request URL
:param dict headers: Request headers
"""
creds = self._credentials
if creds is None or not creds.username:
raise ValueError('Proxy Auth username is missing')
else:
headers[self.x_auth_username] = creds.username
if creds.roles is not None:
headers[self.x_auth_roles] = ','.join(creds.roles)
if creds.secret is not None:
headers[self.x_auth_token] = hmac.new(
creds.secret.encode('utf-8'),
creds.username.encode('utf-8'),
hashlib.sha1).hexdigest()
def update(self, response):
pass # pragma: no cover
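# --- Hedged usage sketch (added for illustration; not part of the original
# module). Shows the proxy-auth headers that ``apply`` would set.
def _example_proxy_auth_usage():
    headers = {}
    provider = ProxyAuthProvider('root', roles=['_admin'], secret='s3cr1t')
    provider.apply('http://localhost:5984', headers)
    # headers now maps the configured X-Auth-CouchDB-* header names to the
    # username, the comma-joined roles, and an HMAC-SHA1 token computed over
    # the username with the secret as key.
    return headers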
|
|
"""Support for Prometheus metrics export."""
import logging
import string
from aiohttp import web
import prometheus_client
import voluptuous as vol
from homeassistant import core as hacore
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_HVAC_ACTION,
CURRENT_HVAC_ACTIONS,
)
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.humidifier.const import (
ATTR_AVAILABLE_MODES,
ATTR_HUMIDITY,
)
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
ATTR_MODE,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
CONTENT_TYPE_TEXT_PLAIN,
EVENT_STATE_CHANGED,
PERCENTAGE,
STATE_ON,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import entityfilter, state as state_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.util.temperature import fahrenheit_to_celsius
_LOGGER = logging.getLogger(__name__)
API_ENDPOINT = "/api/prometheus"
DOMAIN = "prometheus"
CONF_FILTER = "filter"
CONF_PROM_NAMESPACE = "namespace"
CONF_COMPONENT_CONFIG = "component_config"
CONF_COMPONENT_CONFIG_GLOB = "component_config_glob"
CONF_COMPONENT_CONFIG_DOMAIN = "component_config_domain"
CONF_DEFAULT_METRIC = "default_metric"
CONF_OVERRIDE_METRIC = "override_metric"
COMPONENT_CONFIG_SCHEMA_ENTRY = vol.Schema(
{vol.Optional(CONF_OVERRIDE_METRIC): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
{
vol.Optional(CONF_FILTER, default={}): entityfilter.FILTER_SCHEMA,
vol.Optional(CONF_PROM_NAMESPACE): cv.string,
vol.Optional(CONF_DEFAULT_METRIC): cv.string,
vol.Optional(CONF_OVERRIDE_METRIC): cv.string,
vol.Optional(CONF_COMPONENT_CONFIG, default={}): vol.Schema(
{cv.entity_id: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}): vol.Schema(
{cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}): vol.Schema(
{cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
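# Hedged example of a matching configuration.yaml entry (illustration only;
# the entity glob and metric override below are hypothetical):
#
#   prometheus:
#     namespace: hass
#     filter:
#       include_domains:
#         - sensor
#         - climate
#     component_config_glob:
#       sensor.*_power:
#         override_metric: power_w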
def setup(hass, config):
"""Activate Prometheus component."""
hass.http.register_view(PrometheusView(prometheus_client))
conf = config[DOMAIN]
entity_filter = conf[CONF_FILTER]
namespace = conf.get(CONF_PROM_NAMESPACE)
climate_units = hass.config.units.temperature_unit
override_metric = conf.get(CONF_OVERRIDE_METRIC)
default_metric = conf.get(CONF_DEFAULT_METRIC)
component_config = EntityValues(
conf[CONF_COMPONENT_CONFIG],
conf[CONF_COMPONENT_CONFIG_DOMAIN],
conf[CONF_COMPONENT_CONFIG_GLOB],
)
metrics = PrometheusMetrics(
prometheus_client,
entity_filter,
namespace,
climate_units,
component_config,
override_metric,
default_metric,
)
hass.bus.listen(EVENT_STATE_CHANGED, metrics.handle_event)
return True
class PrometheusMetrics:
"""Model all of the metrics which should be exposed to Prometheus."""
def __init__(
self,
prometheus_cli,
entity_filter,
namespace,
climate_units,
component_config,
override_metric,
default_metric,
):
"""Initialize Prometheus Metrics."""
self.prometheus_cli = prometheus_cli
self._component_config = component_config
self._override_metric = override_metric
self._default_metric = default_metric
self._filter = entity_filter
self._sensor_metric_handlers = [
self._sensor_override_component_metric,
self._sensor_override_metric,
self._sensor_attribute_metric,
self._sensor_default_metric,
self._sensor_fallback_metric,
]
if namespace:
self.metrics_prefix = f"{namespace}_"
else:
self.metrics_prefix = ""
self._metrics = {}
self._climate_units = climate_units
def handle_event(self, event):
"""Listen for new messages on the bus, and add them to Prometheus."""
state = event.data.get("new_state")
if state is None:
return
entity_id = state.entity_id
_LOGGER.debug("Handling state update for %s", entity_id)
domain, _ = hacore.split_entity_id(entity_id)
if not self._filter(state.entity_id):
return
handler = f"_handle_{domain}"
if hasattr(self, handler) and state.state != STATE_UNAVAILABLE:
getattr(self, handler)(state)
labels = self._labels(state)
state_change = self._metric(
"state_change", self.prometheus_cli.Counter, "The number of state changes"
)
state_change.labels(**labels).inc()
entity_available = self._metric(
"entity_available",
self.prometheus_cli.Gauge,
"Entity is available (not in the unavailable state)",
)
entity_available.labels(**labels).set(float(state.state != STATE_UNAVAILABLE))
last_updated_time_seconds = self._metric(
"last_updated_time_seconds",
self.prometheus_cli.Gauge,
"The last_updated timestamp",
)
last_updated_time_seconds.labels(**labels).set(state.last_updated.timestamp())
def _handle_attributes(self, state):
for key, value in state.attributes.items():
metric = self._metric(
f"{state.domain}_attr_{key.lower()}",
self.prometheus_cli.Gauge,
f"{key} attribute of {state.domain} entity",
)
try:
value = float(value)
metric.labels(**self._labels(state)).set(value)
except (ValueError, TypeError):
pass
def _metric(self, metric, factory, documentation, extra_labels=None):
labels = ["entity", "friendly_name", "domain"]
if extra_labels is not None:
labels.extend(extra_labels)
try:
return self._metrics[metric]
except KeyError:
full_metric_name = self._sanitize_metric_name(
f"{self.metrics_prefix}{metric}"
)
self._metrics[metric] = factory(full_metric_name, documentation, labels)
return self._metrics[metric]
@staticmethod
def _sanitize_metric_name(metric: str) -> str:
return "".join(
[
c
if c in string.ascii_letters
or c in string.digits
or c == "_"
or c == ":"
else f"u{hex(ord(c))}"
for c in metric
]
)
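    # Hedged illustration: characters outside [A-Za-z0-9_:] are replaced with
    # "u" + hex(ord(c)), e.g. "temperature_°c" becomes "temperature_u0xb0c".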
@staticmethod
def state_as_number(state):
"""Return a state casted to a float."""
try:
value = state_helper.state_as_number(state)
except ValueError:
_LOGGER.debug("Could not convert %s to float", state)
value = 0
return value
@staticmethod
def _labels(state):
return {
"entity": state.entity_id,
"domain": state.domain,
"friendly_name": state.attributes.get(ATTR_FRIENDLY_NAME),
}
def _battery(self, state):
if "battery_level" in state.attributes:
metric = self._metric(
"battery_level_percent",
self.prometheus_cli.Gauge,
"Battery level as a percentage of its capacity",
)
try:
value = float(state.attributes[ATTR_BATTERY_LEVEL])
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_binary_sensor(self, state):
metric = self._metric(
"binary_sensor_state",
self.prometheus_cli.Gauge,
"State of the binary sensor (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_input_boolean(self, state):
metric = self._metric(
"input_boolean_state",
self.prometheus_cli.Gauge,
"State of the input boolean (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_device_tracker(self, state):
metric = self._metric(
"device_tracker_state",
self.prometheus_cli.Gauge,
"State of the device tracker (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_person(self, state):
metric = self._metric(
"person_state", self.prometheus_cli.Gauge, "State of the person (0/1)"
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_light(self, state):
metric = self._metric(
"light_state", self.prometheus_cli.Gauge, "Load level of a light (0..1)"
)
try:
if "brightness" in state.attributes and state.state == STATE_ON:
value = state.attributes["brightness"] / 255.0
else:
value = self.state_as_number(state)
value = value * 100
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_lock(self, state):
metric = self._metric(
"lock_state", self.prometheus_cli.Gauge, "State of the lock (0/1)"
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_climate(self, state):
temp = state.attributes.get(ATTR_TEMPERATURE)
if temp:
if self._climate_units == TEMP_FAHRENHEIT:
temp = fahrenheit_to_celsius(temp)
metric = self._metric(
"temperature_c",
self.prometheus_cli.Gauge,
"Temperature in degrees Celsius",
)
metric.labels(**self._labels(state)).set(temp)
current_temp = state.attributes.get(ATTR_CURRENT_TEMPERATURE)
if current_temp:
if self._climate_units == TEMP_FAHRENHEIT:
current_temp = fahrenheit_to_celsius(current_temp)
metric = self._metric(
"current_temperature_c",
self.prometheus_cli.Gauge,
"Current Temperature in degrees Celsius",
)
metric.labels(**self._labels(state)).set(current_temp)
current_action = state.attributes.get(ATTR_HVAC_ACTION)
if current_action:
metric = self._metric(
"climate_action",
self.prometheus_cli.Gauge,
"HVAC action",
["action"],
)
for action in CURRENT_HVAC_ACTIONS:
metric.labels(**dict(self._labels(state), action=action)).set(
float(action == current_action)
)
def _handle_humidifier(self, state):
humidifier_target_humidity_percent = state.attributes.get(ATTR_HUMIDITY)
if humidifier_target_humidity_percent:
metric = self._metric(
"humidifier_target_humidity_percent",
self.prometheus_cli.Gauge,
"Target Relative Humidity",
)
metric.labels(**self._labels(state)).set(humidifier_target_humidity_percent)
metric = self._metric(
"humidifier_state",
self.prometheus_cli.Gauge,
"State of the humidifier (0/1)",
)
try:
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
current_mode = state.attributes.get(ATTR_MODE)
available_modes = state.attributes.get(ATTR_AVAILABLE_MODES)
if current_mode and available_modes:
metric = self._metric(
"humidifier_mode",
self.prometheus_cli.Gauge,
"Humidifier Mode",
["mode"],
)
for mode in available_modes:
metric.labels(**dict(self._labels(state), mode=mode)).set(
float(mode == current_mode)
)
def _handle_sensor(self, state):
unit = self._unit_string(state.attributes.get(ATTR_UNIT_OF_MEASUREMENT))
for metric_handler in self._sensor_metric_handlers:
metric = metric_handler(state, unit)
if metric is not None:
break
if metric is not None:
_metric = self._metric(
metric, self.prometheus_cli.Gauge, f"Sensor data measured in {unit}"
)
try:
value = self.state_as_number(state)
if unit == TEMP_FAHRENHEIT:
value = fahrenheit_to_celsius(value)
_metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
self._battery(state)
def _sensor_default_metric(self, state, unit):
"""Get default metric."""
return self._default_metric
@staticmethod
def _sensor_attribute_metric(state, unit):
"""Get metric based on device class attribute."""
metric = state.attributes.get(ATTR_DEVICE_CLASS)
if metric is not None:
return f"{metric}_{unit}"
return None
def _sensor_override_metric(self, state, unit):
"""Get metric from override in configuration."""
if self._override_metric:
return self._override_metric
return None
def _sensor_override_component_metric(self, state, unit):
"""Get metric from override in component confioguration."""
return self._component_config.get(state.entity_id).get(CONF_OVERRIDE_METRIC)
@staticmethod
def _sensor_fallback_metric(state, unit):
"""Get metric from fallback logic for compatibility."""
if unit in (None, ""):
_LOGGER.debug("Unsupported sensor: %s", state.entity_id)
return None
return f"sensor_unit_{unit}"
@staticmethod
def _unit_string(unit):
"""Get a formatted string of the unit."""
if unit is None:
return
units = {
TEMP_CELSIUS: "c",
TEMP_FAHRENHEIT: "c", # F should go into C metric
PERCENTAGE: "percent",
}
default = unit.replace("/", "_per_")
default = default.lower()
return units.get(unit, default)
def _handle_switch(self, state):
metric = self._metric(
"switch_state", self.prometheus_cli.Gauge, "State of the switch (0/1)"
)
try:
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
self._handle_attributes(state)
def _handle_zwave(self, state):
self._battery(state)
def _handle_automation(self, state):
metric = self._metric(
"automation_triggered_count",
self.prometheus_cli.Counter,
"Count of times an automation has been triggered",
)
metric.labels(**self._labels(state)).inc()
class PrometheusView(HomeAssistantView):
"""Handle Prometheus requests."""
url = API_ENDPOINT
name = "api:prometheus"
def __init__(self, prometheus_cli):
"""Initialize Prometheus view."""
self.prometheus_cli = prometheus_cli
async def get(self, request):
"""Handle request for Prometheus metrics."""
_LOGGER.debug("Received Prometheus metrics request")
return web.Response(
body=self.prometheus_cli.generate_latest(),
content_type=CONTENT_TYPE_TEXT_PLAIN,
)
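# A minimal standalone sketch (not part of the component above) of the pattern the
# _handle_* methods rely on: one prometheus_client Gauge per metric name, always
# labelled with the same entity/domain/friendly_name label set. It assumes only the
# prometheus_client package; the entity values are made up.
if __name__ == "__main__":
    from prometheus_client import CollectorRegistry, Gauge, generate_latest

    demo_registry = CollectorRegistry()
    demo_gauge = Gauge(
        "binary_sensor_state",
        "State of the binary sensor (0/1)",
        ["entity", "domain", "friendly_name"],
        registry=demo_registry,
    )
    # Equivalent of metric.labels(**self._labels(state)).set(value) above.
    demo_gauge.labels(
        entity="binary_sensor.front_door",
        domain="binary_sensor",
        friendly_name="Front Door",
    ).set(1.0)
    print(generate_latest(demo_registry).decode())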
|
|
import traceback
import optparse
import inspect
import sys
import os
import shlex
from ast import literal_eval as safe_eval
try:
_maxsize = sys.maxint
except:
# python3
_maxsize = sys.maxsize
PY3 = sys.version_info[0] == 3
class MagicOptionParser(optparse.OptionParser):
def error(self, msg):
raise Exception('Magic Parse error: "%s"' % msg)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
raise Exception(msg)
## FIXME: override help to also stop processing
## currently --help gives syntax error
class Magic(object):
"""
Base class to define magics for MetaKernel based kernels.
Users can redefine the default magics provided by Metakernel
by creating a module with the exact same name as the
Metakernel magic.
For example, you can override %matplotlib in your kernel by
writing a new magic inside magics/matplotlib_magic.py
"""
def __init__(self, kernel):
self.kernel = kernel
self.evaluate = True
self.code = ''
def get_args(self, mtype, name, code, args):
self.code = code
old_args = args
mtype = mtype.replace('sticky', 'cell')
func = getattr(self, mtype + '_' + name)
try:
args, kwargs = _parse_args(func, args, usage=self.get_help(mtype, name))
except Exception as e:
self.kernel.Error(str(e))
return self
arg_spec = inspect.getfullargspec(func) if PY3 \
else inspect.getargspec(func)
fargs = arg_spec.args
if fargs[0] == 'self':
fargs = fargs[1:]
fargs = [f for f in fargs if not f in kwargs.keys()]
if len(args) > len(fargs) and not arg_spec.varargs:
extra = ' '.join(str(s) for s in (args[len(fargs) - 1:]))
args = args[:len(fargs) - 1] + [extra]
return (args, kwargs, old_args)
def call_magic(self, mtype, name, code, args):
self.code = code
old_args = args
mtype = mtype.replace('sticky', 'cell')
func = getattr(self, mtype + '_' + name)
try:
args, kwargs = _parse_args(func, args, usage=self.get_help(mtype, name))
except Exception as e:
self.kernel.Error(str(e))
return self
arg_spec = inspect.getfullargspec(func) if PY3 \
else inspect.getargspec(func)
fargs = arg_spec.args
if fargs[0] == 'self':
fargs = fargs[1:]
fargs = [f for f in fargs if not f in kwargs.keys()]
if len(args) > len(fargs) and not arg_spec.varargs:
extra = ' '.join(str(s) for s in (args[len(fargs) - 1:]))
args = args[:len(fargs) - 1] + [extra]
try:
try:
func(*args, **kwargs)
except TypeError:
func(old_args)
except Exception as exc:
msg = "Error in calling magic '%s' on %s:\n %s\n args: %s\n kwargs: %s" % (
name, mtype, str(exc), args, kwargs)
self.kernel.Error(msg)
self.kernel.Error(traceback.format_exc())
self.kernel.Error(self.get_help(mtype, name))
# return dummy magic to end processing:
return Magic(self.kernel)
return self
def get_help(self, mtype, name, level=0):
if hasattr(self, mtype + '_' + name):
func = getattr(self, mtype + '_' + name)
if level == 0:
if func.__doc__:
return _trim(func.__doc__)
else:
return "No help available for magic '%s' for %ss." % (name, mtype)
else:
filename = inspect.getfile(func)
if filename and os.path.exists(filename):
with open(filename) as f: return f.read()
else:
return "No help available for magic '%s' for %ss." % (name, mtype)
else:
return "No such magic '%s' for %ss." % (name, mtype)
def get_help_on(self, info, level=0):
return "Sorry, no help is available on '%s'." % info['code']
def get_completions(self, info):
"""
Get completions based on info dict from magic.
"""
return []
def get_magics(self, mtype):
magics = []
for name in dir(self):
if name.startswith(mtype + '_'):
magics.append(name.replace(mtype + '_', ''))
return magics
def get_code(self):
return self.code
def post_process(self, retval):
return retval
def option(*args, **kwargs):
"""Return decorator that adds a magic option to a function.
"""
def decorator(func):
help_text = ""
if not getattr(func, 'has_options', False):
func.has_options = True
func.options = []
help_text += 'Options:\n-------\n'
try:
option = optparse.Option(*args, **kwargs)
except optparse.OptionError:
help_text += args[0] + "\n"
else:
help_text += _format_option(option) + "\n"
func.options.append(option)
if func.__doc__:
func.__doc__ += _indent(func.__doc__, help_text)
else:
func.__doc__ = help_text
return func
return decorator
def _parse_args(func, args, usage=None):
"""Parse the arguments given to a magic function"""
if isinstance(args, list):
args = ' '.join(args)
args = _split_args(args)
kwargs = dict()
if getattr(func, 'has_options', False):
parser = MagicOptionParser(usage=usage, conflict_handler="resolve")
parser.add_options(func.options)
left = []
value = None
if '--' in args:
left = args[:args.index('--')]
value, args = parser.parse_args(args[args.index('--') + 1:])
else:
while args:
try:
value, args = parser.parse_args(args)
except Exception:
left.append(args.pop(0))
else:
break
args = left + args
if value:
kwargs = value.__dict__
new_args = []
for arg in args:
try:
new_args.append(safe_eval(arg))
except:
new_args.append(arg)
for (key, value) in kwargs.items():
try:
kwargs[key] = safe_eval(value)
except:
pass
return new_args, kwargs
def _split_args(args):
try:
# do not use posix mode, to avoid eating quote characters
args = shlex.split(args, posix=False)
except:
# parse error; let's pass args along rather than crashing
args = args.split()
new_args = []
temp = ''
for arg in args:
if arg.startswith('-'):
new_args.append(arg)
elif temp:
arg = temp + ' ' + arg
try:
safe_eval(arg)
except:
temp = arg
else:
new_args.append(arg)
temp = ''
elif arg.startswith(('(', '[', '{')) or '(' in arg:
try:
safe_eval(arg)
except:
temp = arg
else:
new_args.append(arg)
else:
new_args.append(arg)
if temp:
new_args.append(temp)
return new_args
def _format_option(option):
output = ''
if option._short_opts:
output = option._short_opts[0] + ' '
output += option.get_opt_string() + ' '
output += ' ' * (15 - len(output))
output += option.help + ' '
if not option.default == ('NO', 'DEFAULT'):
output += '[default: %s]' % option.default
return output
def _trim(docstring, return_lines=False):
"""
Trim off unnecessary leading indentation.
"""
# from: http://legacy.python.org/dev/peps/pep-0257/
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
indent = _min_indent(lines)
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < _maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
if return_lines:
return trimmed
else:
# Return a single string:
return '\n'.join(trimmed)
def _min_indent(lines):
"""
Determine minimum indentation (first line doesn't count):
"""
indent = _maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
return indent
def _indent(docstring, text):
"""
Return text indented at the appropriate indentation level.
"""
if not docstring:
return text
lines = docstring.expandtabs().splitlines()
indent = _min_indent(lines)
if indent < _maxsize:
newlines = _trim(text, return_lines=True)
return "\n" + ("\n".join([(" " * indent) + line for line in newlines]))
else:
return "\n" + text
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to bundle root filesystem to a tarball.
Creates a tar bundle and a Manifest, which can be uploaded to image store.
"""
import logging
from optparse import OptionParser
import os
import shutil
import subprocess
import tempfile
import time
from gcimagebundlelib import block_disk
from gcimagebundlelib import exclude_spec
from gcimagebundlelib import platform_factory
from gcimagebundlelib import utils
def SetupArgsParser():
"""Sets up the command line flags."""
parser = OptionParser()
parser.add_option('-d', '--disk', dest='disk',
default='/dev/sda',
help='Disk to bundle.')
parser.add_option('-r', '--root', dest='root_directory',
default='/', metavar='ROOT',
help='Root of the file system to bundle.'
' Recursively bundles all sub directories.')
parser.add_option('-e', '--excludes', dest='excludes',
help='Comma separated list of sub directories to exclude.'
' The defaults are platform specific.')
parser.add_option('-o', '--output_directory', dest='output_directory',
default='/tmp/', metavar='DIR',
help='Output directory for image.')
parser.add_option('--output_file_name', dest='output_file_name',
default=None, metavar='FILENAME',
help=('Output filename for the image. Default is a digest'
' of the image bytes.'))
parser.add_option('--include_mounts', dest='include_mounts',
help='Don\'t ignore mounted filesystems under ROOT.',
action='store_true', default=False)
parser.add_option('-v', '--version',
action='store_true', dest='display_version', default=False,
help='Print the tool version.')
parser.add_option('--loglevel', dest='log_level',
help='Debug logging level.', default='INFO',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])
parser.add_option('--log_file', dest='log_file',
help='Output file for log messages.')
parser.add_option('-k', '--key', dest='key', default='nebula',
help='Public key used for signing the image.')
parser.add_option('--nocleanup', dest='cleanup',
action='store_false', default=True,
help=' Do not clean up temporary and log files.')
#TODO(user): Get dehumanize.
parser.add_option('--fssize', dest='fs_size', default=10*1024*1024*1024,
type='int', help='File system size in bytes')
parser.add_option('-b', '--bucket', dest='bucket',
help='Destination storage bucket')
parser.add_option('-f', '--filesystem', dest='file_system',
default=None,
help='File system type for the image.')
parser.add_option('--skip_disk_space_check', dest='skip_disk_space_check',
default=False, action='store_true',
help='Skip the disk space requirement check.')
return parser
def VerifyArgs(parser, options):
"""Verifies that commandline flags are consistent."""
if not options.output_directory:
parser.error('output bundle directory must be specified.')
if not os.path.exists(options.output_directory):
parser.error('output bundle directory does not exist.')
# TODO(user): add more verification as needed
def EnsureSuperUser():
"""Ensures that current user has super user privileges."""
if os.getuid() != 0:
logging.warning('Tool must be run as root.')
exit(-1)
def GetLogLevel(options):
"""Log Level string to logging.LogLevel mapping."""
level = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
if options.log_level in level:
return level[options.log_level]
print 'Invalid logging level. Defaulting to INFO.'
return logging.INFO
def SetupLogging(options, log_dir='/tmp'):
"""Set up logging.
All messages above INFO level are also logged to console.
Args:
options: collection of command line options.
log_dir: directory used to generate log files.
"""
if options.log_file:
logfile = options.log_file
else:
logfile = tempfile.mktemp(dir=log_dir, prefix='bundle_log_')
print 'Starting logging in %s' % logfile
logging.basicConfig(filename=logfile,
level=GetLogLevel(options),
format='%(asctime)s %(levelname)s:%(name)s:%(message)s')
# Use GMT timestamp in logging.
logging.Formatter.converter=time.gmtime
console = logging.StreamHandler()
console.setLevel(GetLogLevel(options))
logging.getLogger().addHandler(console)
def PrintVersionInfo():
#TODO: Should read from the VERSION file instead.
print 'version 1.2.8'
def GetTargetFilesystem(options, guest_platform):
if options.file_system:
return options.file_system
else:
return guest_platform.GetPreferredFilesystemType()
def main():
parser = SetupArgsParser()
(options, _) = parser.parse_args()
if options.display_version:
PrintVersionInfo()
return 0
EnsureSuperUser()
VerifyArgs(parser, options)
scratch_dir = tempfile.mkdtemp(dir=options.output_directory)
SetupLogging(options, scratch_dir)
try:
guest_platform = platform_factory.PlatformFactory(
options.root_directory).GetPlatform()
except platform_factory.UnknownPlatformException:
logging.critical('Platform is not supported.'
' Platform rules can be added to platform_factory.py.')
return -1
temp_file_name = tempfile.mktemp(dir=scratch_dir, suffix='.tar.gz')
file_system = GetTargetFilesystem(options, guest_platform)
logging.info('File System: %s', file_system)
logging.info('Disk Size: %s bytes', options.fs_size)
bundle = block_disk.RootFsRaw(
options.fs_size, file_system, options.skip_disk_space_check)
bundle.SetTarfile(temp_file_name)
if options.disk:
readlink_command = ['readlink', '-f', options.disk]
final_path = utils.RunCommand(readlink_command).strip()
logging.info('Resolved %s to %s', options.disk, final_path)
bundle.AddDisk(final_path)
# TODO(user): Find the location where the first partition of the disk
# is mounted and add it as the source instead of relying on the source
# param flag
bundle.AddSource(options.root_directory)
bundle.SetKey(options.key)
bundle.SetScratchDirectory(scratch_dir)
# Merge platform specific exclude list, mounts points
# and user specified excludes
excludes = guest_platform.GetExcludeList()
if options.excludes:
excludes.extend([exclude_spec.ExcludeSpec(x) for x in
options.excludes.split(',')])
logging.info('exclude list: %s', ' '.join([x.GetSpec() for x in excludes]))
bundle.AppendExcludes(excludes)
if not options.include_mounts:
mount_points = utils.GetMounts(options.root_directory)
logging.info('ignoring mounts %s', ' '.join(mount_points))
bundle.AppendExcludes([exclude_spec.ExcludeSpec(x, preserve_dir=True) for x
in utils.GetMounts(options.root_directory)])
bundle.SetPlatform(guest_platform)
# Verify that bundle attributes are correct and create tar bundle.
bundle.Verify()
(fs_size, digest) = bundle.Bundleup()
if not digest:
logging.critical('Could not get digest for the bundle.'
' The bundle may not be created correctly')
return -1
if fs_size > options.fs_size:
logging.critical('Size of tar %d exceeds the file system size %d.', fs_size,
options.fs_size)
return -1
if options.output_file_name:
output_file = os.path.join(
options.output_directory, options.output_file_name)
else:
output_file = os.path.join(
options.output_directory, '%s.image.tar.gz' % digest)
os.rename(temp_file_name, output_file)
logging.info('Created tar.gz file at %s' % output_file)
if options.bucket:
bucket = options.bucket
if bucket.startswith('gs://'):
output_bucket = '%s/%s' % (
bucket, os.path.basename(output_file))
else:
output_bucket = 'gs://%s/%s' % (
bucket, os.path.basename(output_file))
# TODO: Consider using boto library directly.
cmd = ['gsutil', 'cp', output_file, output_bucket]
retcode = subprocess.call(cmd)
if retcode != 0:
logging.critical('Failed to copy image to bucket. '
'gsutil returned %d. To retry, run the command: %s',
retcode, ' '.join(cmd))
return -1
logging.info('Uploaded image to %s', output_bucket)
# If we've uploaded, then we can remove the local file.
os.remove(output_file)
if options.cleanup:
shutil.rmtree(scratch_dir)
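if __name__ == '__main__':
  # Hedged demo (not part of the original tool): exercise only the command line
  # parsing, without touching disks, requiring root, or calling main(). The
  # sample flag values are made up.
  demo_parser = SetupArgsParser()
  demo_options, _ = demo_parser.parse_args(
      ['-r', '/', '-o', '/tmp', '--loglevel', 'DEBUG'])
  print 'parsed disk=%s root=%s output=%s loglevel=%s' % (
      demo_options.disk, demo_options.root_directory,
      demo_options.output_directory, demo_options.log_level)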
|
|
# Copyright 2017 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`lib_path_combinator_test` --- lib.path_combinator test
============================================================
"""
# Stdlib
from itertools import product
from unittest.mock import patch, call
# External packages
import nose
import nose.tools as ntools
# SCION
from lib import path_combinator
from lib.packet.path import SCIONPath
from lib.sciond_api.path_meta import FwdPathMeta, PathInterface
from test.testcommon import assert_these_calls, create_mock, create_mock_full
class PathCombinatorBase(object):
def _mk_seg(self, asms):
seg = create_mock(["p"])
seg.p = create_mock_full({"asEntries": asms})
return seg
def _generate_none(self):
for up, down in (
(False, True),
(True, False),
(self._mk_seg(False), True),
(self._mk_seg(True), self._mk_seg(False)),
):
yield up, down
class TestPathCombinatorBuildShortcutPaths(object):
"""
Unit tests for lib.path_combinator.build_shortcut_paths
"""
@patch("lib.path_combinator._build_shortcuts",
new_callable=create_mock)
def test(self, build_path):
up_segments = ['up0', 'up1']
down_segments = ['down0', 'down1']
build_path.side_effect = [['path0'], ['path1'], [], ['path1']]
peer_revs = create_mock()
ntools.eq_(
path_combinator.build_shortcut_paths(
up_segments, down_segments, peer_revs),
["path0", "path1"])
calls = [call(*x, peer_revs)
for x in product(up_segments, down_segments)]
assert_these_calls(build_path, calls)
class TestPathCombinatorBuildShortcuts(PathCombinatorBase):
"""
Unit tests for lib.path_combinator._build_shortcuts
"""
def _check_none(self, up_seg, down_seg):
peer_revs = create_mock()
ntools.eq_(
path_combinator._build_shortcuts(up_seg, down_seg, peer_revs), [])
def test_none(self):
for up, down in self._generate_none():
yield self._check_none, up, down
@patch("lib.path_combinator._get_xovr_peer",
new_callable=create_mock)
def test_no_xovr_peer(self, get_xovr_peer):
up = self._mk_seg(True)
down = self._mk_seg(True)
get_xovr_peer.return_value = None, None
peer_revs = create_mock()
# Call
ntools.eq_(path_combinator._build_shortcuts(up, down, peer_revs), [])
# Tests
get_xovr_peer.assert_called_once_with(up, down, peer_revs)
@patch("lib.path_combinator._join_xovr",
new_callable=create_mock)
@patch("lib.path_combinator._join_peer",
new_callable=create_mock)
@patch("lib.path_combinator._get_xovr_peer",
new_callable=create_mock)
def _check_xovrs_peers(self, xovr, peer, is_peer, get_xovr_peer,
join_peer, join_xovr):
up = self._mk_seg(True)
down = self._mk_seg(True)
get_xovr_peer.return_value = xovr, peer
peer_revs = create_mock()
# Call
if is_peer:
ntools.eq_(path_combinator._build_shortcuts(up, down, peer_revs),
join_peer.return_value)
else:
ntools.eq_(path_combinator._build_shortcuts(up, down, peer_revs),
join_xovr.return_value)
# Tests
if is_peer:
join_peer.assert_called_once_with(up, down, peer, peer_revs)
else:
join_xovr.assert_called_once_with(up, down, xovr)
def test_with_both(self):
for xovr, peer, is_peer in (
[(1, 2), (3, 1), True],
[(1, 3), (3, 1), False],
[(1, 5), (3, 1), False],
):
yield self._check_xovrs_peers, xovr, peer, is_peer
def test_with_only_xovr(self):
yield self._check_xovrs_peers, (1, 2), None, False
def test_with_only_peer(self):
yield self._check_xovrs_peers, None, (1, 2), True
class TestPathCombinatorCopySegment(object):
"""
Unit tests for lib.path_combinator._copy_segment
"""
def test_no_segment(self):
ntools.eq_(path_combinator._copy_segment(None, False, False, "xovrs"),
(None, None, float("inf")))
@patch("lib.path_combinator._copy_hofs",
new_callable=create_mock)
def test_copy_up(self, copy_hofs):
seg = create_mock(["iter_asms", "infoF"])
info = create_mock(["up_flag"])
seg.infoF.return_value = info
hofs = []
for _ in range(3):
hof = create_mock(["xover"])
hof.xover = False
hofs.append(hof)
copy_hofs.return_value = hofs, None
# Call
ntools.eq_(path_combinator._copy_segment(seg, True, True),
(info, hofs, None))
# Tests
ntools.eq_(info.up_flag, True)
copy_hofs.assert_called_once_with(seg.iter_asms.return_value,
reverse=True)
ntools.eq_(hofs[0].xover, True)
ntools.eq_(hofs[1].xover, False)
ntools.eq_(hofs[2].xover, True)
@patch("lib.path_combinator._copy_hofs",
new_callable=create_mock)
def test_copy_down(self, copy_hofs):
seg = create_mock(["iter_asms", "infoF"])
info = create_mock(["up_flag"])
seg.infoF.return_value = info
copy_hofs.return_value = "hofs", None
# Call
ntools.eq_(path_combinator._copy_segment(seg, False, False, up=False),
(info, "hofs", None))
# Tests
copy_hofs.assert_called_once_with(seg.iter_asms.return_value,
reverse=False)
class TestPathCombinatorGetXovrPeer(object):
"""
Unit tests for lib.path_combinator._get_xovr_peer
"""
def test_none(self):
seg = create_mock_full({"iter_asms()": []})
peer_revs = create_mock()
# Call
ntools.eq_(path_combinator._get_xovr_peer(seg, seg, peer_revs),
(None, None))
@patch("lib.path_combinator._find_peer_hfs",
new_callable=create_mock)
def test_xovr(self, find):
up_asms = [
create_mock_full({"isd_as()": "1-1"}),
create_mock_full({"isd_as()": "1-2"}),
create_mock_full({"isd_as()": "1-3"}),
]
up_seg = create_mock_full({"iter_asms()": up_asms})
down_asms = [
create_mock_full({"isd_as()": "1-1"}),
create_mock_full({"isd_as()": "1-2"}),
create_mock_full({"isd_as()": "1-4"}),
]
down_seg = create_mock_full({"iter_asms()": down_asms})
find.return_value = False
peer_revs = create_mock()
# Call
ntools.eq_(path_combinator._get_xovr_peer(up_seg, down_seg, peer_revs),
((2, 2), None))
@patch("lib.path_combinator._find_peer_hfs",
new_callable=create_mock)
def test_peer(self, find):
up_asms = [
create_mock_full({"isd_as()": "1-1"}), # peers with 1-10
create_mock_full({"isd_as()": "1-2"}), # peers with 1-12
create_mock_full({"isd_as()": "1-3"}),
]
up_seg = create_mock_full({"iter_asms()": up_asms})
down_asms = [
create_mock_full({"isd_as()": "1-10"}), # peers with 1-1
create_mock_full({"isd_as()": "1-11"}),
create_mock_full({"isd_as()": "1-12"}), # peers with 1-2
]
down_seg = create_mock_full({"iter_asms()": down_asms})
peer_revs = create_mock()
def matching_peers(a, b, c):
return (a == up_asms[0] and b == down_asms[0]) or (
a == up_asms[1] and b == down_asms[2])
find.side_effect = matching_peers
# Call
ntools.eq_(path_combinator._get_xovr_peer(up_seg, down_seg, peer_revs),
(None, (2, 3)))
class PathCombinatorJoinShortcutsBase(object):
def _setup(self, path_args, copy_segment):
up_segment = create_mock(["asm"])
up_segment.asm = create_mock()
down_segment = create_mock(["asm"])
down_segment.asm = create_mock()
point = (1, 2)
up_iof = create_mock(["shortcut", "peer"])
down_iof = create_mock(["shortcut", "peer"])
copy_segment.side_effect = [(up_iof, ["A", "B"], "up hof", 1500),
(down_iof, ["C"], "down hof", 1400)]
path_args.return_value = ()
return up_segment, down_segment, point
class TestPathCombinatorJoinCrossover(PathCombinatorJoinShortcutsBase):
"""
Unit test for lib.path_combinator._join_xovr
"""
@patch("lib.path_combinator._copy_segment_shortcut",
new_callable=create_mock)
@patch("lib.path_combinator._shortcut_path_args",
new_callable=create_mock)
@patch("lib.path_combinator._build_shortcut_interface_list",
new_callable=create_mock)
def test_xovr(self, build_list, path_args, copy_segment):
up_segment, down_segment, point = self._setup(path_args, copy_segment)
path_meta = FwdPathMeta.from_values(SCIONPath(), [], 0)
ntools.eq_(
path_combinator._join_xovr(up_segment, down_segment, point)[0],
path_meta)
copy_segment.assert_any_call(up_segment, 1)
copy_segment.assert_any_call(down_segment, 2, up=False)
ntools.eq_(build_list.call_count, 1)
class TestPathCombinatorJoinPeer(PathCombinatorJoinShortcutsBase):
"""
Unit test for lib.path_combinator._join_xovr
"""
@patch("lib.path_combinator._copy_segment_shortcut",
new_callable=create_mock)
@patch("lib.path_combinator._shortcut_path_args",
new_callable=create_mock)
@patch("lib.path_combinator._build_shortcut_interface_list",
new_callable=create_mock)
@patch("lib.path_combinator._find_peer_hfs",
new_callable=create_mock)
def test_peer(self, find_peers, build_list, path_args, copy_segment):
up_segment, down_segment, point = self._setup(path_args, copy_segment)
find_peers.return_value = [("uph1", "dph1", 1500),
("uph2", "dph2", 1500)]
peer_revs = create_mock()
path_meta = FwdPathMeta.from_values(SCIONPath(), [], 0)
ntools.eq_(path_combinator._join_peer(
up_segment, down_segment, point, peer_revs)[0], path_meta)
copy_segment.assert_any_call(up_segment, 1)
copy_segment.assert_any_call(down_segment, 2, up=False)
ntools.eq_(build_list.call_count, 2)
class TestPathCombinatorShortcutPathArgs(object):
"""
Unit test for lib.path_combinator._shortcut_path_args
"""
def test(self):
up_iof = create_mock(["hops"])
up_hofs = ["up hof 1", "up hof 2", "up hof 3"]
down_iof = create_mock(["hops"])
down_hofs = ["down hof"]
ret = path_combinator._shortcut_path_args(up_iof, up_hofs,
down_iof, down_hofs)
ntools.eq_(ret, [up_iof, up_hofs])
ntools.eq_(up_iof.hops, 3)
class TestPathCombinatorBuildShortcutInterfaceList(object):
"""
Unit tests for
lib.path_combinator._build_shortcut_interface_list
"""
@patch("lib.path_combinator._build_interface_list",
new_callable=create_mock)
def _check_xovr_peers(self, peers, build_if_list):
up_asm = create_mock_full({"isd_as()": 11})
up_seg = create_mock_full({"iter_asms()": ["A", "B"], "asm()": up_asm})
up_idx = 1
down_asm = create_mock_full({"isd_as()": 12})
down_seg = create_mock_full({"iter_asms()": ["C", "D"],
"asm()": down_asm})
down_idx = 2
build_if_list.side_effect = [[], []]
if_list = path_combinator._build_shortcut_interface_list(
up_seg, up_idx, down_seg, down_idx, peers)
assert_these_calls(build_if_list, [call(["B", "A"]),
call(["C", "D"], up=False)])
if peers:
up_hof, down_hof = peers
ntools.eq_(
if_list, [PathInterface.from_values(11, up_hof.ingress_if),
PathInterface.from_values(12, down_hof.ingress_if)])
def test_xovr(self):
yield self._check_xovr_peers, None
def test_peers(self):
up_hof = create_mock(["ingress_if"])
up_hof.ingress_if = 3
down_hof = create_mock(["ingress_if"])
down_hof.ingress_if = 4
yield self._check_xovr_peers, (up_hof, down_hof)
class TestPathCombinatorBuildInterfaceList(object):
"""
Unit tests for lib.path_combinator._build_interface_list
"""
def _check_up_down(self, up):
asms = []
ifid = 0
for i in range(1, 4):
if up:
hof = create_mock_full({"egress_if": ifid,
"ingress_if": ifid + 1})
if i == 3:
hof.ingress_if = 0
else:
hof = create_mock_full({"egress_if": ifid + 1,
"ingress_if": ifid})
if i == 3:
hof.egress_if = 0
ifid += 2
pcbm = create_mock_full({"hof()": hof})
asms.append(create_mock_full({"isd_as()": i, "pcbm()": pcbm}))
if_list = path_combinator._build_interface_list(asms, up)
ntools.eq_(if_list, [PathInterface.from_values(1, 1),
PathInterface.from_values(2, 2),
PathInterface.from_values(2, 3),
PathInterface.from_values(3, 4)])
def test_up(self):
yield self._check_up_down, True
def test_down(self):
yield self._check_up_down, False
class TestPathCombinatorCheckConnected(object):
"""
Unit tests for lib.path_combinator._check_connected
"""
def _setup(self, up_first, core_last, core_first, down_first):
up = create_mock(['first_ia'])
up.first_ia.return_value = up_first
yield up
core = create_mock(['first_ia', 'last_ia'])
core.first_ia.return_value = core_first
core.last_ia.return_value = core_last
yield core
down = create_mock(['first_ia'])
down.first_ia.return_value = down_first
yield down
def test_with_core_up_discon(self):
up, core, down = self._setup(1, 2, 3, 3)
ntools.assert_false(path_combinator._check_connected(up, core, down))
def test_with_core_down_discon(self):
up, core, down = self._setup(1, 1, 2, 3)
ntools.assert_false(path_combinator._check_connected(up, core, down))
def test_with_core_conn(self):
up, core, down = self._setup(1, 1, 2, 2)
ntools.assert_true(path_combinator._check_connected(up, core, down))
def test_without_core_discon(self):
up, core, down = self._setup(1, 0, 0, 2)
ntools.assert_false(path_combinator._check_connected(up, None, down))
def test_without_core_conn(self):
up, core, down = self._setup(1, 0, 0, 1)
ntools.assert_true(path_combinator._check_connected(up, None, down))
class TestPathCombinatorCopyHofs(object):
"""
Unit tests for lib.path_combinator._copy_hofs
"""
def test_full(self):
asms = []
for i in range(4):
pcbm = create_mock(["hof", "p"])
pcbm.hof.return_value = i
pcbm.p = create_mock(["inMTU"])
pcbm.p.inMTU = (i + 1) * 2
asm = create_mock(["pcbm", "p"])
asm.pcbm.return_value = pcbm
asm.p = create_mock(["mtu"])
asm.p.mtu = (i + 1) * 0.5
asms.append(asm)
# Call
ntools.eq_(path_combinator._copy_hofs(asms), ([3, 2, 1, 0], 0.5))
class TestPathCombinatorCopySegmentShortcut(object):
"""
Unit tests for lib.path_combinator._copy_segment_shortcut
"""
def _setup(self, copy_hofs):
info = create_mock(["hops", "up_flag"])
info.hops = 10
upstream_hof = create_mock(["verify_only", "xover"])
pcbm = create_mock(["hof"])
pcbm.hof.return_value = upstream_hof
asm = create_mock(["pcbm"])
asm.pcbm.return_value = pcbm
seg = create_mock(["asm", "infoF", "iter_asms"])
seg.asm.return_value = asm
seg.infoF.return_value = info
hofs = []
for _ in range(6):
hofs.append(create_mock(["xover"]))
copy_hofs.return_value = hofs, "mtu"
return seg, info, hofs, upstream_hof
@patch("lib.path_combinator._copy_hofs",
new_callable=create_mock)
def test_up(self, copy_hofs):
seg, info, hofs, upstream_hof = self._setup(copy_hofs)
# Call
ntools.eq_(path_combinator._copy_segment_shortcut(seg, 4),
(info, hofs, upstream_hof, "mtu"))
# Tests
ntools.eq_(info.hops, 6)
ntools.ok_(info.up_flag)
copy_hofs.assert_called_once_with(seg.iter_asms.return_value,
reverse=True)
ntools.eq_(hofs[-1].xover, True)
ntools.eq_(upstream_hof.xover, False)
ntools.eq_(upstream_hof.verify_only, True)
@patch("lib.path_combinator._copy_hofs",
new_callable=create_mock)
def test_down(self, copy_hofs):
seg, info, hofs, upstream_hof = self._setup(copy_hofs)
# Call
ntools.eq_(path_combinator._copy_segment_shortcut(seg, 7, up=False),
(info, hofs, upstream_hof, "mtu"))
# Tests
ntools.assert_false(info.up_flag)
copy_hofs.assert_called_once_with(seg.iter_asms.return_value,
reverse=False)
ntools.eq_(hofs[0].xover, True)
ntools.eq_(upstream_hof.verify_only, True)
class TestPathCombinatorFindPeerHfs(object):
"""
Unit tests for lib.path_combinator._find_peer_hfs
"""
def _mk_pcbms(self):
up_pcbms = [
self._mk_pcbm("2-1", 1, 1, 500),
self._mk_pcbm("2-1", 2, 2, 600), # Not reciprocated
self._mk_pcbm("2-1", 3, 3, 700),
]
down_pcbms = [
# Local 2-1
self._mk_pcbm("1-1", 1, 1, 500),
self._mk_pcbm("1-1", 3, 3, 700),
]
return up_pcbms, down_pcbms
def _mk_pcbm(self, inIA, remoteInIF, hof_ingress, mtu):
hof = create_mock_full({"ingress_if": hof_ingress})
p = create_mock_full({"remoteInIF": remoteInIF, "inMTU": mtu})
return create_mock_full({"inIA()": inIA, "p": p, "hof()": hof})
def test(self):
up_pcbms, down_pcbms = self._mk_pcbms()
p = create_mock_full({"hashTreeRoot": b"1234"})
up_asm = create_mock_full({"isd_as()": "1-1", "iter_pcbms()": up_pcbms,
"p": p})
down_asm = create_mock_full({"isd_as()": "2-1",
"iter_pcbms()": down_pcbms,
"p": p})
peer_revs = create_mock_full({"get()": None})
# Call
ntools.eq_(path_combinator._find_peer_hfs(up_asm, down_asm, peer_revs),
[(up_pcbms[0].hof(), down_pcbms[0].hof(), 500),
(up_pcbms[2].hof(), down_pcbms[1].hof(), 700)])
@patch("lib.path_combinator._skip_peer",
new_callable=create_mock)
def test_with_revocation(self, skip_peer):
up_pcbms, down_pcbms = self._mk_pcbms()
p = create_mock_full({"hashTreeRoot": b"1234"})
up_asm = create_mock_full({"isd_as()": "1-1",
"iter_pcbms()": up_pcbms,
"p": p})
down_asm = create_mock_full({"isd_as()": "2-1",
"iter_pcbms()": down_pcbms,
"p": p})
up_peer_rev = create_mock()
down_peer_rev = create_mock()
peer_revs = create_mock(["get"])
def get_side_effect(key):
data = {("1-1", 3): up_peer_rev, ("2-1", 3): down_peer_rev}
return data.get(key)
peer_revs.get.side_effect = get_side_effect
def skip_peer_side_effect(rev, ht_root):
if rev in [up_peer_rev, down_peer_rev] and ht_root == b"1234":
return True
return False
skip_peer.side_effect = skip_peer_side_effect
# Call
peers = path_combinator._find_peer_hfs(up_asm, down_asm, peer_revs)
# Tests
ntools.eq_(peers, [(up_pcbms[0].hof(), down_pcbms[0].hof(), 500)])
skip_peer.assert_has_calls(
[call(None, b"1234"), call(up_peer_rev, b"1234")], any_order=True)
if __name__ == "__main__":
nose.run(defaultTest=__name__)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import shutil
import verilog as vl
import userlogic
import communication
import software as sw
CLOCK = "clk"
RESET = "rst"
TEMPLATE = os.path.dirname(os.path.abspath(__file__)) + '/template/'
class Component(object):
def __init__(self, compname):
self.clock = "clk"
self.reset = "rst"
self.module = {
"input": [], # class object input
"output": [], # class object output
"inout": [], # class object inout
"userlogic": [], # class object userlogic
"reg": [],# class object reg
"wire": [], # class object wire
"communication": [] # class object for communication
}
self.name = compname
self.ros_package = False
self.assginlist = []
def show_info(self):
module = self.module
compname = self.name
print "===== Component name ====="
print self.name
print "===== input ====="
for port in module["input"]:
print port.__class__.__name__, port.bit, port.name
print "\n===== output ====="
for port in module["output"]:
print port.__class__.__name__, port.bit, port.name
print "\n===== inout ====="
for port in module["inout"]:
print port.__class__.__name__, port.bit, port.name
print "\n===== reg ====="
for port in module["reg"]:
print port.__class__.__name__, port.bit, port.name
print "\n===== wire ====="
for port in module["wire"]:
print port.__class__.__name__, port.bit, port.name
print "\n===== usrlogic ====="
for ul in module["userlogic"]:
print ul.name
print "\n===== communication ====="
for com in module["communication"]:
print com.__class__.__name__, com.fifo_width
print "rcv cycle",com.rcv_cycle
print "rcv signal list:"
for rcv in com.rcvlist:
print rcv[0]
print "snd cycle",com.snd_cycle
print "snd signal list:",
for snd in com.sndlist:
print snd[0]
print "switch condition", com.rs_cond
print "\n"
print "\n===== ROS package generation ====="
print self.ros_package
def add_clk(self, name):
self.clock = name
def add_rst(self, name):
self.reset = name
def add_input(self, name, bit=1):
if name == CLOCK or name == RESET:
print "pre defined signal %s"%name
return
input = vl.Input(name, bit)
self.module["input"].append(input)
def add_output(self, name, bit=1):
output = vl.Output(name, bit)
self.module["output"].append(output)
def add_inout(self, name, bit=1):
inout = vl.Inout(name, bit)
self.module["inout"].append(inout)
def add_reg(self, name, bit=1):
reg = vl.Reg(name, bit)
self.module["reg"].append(reg)
def add_wire(self, name, bit=1):
wire = vl.Wire(name, bit)
self.module["wire"].append(wire)
def add_ul(self, ul):
self.module["userlogic"].append(ul)
def add_com(self, com):
if com.__class__.__name__ == "Xillybus_fifo":
for port in com.signals:
if port.__class__.__name__ == "Input":
self.module["input"].append(port)
if port.__class__.__name__ == "Output":
self.module["output"].append(port)
if port.__class__.__name__ == "Inout":
self.module["inout"].append(port)
if port.__class__.__name__ == "Reg":
self.module["reg"].append(port)
if port.__class__.__name__ == "Wire":
self.module["wire"].append(port)
self.module["communication"].append(com)
def assgin(self, to_sig="", to_lsb=0, to_msb=0, from_sig="", from_lsb=0, from_msb=0):
# record the requested assignment so code generation can emit it later
self.assginlist.append((to_sig, to_lsb, to_msb, from_sig, from_lsb, from_msb))
def ros_packaging(self):
self.ros_package = True
def componentize(self):
self.module['input'].append(vl.Input(self.clock, 1))
self.module['input'].append(vl.Input(self.reset, 1))
compname = self.name
module = self.module
self.generate_hardware()
self.generate_software()
self.show_info()
print "Generate component successfully"
def generate_hardware(self):
compname = self.name
module = self.module
# ===================== hardware generation =====================
if os.path.isdir("%s/hardware"%compname) == False:
os.makedirs("%s/hardware"%compname)
for ul in module["userlogic"]:
shutil.copy(ul.filepath , "%s/hardware/%s.v"%(compname,ul.name))
fo = open("%s/hardware/%s.v"%(compname, compname), "w")
# generate in or out ports
fo.write(vl.generate_ports(module, compname, self))
#generate user register and wire
fo.write(vl.generate_regwire(module))
# generate instance for top module
fo.write(vl.generate_inst4top(compname,module))
if module["userlogic"] != []:
for x in xrange(0,len(module["userlogic"])):
userlogic.check_ulassign(module["userlogic"][x], module)
fo.write(vl.generate_userlogic_inst(module["userlogic"][x]))
fo.write("\n")
#generate communication logic
if module["communication"] != []:
for x in xrange(0,len(module["communication"])):
if module["communication"][x].__class__.__name__ == "Xillybus_fifo":
communication.check_xillybus_assign(module["communication"][x], module)
fo.write(vl.generate_xillybus(module["communication"][x], module))
fo.write("\nendmodule")
fo.close()
def generate_software(self):
compname = self.name
module = self.module
if os.path.isdir("%s/software"%compname) == False:
os.makedirs("%s/software"%compname)
shutil.copy("%ssoftware/lib_cpp.h"%TEMPLATE, "%s/software/lib_cpp.h"%compname)
shutil.copy("%ssoftware/bridge.py"%TEMPLATE, "%s/software/bridge.py"%compname)
# generate software interface
fo = open("%s/software/%s.cpp"%(compname, compname), "w")
fo.write(sw.generate_cpp_xillybus_interface(self))
fo.close()
fo = open("%s/software/Makefile"%(compname), "w")
fo.write(sw.generate_cpp_xillibus_makefile(module,compname))
fo.close()
# generate software on python
fo = open("%s/software/%s.py"%(compname, compname), "w")
fo.write(sw.generate_py_xillybus_interface(self))
fo.close()
if self.ros_package == True:
sw.generate_ros_package(self)
if __name__ == '__main__':
pass
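# Hedged usage sketch (not part of the original script): assemble a minimal
# component with one 32-bit input/output pair and generate its hardware and
# software skeleton under ./sample/. The name "sample" and the port names are
# made up; user logic and communication channels are omitted for brevity.
def _example_component():
    comp = Component("sample")
    comp.add_input("din", 32)
    comp.add_output("dout", 32)
    comp.componentize()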
|
|
# This file is part of Spyrk.
#
# Spyrk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Spyrk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Spyrk. If not, see <http://www.gnu.org/licenses/>.
from collections import namedtuple
from hammock import Hammock # pip install hammock
from cached_property import timed_cached_property # pip install cached-property
import urllib
from config import proxies
"""proxy settings"""
proxies = proxies
def check_for_proxy_conn():
try:
urllib.urlopen(
"http://example.com",
proxies=proxies
)
except IOError:
return False
else:
return True
class SparkCloud(object):
"""Provides access to the Spark Cloud.
>>> spark = SparkCloud(USERNAME, PASSWORD)
# Or
>>> spark = SparkCloud(ACCESS_TOKEN)
# List devices
>>> print spark.devices
# Access device
>>> spark.devices['captain_hamster']
# Or, shortcut form
>>> spark.captain_hamster
# List functions and variables of a device
>>> print spark.captain_hamster.functions
>>> print spark.captain_hamster.variables
# Tell if a device is connected
>>> print spark.captain_hamster.connected
# Call a function
>>> spark.captain_hamster.digitalwrite('D7', 'HIGH')
>>> print spark.captain_hamster.analogread('A0')
# (or any of your own custom function)
# Get variable value
>>> spark.captain_hamster.myvariable
"""
def __init__(self, username_or_access_token, password=None, spark_api = Hammock('https://api.particle.io')):
"""Initialise the connection to a Spark Cloud.
If you give a user name and password, an access token will be requested.
The list of known devices attached to your account will be requested.
If you have several devices and not all of them are connected it will
take a long time to create the object. The Spark Cloud will take ~30
seconds (per device?) to reply as it waits for an answer from the
disconnected devices.
"""
self.spark_api = spark_api
if password is None:
self.access_token = username_or_access_token
else:
self.access_token = self._login(username_or_access_token, password)
self.spark_api = self.spark_api.v1.devices
self.proxy_setting = check_for_proxy_conn()
@staticmethod
def _check_error(response):
"""Raises an exception if the Spark Cloud returned an error."""
if (not response.ok) or (response.status_code != 200):
print "Error from Spark Cloud"
# raise Exception(
# # try:
# # response.json()['error'] + ': ' + response.json()['error_description']
# # except KeyError:
# "Error from SparkCloud"
# )
def _login(self, username, password):
"""Proceed to login to the Spark Cloud and returns an access token."""
data = {
'username': username,
'password': password,
'grant_type': 'password'
}
if self.proxy_setting:
r = self.spark_api.oauth.token.POST(auth=('spark', 'spark'), data=data, timeout=600, proxies=proxies)
else:
r = self.spark_api.oauth.token.POST(auth=('spark', 'spark'), data=data, timeout=600)
self._check_error(r)
return r.json()['access_token']
@timed_cached_property(ttl=10) # cache the device for 10 seconds.
def devices(self):
"""Create a dictionary of devices known to the user account."""
params = {'access_token': self.access_token}
if self.proxy_setting:
r = self.spark_api.GET(params=params, timeout=600, proxies=proxies)
else:
r = self.spark_api.GET(params=params, timeout=600)
self._check_error(r)
json_list = r.json()
devices_dict = {}
if json_list:
# it is possible the keys in json responses vary from one device to another: compute the set of all keys
allKeys = {'functions', 'variables', 'api', 'requires_deep_update', 'status'} # added by device_info
for device_json in json_list:
allKeys.update(device_json.keys())
Device = _BaseDevice.make_device_class(self, allKeys)
for d in json_list:
if d["connected"]:
info = self._get_device_info(d['id'])
d['functions'] = info.get('functions')
d['variables'] = info.get('variables')
d['api'] = self.spark_api(d['id'])
d['requires_deep_update'] = d.get('requires_deep_update', False)
d['status'] = info.get('status')
# ensure the set of all keys is present in the dictionary (the Device constructor requires all keys)
[d.setdefault(key, None) for key in allKeys]
#print d
devices_dict[d['name']] = Device(**d)
return devices_dict
def _get_device_info(self, device_id):
"""Queries the Spark Cloud for detailed information about a device."""
params = {'access_token': self.access_token}
if self.proxy_setting:
r = self.spark_api(device_id).GET(params=params, timeout=600, proxies=proxies)
else:
r = self.spark_api(device_id).GET(params=params, timeout=600)
self._check_error(r)
return r.json()
def __getattr__(self, name):
"""Returns a Device object as an attribute of the SparkCloud object."""
if name in self.devices:
return self.devices[name]
else:
raise AttributeError()
class _BaseDevice(object):
"""Parent class for the dynamic Device class.
The Device class being made of whatever fields the Spark Cloud API gives us,
it has to be constructed on the fly once we know those fields.
The generated Device class subclasses this _BaseDevice as well as a
namedtuple.
The namedtuple holds all the static fields, while _BaseDevice hosts the methods
extending how a Device object should behave.
"""
@staticmethod
def make_device_class(spark_cloud, entries):
"""Returns a dynamic Device class based on what a GET device list from
the Spark Cloud returns.
spark_cloud parameter should be the caller instance of SparkCloud.
entries parameter should be the list of fields the Spark Cloud API is
returning.
"""
attrs = list(
set(
list(entries) + [
'requires_deep_update', 'functions', 'variables', 'api', 'status'
]
)
)
return type(
'Device',
(_BaseDevice, namedtuple('Device', attrs)),
{'__slots__': (), 'spark_cloud': spark_cloud}
)
def __getattr__(self, name):
"""Returns virtual attributes corresponding to function or variable
names.
"""
params = {'access_token': self.spark_cloud.access_token}
if not self.connected:
raise IOError("{}.{} is not available: the spark device is not connected.".format(self.name, name))
if name in self.functions:
def fcall(*args):
data = {'params': ','.join(args)}
if check_for_proxy_conn():
r = self.api(name).POST(params=params, data=data, timeout=600, proxies=proxies)
else:
r = self.api(name).POST(params=params, data=data, timeout=600)
self.spark_cloud._check_error(r)
return r.json()['return_value']
return fcall
elif name in self.variables:
if check_for_proxy_conn():
r = self.api(name).GET(params=params, timeout=600, proxies=proxies)
else:
r = self.api(name).GET(params=params, timeout=600)
self.spark_cloud._check_error(r)
return r.json()['result']
else:
raise AttributeError()
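if __name__ == "__main__":
    # Hedged illustration (not part of Spyrk): the dynamic Device class built by
    # _BaseDevice.make_device_class is just a namedtuple subclass created at
    # runtime. The same pattern in isolation, with made-up field values:
    from collections import namedtuple

    class _DemoBase(object):
        def describe(self):
            return "%s (connected=%s)" % (self.name, self.connected)

    _fields = ['name', 'connected', 'functions']
    _Demo = type('Demo', (_DemoBase, namedtuple('Demo', _fields)),
                 {'__slots__': ()})
    d = _Demo(name='captain_hamster', connected=True,
              functions=['digitalwrite'])
    print d.describe()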
|
|
# -*- coding: utf-8 -*-
import datetime
import random
import re
import os
import tempfile
import json
import string
from PIL import Image
from slugify import slugify
import requests
from django.core.files import File
from django.db import transaction
from django.utils import timezone
from django.conf import settings
from airmozilla.main import models as main_models
from airmozilla.uploads import models as upload_models
_here = os.path.dirname(__file__)
json_file = os.path.join(_here, 'random-data.json')
DATA = json.load(open(json_file))
def random_string(min_, max_=None):
if max_ is None:
max_ = min_
length = random.randint(min_, max_)
chars = []
for __ in range(length):
chars.append(random.choice(list(string.lowercase)))
return ''.join(chars)
def random_past():
now = timezone.now()
return now - datetime.timedelta(days=random.randint(1, 400))
def random_slug_title():
title_words = DATA['title_words']
p = random.randint(0, len(title_words) - 10)
length = random.randint(3, 10)
words = title_words[p:p + length]
title = ' '.join(words)
title = title.title() # funny
title = title.strip()
if not re.search('^[A-Z]', title):
# try again
return random_slug_title()
slug = slugify(title.lower())
return slug, title
def random_channels():
channel_names = DATA['channel_names']
channels = []
for prob in (3, 8):  # this might generate more than one non-main channel
if random.randint(1, prob) == 1:
# make it belong to a non-main channel
name = random.choice(channel_names)
slug = slugify(name.lower())
channel, created = main_models.Channel.objects.get_or_create(
slug=slug,
name=name
)
if created and not channel.description:
channel.description = "Here's the description for %s" % name
channel.save()
channels.append(channel)
if random.randint(1, 5) != 1 or not channels:
main, _ = main_models.Channel.objects.get_or_create(
slug=settings.DEFAULT_CHANNEL_SLUG,
name=settings.DEFAULT_CHANNEL_NAME
)
channels.append(main)
return channels
def random_tags():
names = set()
tags_words = DATA['tags_words']
for _ in range(random.randint(0, 5)):
names.add(random.choice(tags_words))
for name in names:
tag, _ = main_models.Tag.objects.get_or_create(name=name)
yield tag
vidly_template_content = """
{% if event.is_public() %}
{% set token = None %}
{% else %}
{% set token = vidly_tokenize(tag, 90) %}
{% endif %}
<script type="text/javascript" src="//vid.ly/{{ tag }}/em
bed{% if token %}?token={{ token }}{% endif %}"></script>
""".replace('em\nbed', 'embed').strip()
edgecast_template_content = (
'<script src="//jwpsrv.com/library/_JGfOmN3EeOSkCIACrqE1A.js"></script>\n'
'<script type="text/javascript">jwplayer.key="ZlZDNVcx3SYZWRdfbffTesf'
'IPo+pT4L9/WniJa2YXSI=";</script>'
) + """
<div id="player"></div>
<script>
jwplayer("player").setup({
file:"https://air.mozilla.org/edgecast.smil?venue={{ venue }}{% if not ev
ent.is_public() %}&token={{ edgecast_tokenize(seconds=90) }}{% endif %}",
autostart: true,
rtmp: { subscribe: true },
image:"https://videos.cdn.mozilla.net/serv/air_mozilla/PleaseStandBy896.png",
width: 896,
height: 504,
debug: false
});
</script>
""".replace('ev\nent', 'event').strip()
def get_archive_template():
name = "Vid.ly"
try:
return main_models.Template.objects.get(name=name)
except main_models.Template.DoesNotExist:
return main_models.Template.objects.create(
name=name,
content=vidly_template_content
)
def get_live_template():
name = "Edgecast"
try:
return main_models.Template.objects.get(name=name)
except main_models.Template.DoesNotExist:
return main_models.Template.objects.create(
name=name,
content=edgecast_template_content
)
def random_start_time(span):
days = random.randint(0, span)
date = timezone.now().replace(microsecond=0, second=0)
if random.randint(1, 4) == 1:
date = date.replace(minute=30)
elif random.randint(1, 4) == 1:
date = date.replace(minute=45)
elif random.randint(1, 4) == 1:
date = date.replace(minute=15)
else:
date = date.replace(minute=0)
# to prevent it all being on the same minute
date += datetime.timedelta(hours=random.randint(-10, 20))
if random.randint(1, 10) == 1:
# let's make it a future day
date += datetime.timedelta(days=days)
else:
date -= datetime.timedelta(days=days)
return date
def random_status():
# most events end up scheduled; occasionally mark one initiated or removed
if random.randint(1, 12) == 1:
return main_models.Event.STATUS_INITIATED
if random.randint(1, 15) == 1:
return main_models.Event.STATUS_REMOVED
return main_models.Event.STATUS_SCHEDULED
def random_vidly_tag():
return random.choice(DATA['vidly_tags'])
def url_to_localfile(url):
dest = os.path.join(
tempfile.gettempdir(),
'airmozillafakedata'
)
if not os.path.isdir(dest):
os.mkdir(dest)
filename = os.path.basename(url)
filepath = os.path.join(dest, filename)
if not os.path.isfile(filepath):
r = requests.get(url)
assert r.status_code == 200, r.status_code
with open(filepath, 'wb') as f:
f.write(r.content)
return filepath
def setup_gallery():
gallery_pictures = DATA['gallery_pictures']
if len(set(gallery_pictures)) != len(gallery_pictures):
_once = set()
for each in gallery_pictures:
if each in _once:
raise Exception("Duplicate picture %s" % each)
_once.add(each)
for url in random.sample(gallery_pictures, len(gallery_pictures)):
try:
filepath = url_to_localfile(url)
except AssertionError as x:
print "Skipping", url, x
continue
if main_models.Picture.objects.filter(notes=filepath[-100:]):
# we already have this image
continue
image = Image.open(filepath)
width, height = image.size
with open(filepath, 'rb') as f:
opened = File(f)
picture = main_models.Picture(
notes=filepath[-100:],
size=opened.size,
width=width,
height=height,
)
picture.file.save(os.path.basename(filepath), opened, save=True)
def attach_picture(event):
use_picture = random.randint(1, 4) != 1
if use_picture:
# most events get a picture from the gallery
picture, = main_models.Picture.objects.all().order_by('?')[:1]
event.picture = picture
event.save()
placeholder_pictures = DATA['placeholder_pictures']
if not use_picture or random.randint(1, 4) == 1:
# some events get a placeholder picture
while True:
try:
filepath = url_to_localfile(
random.choice(placeholder_pictures)
)
break
except AssertionError:
# try again
pass
with open(filepath, 'rb') as f:
opened = File(f)
event.placeholder_img.save(
os.path.basename(filepath),
opened, save=True
)
assert event.picture or event.placeholder_img
def random_privacy():
r = random.random()
if r >= 0.8:
# 20% chance it's company private
return main_models.Event.PRIVACY_COMPANY
if r >= 0.6:
# 20% chance it's contributor privacy
return main_models.Event.PRIVACY_CONTRIBUTORS
return main_models.Event.PRIVACY_PUBLIC
def random_description(no_sents=5):
sents = []
words = DATA['title_words']
for i in range(random.randint(2, no_sents)):
start = random.randint(0, len(words) - 10)
l = random.randint(3, 10)
sents.append(' '.join(words[start: start + l]))
return '. '.join([x.title() for x in sents])
def random_short_description():
if random.randint(1, 2) == 1:
return ''
return random_description(no_sents=2)
def random_location():
location, = (
main_models.Location.objects.filter(is_active=True).order_by('?')[:1]
)
return location
def setup_locations():
for name in DATA['locations']:
if main_models.Location.objects.filter(name=name):
# we already have this one
continue
is_active = random.randint(1, 4) != 1
ts = random.choice(DATA['timezones'])
main_models.Location.objects.create(
name=name,
timezone=ts,
is_active=is_active,
)
def setup_regions():
picked = set()
for name in DATA['regions']:
locations = (
main_models.Location.objects
.exclude(id__in=picked)
.order_by('?')[0:random.randint(0, 4)]
)
region = main_models.Region.objects.create(name=name)
for l in locations:
l.regions.add(region)
picked.add(l.id)
def setup_users(howmany):
for i in range(howmany):
email = '%s-example@mozilla.com' % random_string(5, 15)
try:
main_models.User.objects.get(email=email)
except main_models.User.DoesNotExist:
main_models.User.objects.create(
email=email,
is_staff=random.randint(1, 20) == 1,
username=random_string(20)
)
def random_duration():
if random.randint(1, 5) == 1:
return None
return random.randint(10, 200)
def create_vidlysubmission(event):
if event.template_environment and event.template_environment.get('tag'):
upload = random_upload(event)
main_models.VidlySubmission.objects.create(
tag=event.template_environment.get('tag'),
event=event,
url=upload.url,
submission_time=random_past(),
token_protection=event.privacy != main_models.Event.PRIVACY_PUBLIC,
)
def random_upload(event):
choices = DATA['video_urls']
url = random.choice(choices)
user, = main_models.User.objects.all().order_by('?')[:1]
mime_types = {
'.mp4': 'video/mp4',
'.mov': 'video/quicktime',
'.f4v': 'video/x-f4v',
'.flv': 'video/x-flv',
'.m4v': 'video/x-m4v',
'.webm': 'video/webm',
}
mime_type = mime_types[os.path.splitext(url)[1]]
return upload_models.Upload.objects.create(
user=user,
url=url,
mime_type=mime_type,
file_name=os.path.basename(url),
size=random.randint(10000, 1000000),
event=event
)
def create_statistics(event):
if event.status != main_models.Event.STATUS_SCHEDULED:
return
yesterday = timezone.now() - datetime.timedelta(days=1)
if not event.archive_time or event.archive_time > yesterday:
return
submission = None
for each in main_models.VidlySubmission.objects.filter(event=event):
submission = each
break
else:
return
main_models.EventHitStats.objects.create(
event=event,
shortcode=submission.tag,
total_hits=random.randint(10, 100000)
)
@transaction.atomic
def generate(events=100, verbose=False):
archive_template = get_archive_template()
live_template = get_live_template()
now = timezone.now()
setup_gallery()
setup_locations()
setup_regions()
setup_users(int(float(events) / 10))
_slugs = set(
main_models.Event.objects.all().values_list('slug', flat=True)
)
created_events = 0
for _ in range(events):
slug, title = random_slug_title()
while slug in _slugs:
slug += str(random.randint(1, 100))
_slugs.add(slug)
if verbose: # pragma: no cover
print (slug, title)
channels = random_channels()
tags = random_tags()
start_time = random_start_time(events)
if start_time > now:
archive_time = None
template = live_template
template_environment = {'venue': 'AirMoMTV'}
else:
archive_time = start_time + datetime.timedelta(minutes=60)
template = archive_template
template_environment = {'tag': random_vidly_tag()}
status = random_status()
privacy = random_privacy()
description = random_description()
short_description = random_short_description()
location = random_location()
duration = random_duration()
event = main_models.Event.objects.create(
slug=slug,
title=title,
start_time=start_time,
archive_time=archive_time,
template=template,
template_environment=template_environment,
status=status,
privacy=privacy,
description=description,
short_description=short_description,
location=location,
featured=random.randint(1, 20) == 1,
duration=duration,
)
created_events += 1
attach_picture(event)
for t in tags:
event.tags.add(t)
for c in channels:
event.channels.add(c)
create_vidlysubmission(event)
create_statistics(event)
print "Created", created_events, "events"
# raise Exception("Stopping there")
|
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import logging
import time
from concurrent.futures import ThreadPoolExecutor
from mock import Mock
from cassandra import OperationTimedOut
from cassandra.cluster import (EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile,
_Scheduler, NoHostAvailable)
from cassandra.policies import HostStateListener, RoundRobinPolicy
from cassandra.io.asyncorereactor import AsyncoreConnection
from tests.integration import (CASSANDRA_VERSION, PROTOCOL_VERSION,
requiressimulacron)
from tests.integration.util import assert_quiescent_pool_state
from tests.integration.simulacron import SimulacronBase
from tests.integration.simulacron.utils import (NO_THEN, PrimeOptions,
prime_query, prime_request,
start_and_prime_cluster_defaults,
start_and_prime_singledc,
clear_queries)
class TrackDownListener(HostStateListener):
def __init__(self):
self.hosts_marked_down = []
def on_down(self, host):
self.hosts_marked_down.append(host)
class ThreadTracker(ThreadPoolExecutor):
called_functions = []
def submit(self, fn, *args, **kwargs):
self.called_functions.append(fn.__name__)
return super(ThreadTracker, self).submit(fn, *args, **kwargs)
class OrderedRoundRobinPolicy(RoundRobinPolicy):
def make_query_plan(self, working_keyspace=None, query=None):
self._position += 1
hosts = []
for _ in range(10):
hosts.extend(sorted(self._live_hosts, key=lambda x : x.address))
return hosts
@requiressimulacron
class ConnectionTests(SimulacronBase):
def test_heart_beat_timeout(self):
"""
        Test to ensure the hosts are marked as down after an OTO is received.
Also to ensure this happens within the expected timeout
@since 3.10
@jira_ticket PYTHON-762
@expected_result all the hosts have been marked as down at some point
@test_category metadata
"""
number_of_dcs = 3
nodes_per_dc = 20
query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);"
idle_heartbeat_timeout = 5
idle_heartbeat_interval = 1
start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc, CASSANDRA_VERSION)
listener = TrackDownListener()
executor = ThreadTracker(max_workers=8)
# We need to disable compression since it's not supported in simulacron
cluster = Cluster(compression=False,
idle_heartbeat_interval=idle_heartbeat_interval,
idle_heartbeat_timeout=idle_heartbeat_timeout,
executor_threads=8,
execution_profiles={
EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())})
self.addCleanup(cluster.shutdown)
cluster.scheduler.shutdown()
cluster.executor = executor
cluster.scheduler = _Scheduler(executor)
session = cluster.connect(wait_for_all_pools=True)
cluster.register_listener(listener)
log = logging.getLogger()
log.setLevel('CRITICAL')
self.addCleanup(log.setLevel, "DEBUG")
prime_query(query_to_prime, then=NO_THEN)
futures = []
for _ in range(number_of_dcs * nodes_per_dc):
future = session.execute_async(query_to_prime)
futures.append(future)
for f in futures:
f._event.wait()
self.assertIsInstance(f._final_exception, OperationTimedOut)
prime_request(PrimeOptions(then=NO_THEN))
        # We allow some extra time for all the hosts' on_down callbacks to fire
# The callbacks should start happening after idle_heartbeat_timeout + idle_heartbeat_interval
time.sleep((idle_heartbeat_timeout + idle_heartbeat_interval) * 2.5)
for host in cluster.metadata.all_hosts():
self.assertIn(host, listener.hosts_marked_down)
# In this case HostConnection._replace shouldn't be called
self.assertNotIn("_replace", executor.called_functions)
def test_callbacks_and_pool_when_oto(self):
"""
        Test to ensure the callbacks are correctly called and the connection
is returned when there is an OTO
@since 3.12
@jira_ticket PYTHON-630
@expected_result the connection is correctly returned to the pool
        after an OTO; also, only the errback is called and not the callback
when the message finally arrives.
@test_category metadata
"""
start_and_prime_singledc()
cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
session = cluster.connect()
self.addCleanup(cluster.shutdown)
query_to_prime = "SELECT * from testkesypace.testtable"
server_delay = 2 # seconds
prime_query(query_to_prime, then={"delay_in_ms": server_delay * 1000})
future = session.execute_async(query_to_prime, timeout=1)
callback, errback = Mock(name='callback'), Mock(name='errback')
future.add_callbacks(callback, errback)
self.assertRaises(OperationTimedOut, future.result)
assert_quiescent_pool_state(self, cluster)
time.sleep(server_delay + 1)
# PYTHON-630 -- only the errback should be called
errback.assert_called_once()
callback.assert_not_called()
def test_close_when_query(self):
"""
Test to ensure the driver behaves correctly if the connection is closed
just when querying
@since 3.12
        @expected_result NoHostAvailable is raised
@test_category connection
"""
start_and_prime_singledc()
cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
session = cluster.connect()
self.addCleanup(cluster.shutdown)
query_to_prime = "SELECT * from testkesypace.testtable"
for close_type in ("disconnect", "shutdown_read", "shutdown_write"):
then = {
"result": "close_connection",
"delay_in_ms": 0,
"close_type": close_type,
"scope": "connection"
}
prime_query(query_to_prime, then=then, rows=None, column_types=None)
self.assertRaises(NoHostAvailable, session.execute, query_to_prime)
def test_retry_after_defunct(self):
"""
        We test that cluster._retry is called if the connection is defunct
in the middle of a query
Finally we verify the driver recovers correctly in the event
of a network partition
@since 3.12
@expected_result the driver is able to query even if a host is marked
as down in the middle of the query, it will go to the next one if the timeout
hasn't expired
@test_category connection
"""
number_of_dcs = 3
nodes_per_dc = 2
query_to_prime = "INSERT INTO test3rf.test (k, v) VALUES (0, 1);"
idle_heartbeat_timeout = 1
idle_heartbeat_interval = 5
simulacron_cluster = start_and_prime_cluster_defaults(number_of_dcs, nodes_per_dc, CASSANDRA_VERSION)
dc_ids = sorted(simulacron_cluster.data_center_ids)
last_host = dc_ids.pop()
prime_query(query_to_prime,
cluster_name="{}/{}".format(simulacron_cluster.cluster_name, last_host))
roundrobin_lbp = OrderedRoundRobinPolicy()
cluster = Cluster(compression=False,
idle_heartbeat_interval=idle_heartbeat_interval,
idle_heartbeat_timeout=idle_heartbeat_timeout,
execution_profiles={
EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=roundrobin_lbp)})
session = cluster.connect(wait_for_all_pools=True)
self.addCleanup(cluster.shutdown)
# This simulates we only have access to one DC
for dc_id in dc_ids:
datacenter_path = "{}/{}".format(simulacron_cluster.cluster_name, dc_id)
prime_query(query_to_prime, then=NO_THEN, cluster_name=datacenter_path)
prime_request(PrimeOptions(then=NO_THEN, cluster_name=datacenter_path))
# Only the last datacenter will respond, therefore the first host won't
# We want to make sure the returned hosts are 127.0.0.1, 127.0.0.2, ... 127.0.0.8
roundrobin_lbp._position = 0
        # After 3 + 1 seconds the connection should be marked as down and another host retried
response_future = session.execute_async(query_to_prime, timeout=4 * idle_heartbeat_interval
+ idle_heartbeat_timeout)
response_future.result()
self.assertGreater(len(response_future.attempted_hosts), 1)
# No error should be raised here since the hosts have been marked
# as down and there's still 1 DC available
for _ in range(10):
session.execute(query_to_prime)
# Might take some time to close the previous connections and reconnect
time.sleep(10)
assert_quiescent_pool_state(self, cluster)
clear_queries()
time.sleep(10)
assert_quiescent_pool_state(self, cluster)
def test_idle_connection_is_not_closed(self):
"""
Test to ensure that the connections aren't closed if they are idle
@since 3.12
@jira_ticket PYTHON-573
@expected_result the connections aren't closed nor the hosts are
set to down if the connection is idle
@test_category connection
"""
start_and_prime_singledc()
idle_heartbeat_timeout = 1
idle_heartbeat_interval = 1
listener = TrackDownListener()
cluster = Cluster(compression=False,
idle_heartbeat_interval=idle_heartbeat_interval,
idle_heartbeat_timeout=idle_heartbeat_timeout)
session = cluster.connect(wait_for_all_pools=True)
cluster.register_listener(listener)
self.addCleanup(cluster.shutdown)
time.sleep(20)
self.assertEqual(listener.hosts_marked_down, [])
def test_host_is_not_set_to_down_after_query_oto(self):
"""
Test to ensure that the connections aren't closed if there's an
OperationTimedOut in a normal query. This should only happen from the
        heart beat thread (in the case of an OperationTimedOut) with the default
configuration
@since 3.12
@expected_result the connections aren't closed nor the hosts are
set to down
@test_category connection
"""
start_and_prime_singledc()
query_to_prime = "SELECT * FROM madeup_keyspace.madeup_table"
prime_query(query_to_prime, then=NO_THEN)
listener = TrackDownListener()
cluster = Cluster(compression=False)
session = cluster.connect(wait_for_all_pools=True)
cluster.register_listener(listener)
futures = []
for _ in range(10):
future = session.execute_async(query_to_prime)
futures.append(future)
for f in futures:
f._event.wait()
self.assertIsInstance(f._final_exception, OperationTimedOut)
self.assertEqual(listener.hosts_marked_down, [])
assert_quiescent_pool_state(self, cluster)
def test_can_shutdown_asyncoreconnection_subclass(self):
start_and_prime_singledc()
class ExtendedConnection(AsyncoreConnection):
pass
cluster = Cluster(contact_points=["127.0.0.2"],
connection_class=ExtendedConnection)
cluster.connect()
cluster.shutdown()
|
|
from __future__ import print_function, division
from sympy.core.basic import C
from sympy.core.expr import Expr
from sympy.core.relational import Eq
from sympy.core.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Wild, Symbol)
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, xrange
from sympy.core.containers import Tuple
from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise
from sympy.utilities import flatten
def _process_limits(*symbols):
"""Process the list of symbols and convert them to canonical limits,
storing them as Tuple(symbol, lower, upper). The orientation of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
"""
limits = []
orientation = 1
for V in symbols:
if isinstance(V, Symbol):
limits.append(Tuple(V))
continue
elif is_sequence(V, Tuple):
V = sympify(flatten(V))
if V[0].is_Symbol:
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval):
V[1:] = [V[1].start, V[1].end]
if len(V) == 3:
if V[1] is None and V[2] is not None:
nlim = [V[2]]
elif V[1] is not None and V[2] is None:
orientation *= -1
nlim = [V[1]]
elif V[1] is None and V[2] is None:
nlim = []
else:
nlim = V[1:]
limits.append(Tuple(newsymbol, *nlim ))
continue
elif len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, orientation
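# Hedged illustration (not part of the original module): shows the canonical
# limits and orientation returned by _process_limits for two simple inputs,
# assuming x is an ordinary Symbol. When only the lower limit is supplied the
# bound is treated as the upper one and the orientation flips sign, as the
# docstring above describes.
def _process_limits_example():
    x = Symbol('x')
    bare = _process_limits(x)             # -> ([(x,)], 1)
    bounded = _process_limits((x, 1, 3))  # -> ([(x, 1, 3)], 1)
    return bare, bounded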
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = piecewise_fold(sympify(function))
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
# Only limits with lower and upper bounds are supported; the indefinite form
# is not supported
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the dummy variables
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
if self.function.is_zero:
return set()
return self._free_symbols()
def as_dummy(self):
"""
see _as_dummy() for documentation
"""
return self._as_dummy()
def _free_symbols(self):
"""
This method returns the symbols that will exist when the object is
evaluated. This is useful if one is trying to determine whether the
        object contains a certain symbol or not.
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
set([y])
"""
function, limits = self.function, self.limits
if function.is_zero:
return set()
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
def _as_dummy(self):
"""
Replace instances of the given dummy variables with explicit dummy
counterparts to make clear what are dummy variables and what
are real-world symbols in an object.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
Integral(_x, (_x, x, _y), (_y, x, y))
        If the object supports the "integral at" limit ``(x,)`` it
is not treated as a dummy, but the explicit form, ``(x, x)``
of length 2 does treat the variable as a dummy.
>>> Integral(x, x).as_dummy()
Integral(x, x)
>>> Integral(x, (x, x)).as_dummy()
Integral(_x, (_x, x))
        If there were no dummies in the original expression, then the
        symbols which cannot be changed by subs() are clearly seen as
those with an underscore prefix.
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the integration variable
"""
reps = {}
f = self.function
limits = list(self.limits)
for i in xrange(-1, -len(limits) - 1, -1):
xab = list(limits[i])
if len(xab) == 1:
continue
x = xab[0]
xab[0] = x.as_dummy()
for j in range(1, len(xab)):
xab[j] = xab[j].subs(reps)
reps[x] = xab[0]
limits[i] = xab
f = f.subs(reps)
return self.func(f, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s,n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x,a
>>> Integral(a*x**2,x).subs(x,4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
        transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
# which case we ignore. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, C.Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old,C.AppliedUndef) or isinstance(old,C.UndefinedFunction):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution can not create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
class AddWithLimits(ExprWithLimits):
r"""Represents unevaluated oriented additions of integer sequences.
"""
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
#
# This constructor only differs from ExprWithLimits
# in the application of the orientation variable. Perhaps merge?
function = piecewise_fold(sympify(function))
if function is S.NaN:
return S.NaN
        # delete dx, dy, dz, etc.
free = function.free_symbols
for f in free:
if len(f.name) > 1 and f.name[0] == "d":
function = function.subs(f, 1)
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
new_free = set()
limits = []
# if f is dx, then the variable is x
for f in free:
if len(f.name) > 1 and f.name[0] == "d":
limits.append((Symbol(f.name[1:]),))
else:
new_free.add(f)
free = new_free
del new_free
if len(limits) == 0:
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits = [Tuple(s) for s in free]
orientation = 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
obj = Expr.__new__(cls, **assumptions)
arglist = [orientation*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def _eval_adjoint(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.conjugate(), *self.limits)
return None
def _eval_transpose(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_factor(self, **hints):
summand = self.function.factor(**hints)
keep_inside = []
pull_outside = []
if summand.is_Mul and summand.is_commutative:
for i in summand.args:
if not i.atoms(C.Symbol).intersection(self.variables):
pull_outside.append(i)
else:
keep_inside.append(i)
return C.Mul(*pull_outside) * self.func(C.Mul(*keep_inside), *self.limits)
return self
def _eval_expand_basic(self, **hints):
summand = self.function.expand(**hints)
if summand.is_Add and summand.is_commutative:
return C.Add(*[ self.func(i, *self.limits) for i in summand.args ])
elif summand != self.function:
return self.func(summand, *self.limits)
return self
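# Hedged usage sketch (not part of the original module): _eval_expand_basic()
# above distributes an expression with limits over a commutative Add, which is
# what expand() on the standard Integral subclass is expected to trigger. The
# import is done lazily to avoid a circular import at module load time.
def _expand_example():
    from sympy import Integral, Symbol
    x, y = Symbol('x'), Symbol('y')
    # Expected: Integral(x, (x, 0, 1)) + Integral(y, (x, 0, 1))
    return Integral(x + y, (x, 0, 1)).expand()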
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-12 17:33
from __future__ import unicode_literals
import uuid
import django.db.models.deletion
import django.db.models.manager
import mptt.fields
from django.conf import settings
from django.db import migrations
from django.db import models
import contentcuration.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=100, unique=True)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=False)),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Users',
},
),
migrations.CreateModel(
name='AssessmentItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(default=b'multiplechoice', max_length=50)),
('question', models.TextField(blank=True)),
('answers', models.TextField(default=b'[]')),
],
),
migrations.CreateModel(
name='Channel',
fields=[
('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('description', models.CharField(blank=True, max_length=400)),
('version', models.IntegerField(default=0)),
('thumbnail', models.TextField(blank=True)),
('deleted', models.BooleanField(default=False)),
('public', models.BooleanField(default=False)),
('bookmarked_by', models.ManyToManyField(related_name='bookmarked_channels', to=settings.AUTH_USER_MODEL, verbose_name='bookmarked by')),
],
options={
'verbose_name': 'Channel',
'verbose_name_plural': 'Channels',
},
),
migrations.CreateModel(
name='ContentKind',
fields=[
('kind', models.CharField(choices=[(b'topic', 'Topic'), (b'video', 'Video'), (b'audio', 'Audio'), (b'exercise',
'Exercise'), (b'document', 'Document'), (b'image', 'Image')], max_length=200, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='ContentNode',
fields=[
('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)),
('content_id', contentcuration.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32)),
('title', models.CharField(max_length=200)),
('description', models.CharField(blank=True, max_length=400)),
('sort_order', models.FloatField(default=0, help_text='Ascending, lowest number shown first', max_length=50, verbose_name='sort order')),
('license_owner', models.CharField(blank=True, help_text='Organization of person who holds the essential rights', max_length=200)),
('author', models.CharField(blank=True, help_text='Person who created content', max_length=200)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
('changed', models.BooleanField(default=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('cloned_source', mptt.fields.TreeForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='contentcuration.ContentNode')),
],
options={
'verbose_name': 'Topic',
'verbose_name_plural': 'Topics',
},
),
migrations.CreateModel(
name='ContentTag',
fields=[
('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)),
('tag_name', models.CharField(max_length=30)),
('channel', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tags', to='contentcuration.Channel')),
],
),
migrations.CreateModel(
name='Exercise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='Title', help_text='Title of the content item', max_length=50, verbose_name='title')),
('description', models.TextField(default='Description', help_text='Brief description of what this content item is', max_length=200, verbose_name='description')),
],
),
migrations.CreateModel(
name='File',
fields=[
('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)),
('checksum', models.CharField(blank=True, max_length=400)),
('file_size', models.IntegerField(blank=True, null=True)),
('file_on_disk', models.FileField(blank=True, max_length=500,
storage=contentcuration.models.FileOnDiskStorage(), upload_to=contentcuration.models.file_on_disk_name)),
('original_filename', models.CharField(blank=True, max_length=255)),
('contentnode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.ContentNode')),
],
),
migrations.CreateModel(
name='FileFormat',
fields=[
('extension', models.CharField(choices=[(b'mp4', 'mp4'), (b'vtt', 'vtt'), (b'srt', 'srt'),
(b'mp3', 'mp3'), (b'pdf', 'pdf')], max_length=40, primary_key=True, serialize=False)),
('mimetype', models.CharField(blank=True, max_length=200)),
],
),
migrations.CreateModel(
name='FormatPreset',
fields=[
('id', models.CharField(choices=[(b'high_res_video', 'High resolution video'), (b'low_res_video', 'Low resolution video'), (b'vector_video', 'Vertor video'), (
b'thumbnail', 'Thumbnail'), (b'thumbnail', 'Thumbnail'), (b'caption', 'Caption')], max_length=150, primary_key=True, serialize=False)),
('readable_name', models.CharField(max_length=400)),
('multi_language', models.BooleanField(default=False)),
('supplementary', models.BooleanField(default=False)),
('order', models.IntegerField()),
('allowed_formats', models.ManyToManyField(blank=True, to='contentcuration.FileFormat')),
('kind', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='format_presets', to='contentcuration.ContentKind')),
],
),
migrations.CreateModel(
name='Invitation',
fields=[
('id', contentcuration.models.UUIDField(default=uuid.uuid4, max_length=32, primary_key=True, serialize=False)),
('email', models.EmailField(max_length=100)),
('first_name', models.CharField(default=b'Guest', max_length=100)),
('last_name', models.CharField(blank=True, max_length=100, null=True)),
('channel', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pending_editors', to='contentcuration.Channel')),
('invited', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_to', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sent_by', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Invitation',
'verbose_name_plural': 'Invitations',
},
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lang_code', models.CharField(db_index=True, max_length=2)),
('lang_subcode', models.CharField(db_index=True, max_length=2)),
],
),
migrations.CreateModel(
name='License',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('license_name', models.CharField(max_length=50)),
('license_url', models.URLField(blank=True)),
('license_description', models.TextField(blank=True)),
('exists', models.BooleanField(default=False, help_text='Tells whether or not a content item is licensed to share', verbose_name='license exists')),
],
),
migrations.CreateModel(
name='PrerequisiteContentRelationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prerequisite', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='contentcuration_prerequisitecontentrelationship_prerequisite', to='contentcuration.ContentNode')),
('target_node', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='contentcuration_prerequisitecontentrelationship_target_node', to='contentcuration.ContentNode')),
],
),
migrations.CreateModel(
name='RelatedContentRelationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('contentnode_1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='contentcuration_relatedcontentrelationship_1', to='contentcuration.ContentNode')),
('contentnode_2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='contentcuration_relatedcontentrelationship_2', to='contentcuration.ContentNode')),
],
),
migrations.AddField(
model_name='file',
name='file_format',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='contentcuration.FileFormat'),
),
migrations.AddField(
model_name='file',
name='lang',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.Language'),
),
migrations.AddField(
model_name='file',
name='preset',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='files', to='contentcuration.FormatPreset'),
),
migrations.AddField(
model_name='contentnode',
name='is_related',
field=models.ManyToManyField(blank=True, related_name='relate_to',
through='contentcuration.RelatedContentRelationship', to='contentcuration.ContentNode'),
),
migrations.AddField(
model_name='contentnode',
name='kind',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contentnodes', to='contentcuration.ContentKind'),
),
migrations.AddField(
model_name='contentnode',
name='license',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contentcuration.License'),
),
migrations.AddField(
model_name='contentnode',
name='original_node',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='duplicates', to='contentcuration.ContentNode'),
),
migrations.AddField(
model_name='contentnode',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='children', to='contentcuration.ContentNode'),
),
migrations.AddField(
model_name='contentnode',
name='prerequisite',
field=models.ManyToManyField(blank=True, related_name='is_prerequisite_of',
through='contentcuration.PrerequisiteContentRelationship', to='contentcuration.ContentNode'),
),
migrations.AddField(
model_name='contentnode',
name='tags',
field=models.ManyToManyField(blank=True, related_name='tagged_content', to='contentcuration.ContentTag'),
),
migrations.AddField(
model_name='channel',
name='clipboard_tree',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='channel_clipboard', to='contentcuration.ContentNode'),
),
migrations.AddField(
model_name='channel',
name='editors',
field=models.ManyToManyField(help_text='Users with edit rights', related_name='editable_channels',
to=settings.AUTH_USER_MODEL, verbose_name='editors'),
),
migrations.AddField(
model_name='channel',
name='main_tree',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='channel_main', to='contentcuration.ContentNode'),
),
migrations.AddField(
model_name='channel',
name='trash_tree',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='channel_trash', to='contentcuration.ContentNode'),
),
migrations.AddField(
model_name='assessmentitem',
name='exercise',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='all_assessment_items', to='contentcuration.Exercise'),
),
migrations.AlterUniqueTogether(
name='relatedcontentrelationship',
unique_together=set([('contentnode_1', 'contentnode_2')]),
),
migrations.AlterUniqueTogether(
name='prerequisitecontentrelationship',
unique_together=set([('target_node', 'prerequisite')]),
),
migrations.AlterUniqueTogether(
name='contenttag',
unique_together=set([('tag_name', 'channel')]),
),
]
|
|
import maya.cmds
class RibMaya(object):
"""Methodes for Rib managment in Maya"""
def __init__( self ):
self.output = 'E:/141031_defaultProject/maya/3delight/rib_scene_001/rib/obj/ribObject_python.rib'
def verbose( self ):
print 'pouet'
# print self.ribObject( name='obj1', face='1 1', vtxPerFace='4 4', vtxIndex='0 1 2 3', vtxP='0 0 0 0', n='0 0 0 0', stP='0 0 0 0' )
return
print self.help()
@staticmethod
def meshInfo( shapeNode ):
"""Return dict meshInfo for Rib"""
'@parameter string shapeNode Name of the shape.'
# init var
face = ' '
vtxPerFace = ' '
vtxIndex = ' '
vtxP = ' '
vtxN = ' '
stP = ' '
for i in maya.cmds.polyInfo( shapeNode, fv=True ):
# face
face += '1 '
# vertex index and vertex per face
countVtx = 0
for j in i.split( ' ' ) :
if j.isdigit():
countVtx += 1
vtxIndex += '%s ' % ( j )
vtxPerFace += '%i ' % ( countVtx )
# vertex position
for i in maya.cmds.getAttr( '%s.vrts' % ( shapeNode ), mi=True ):
tmpP = maya.cmds.xform( '%s.pnts[%i]' % ( shapeNode, i ), q=True, t=True, ws=True )
tmpN = maya.cmds.polyNormalPerVertex('%s.pnts[%i]' % ( shapeNode, i ), q=True, xyz=True )
vtxP += '%s %s %s ' % ( str(tmpP[0]), str(tmpP[1]), str(tmpP[2]) )
vtxN += '%s %s %s ' % ( str(tmpN[0]), str(tmpN[1]), str(tmpN[2]) )
# st position
for i in range( maya.cmds.polyEvaluate( shapeNode, uvcoord=True ) ) :
tmpST = maya.cmds.polyEditUV( '%s.map[%i]' % ( shapeNode, i ), q=True )
stP += '%s %s ' % ( str(tmpST[0]), str(tmpST[1]) )
# Output in mesh info dict
meshInfo = { 'face':face, 'vtxPerFace':vtxPerFace, 'vtxIndex':vtxIndex, 'vtxP':vtxP, 'vtxN':vtxN, 'stP':stP }
return meshInfo
@staticmethod
def help():
"""Return string the help of the class RibMaya"""
helpString = '\n ######## class Rib ########'
helpString += '\n - ribWrite() : Write the rib file.'
helpString += '\n - string ribPath Path of the rib file.'
helpString += '\n - string content Content of the rib file.'
helpString += '\n - bool force Force the write of the rib for override.'
return helpString
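# Hedged usage sketch (not part of the original module): assumes a running Maya
# session containing a polygon shape named 'pCubeShape1'. It only shows how the
# meshInfo() dictionary built above is typically inspected.
def ribMayaMeshInfoExample():
    info = RibMaya.meshInfo( 'pCubeShape1' )
    print RibMaya.help()
    print info['vtxPerFace']
    print info['vtxIndex']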
# --------------------- tmp work ---------------------
# TODO fix normal problem and st problem
def writeObjectRib( shapeNode ):
"""Return dict meshInfo for Rib"""
'@parameter string shapeNode Name of the shape.'
path = 'E:/141031_defaultProject/maya/3delight/rib_scene_001/rib/obj/test_python.rib'
# init var
face = ' '
vtxPerFace = ' '
vtxIndex = ' '
vtxP = ' '
vtxN = ' '
stP = ' '
# init file
rib = open( path, 'w' )
rib.write( '\nObjectBegin "%s"' % ( shapeNode ) )
rib.write( '\n PointsGeneralPolygons' )
rib.close()
# face
rib = open( path, 'a' )
rib.write( ' [ ' )
countLine = 0
countOp = 0
for i in maya.cmds.polyInfo( shapeNode, fv=True ) :
rib.write( '1 ' )
countLine += 1
if countLine == 18 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 10 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# vtxPerFace
rib = open( path, 'a' )
rib.write( '\n [ ' )
countLine = 0
countOp = 0
for i in maya.cmds.polyInfo( shapeNode, fv=True ):
# vertex index and vertex per face
countVtx = 0
for j in i.split( ' ' ) :
if j.isdigit():
countVtx += 1
rib.write( '%i ' % ( countVtx ) )
countLine += 1
if countLine == 18 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 10 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# vtxIndex
rib = open( path, 'a' )
rib.write( '\n [ ' )
countLine = 0
countOp = 0
for i in maya.cmds.polyInfo( shapeNode, fv=True ):
for j in i.split( ' ' ) :
if j.isdigit():
rib.write( '%s ' % ( j ) )
countLine += 1
if countLine == 18 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 10 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# vtxP
rib = open( path, 'a' )
rib.write( '\n "vertex point P" [ ' )
countLine = 0
countOp = 0
for i in maya.cmds.getAttr( '%s.vrts' % ( shapeNode ), mi=True ):
tmpP = maya.cmds.xform( '%s.pnts[%i]' % ( shapeNode, i ), q=True, t=True, ws=True )
rib.write( '%s %s %s ' % ( str(round(tmpP[0], 7)), str(round(tmpP[1], 7)), str(round(tmpP[2], 7)) ) )
countLine += 1
if countLine == 4 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 10 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# vtxN
rib = open( path, 'a' )
rib.write( '\n "facevarying normal N" [ ' )
countLine = 0
countOp = 0
for i in maya.cmds.getAttr( '%s.vrts' % ( shapeNode ), mi=True ):
tmpN = maya.cmds.polyNormalPerVertex('%s.pnts[%i]' % ( shapeNode, i ), q=True, xyz=True )
rib.write( '%s %s %s ' % ( str(round(tmpN[0], 7)), str(round(tmpN[1], 7)), str(round(tmpN[2], 7)) ) )
countLine += 1
if countLine == 4 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 10 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# stP
rib = open( path, 'a' )
rib.write( '\n "facevarying float[2] st" [ ' )
countLine = 0
countOp = 0
for i in range( maya.cmds.polyEvaluate( shapeNode, uvcoord=True ) ):
tmpST = maya.cmds.polyEditUV( '%s.map[%i]' % ( shapeNode, i ), q=True )
rib.write( '%s %s ' % ( str(round(tmpST[0], 7)), str(round(tmpST[1], 7)) ) )
countLine += 1
if countLine == 6 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 10 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# close file
rib = open( path, 'a' )
rib.write( '\nObjectEnd\n' )
rib.write( '\nAttributeBegin' )
rib.write( '\n ObjectInstance "%s"' % ( shapeNode ) )
rib.write( '\nAttributeEnd' )
rib.write( '\n' )
rib.close()
# writeObjectRib( 'pSphereShape1' )
# TODO fix st problem
def writeObjectSubdivRib( shapeNode ):
"""Return dict meshInfo for Rib"""
'@parameter string shapeNode Name of the shape.'
path = 'E:/141031_defaultProject/maya/3delight/rib_scene_001/rib/obj/test_python.rib'
# init var
face = ' '
vtxPerFace = ' '
vtxIndex = ' '
vtxP = ' '
vtxN = ' '
stP = ' '
# init file
rib = open( path, 'w' )
rib.write( '\nObjectBegin "%s"' % ( shapeNode ) )
rib.write( '\n SubdivisionMesh "catmull-clark"' )
rib.close()
# vtxPerFace
rib = open( path, 'a' )
rib.write( ' [ ' )
countLine = 0
countOp = 0
for i in maya.cmds.polyInfo( shapeNode, fv=True ):
countVtx = 0
item = i.split( ' ' )
item.reverse()
for j in item :
if j.isdigit():
countVtx += 1
rib.write( '%i ' % ( countVtx ) )
countLine += 1
if countLine == 18 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 10 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# vtxIndex
rib = open( path, 'a' )
rib.write( '\n [ ' )
countLine = 0
countOp = 0
for i in maya.cmds.polyInfo( shapeNode, fv=True ):
item = i.split( ' ' )
item.reverse()
for j in item :
if j.isdigit():
rib.write( '%s ' % ( j ) )
countLine += 1
if countLine == 18 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 10 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# interp
rib = open( path, 'a' )
rib.write( '\n[ "interpolateboundary" "facevaryinginterpolateboundary" ] [ 1 0 1 0 ] [ 2 1 ] [ ]\n' )
rib.close()
# vtxP
rib = open( path, 'a' )
rib.write( '\n "vertex point P" [ ' )
countLine = 0
countOp = 0
for i in maya.cmds.getAttr( '%s.vrts' % ( shapeNode ), mi=True ):
tmpP = maya.cmds.xform( '%s.pnts[%i]' % ( shapeNode, i ), q=True, t=True, ws=True )
rib.write( '%s %s %s ' % ( str(round(tmpP[0], 7)), str(round(tmpP[1], 7)), str(round(tmpP[2], 7)) ) )
countLine += 1
if countLine == 4 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 20 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# stP
rib = open( path, 'a' )
rib.write( '\n "facevarying float[2] st" [ ' )
countLine = 0
countOp = 0
for i in range( maya.cmds.polyEvaluate( shapeNode, uvcoord=True ) ):
tmpST = maya.cmds.polyEditUV( '%s.map[%i]' % ( shapeNode, i ), q=True )
rib.write( '%s %s ' % ( str(round(tmpST[0], 7)), str(round(tmpST[1], 7)) ) )
countLine += 1
if countLine == 6 :
countLine = 0
countOp += 0.001
rib.write( '\n ' )
if countOp > 20 :
countOp = 0
rib.close()
rib = open( path, 'a' )
rib.write( ']\n' )
rib.close()
# close file
rib = open( path, 'a' )
rib.write( '\nObjectEnd\n' )
rib.write( '\nAttributeBegin' )
rib.write( '\n ObjectInstance "%s"' % ( shapeNode ) )
rib.write( '\nAttributeEnd' )
rib.write( '\n' )
rib.close()
# writeObjectSubdivRib( 'pSphereShape1' )
'''
shapeNode = 'pCubeShape1'
for i in maya.cmds.polyInfo( fv=True ):
item = i.split( ' ' )
item.reverse()
for j in item :
if j.isdigit():
bool = False
for k in maya.cmds.polyEditUV( '%s.map[%s]' % ( shapeNode, j ), q=True ):
if bool == True : print round(1-k, 8)
else : print round(k, 8)
bool = True
'''
|
|
"""
Contains functions to perform technical analysis on pandas OHLCV data frames
"""
import logging
from typing import Union
import pandas as pd
import pytech.utils.pandas_utils as pd_utils
logger = logging.getLogger(__name__)
def sma(df: pd.DataFrame,
period: int = 50,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
Simple moving average
:param df: The data frame to perform the sma on.
:param period: The length of the moving average
:param col: The column in the data frame to use.
:return: A series with the simple moving average
"""
sma = df[col].rolling(center=False,
window=period,
min_periods=period - 1).mean()
return pd.Series(sma, name='sma', index=df.index).dropna()
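# Hedged usage sketch (not part of the original module): builds a tiny frame
# keyed by the same close-price column constant used above and computes a
# short SMA. The prices are made up purely for illustration.
def _sma_usage_example() -> pd.Series:
    demo = pd.DataFrame({pd_utils.CLOSE_COL: [10.0, 11.0, 12.0, 13.0, 14.0]})
    return sma(demo, period=3)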
def smm(df: pd.DataFrame,
period: int = 50,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
Compute the simple moving median over a given period.
:param df: The data frame.
:param period: The number of days to use.
:param col: The name of the column to use to compute the median.
:return: Series containing the simple moving median.
"""
temp_series = df[col].rolling(center=False,
window=period,
min_periods=period - 1).median()
return pd.Series(temp_series, index=df.index, name='smm')
def ewma(df: pd.DataFrame, period: int = 50,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
Exponential weighted moving average.
:param df:
:param period:
:param col:
:return:
"""
return df[col].ewm(ignore_na=False,
min_periods=period - 1,
span=period).mean()
# noinspection PyTypeChecker,PyUnresolvedReferences
def triple_ewma(df: pd.DataFrame, period: int = 50,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
Triple Exponential Weighted Moving Average.
    :param df: The data frame to perform the calculation on.
:param period: The number of periods.
:param col: The column to perform the calculation on.
:return:
"""
ewma_ = ewma(df, period, col)
triple_ema = 3 * ewma_
ema_ema_ema = (ewma_.ewm(ignore_na=False, span=period).mean()
.ewm(ignore_na=False, span=period).mean())
series = triple_ema - 3 * (ewma_.ewm(ignore_na=False,
min_periods=period - 1,
span=period).mean()) + ema_ema_ema
return series.dropna()
def triangle_ma(df: pd.DataFrame, period: int = 50,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
Triangle Moving Average. The SMA of the SMA.
    :param df: The data frame to perform the calculation on.
:param period: The number of periods.
:param col: The column to use to do the calculation.
:return:
"""
sma_ = sma(df, period, col)
return sma_.rolling(center=False, window=period,
min_periods=period - 1).mean().dropna()
def trix(df: pd.DataFrame, period: int = 50,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
Triple Exponential Moving Average Oscillator (trix)
    Calculates the triple EMA of `n` periods and finds the percent change
    between 1 period of the triple EMA.
    Oscillates around 0; positive values indicate a bullish signal.
    :param df: The data frame to perform the calculation on.
:param period: The number of periods.
:param col: The column to use to do the calculation.
:return:
"""
emwa_one = ewma(df, period, col)
emwa_two = emwa_one.ewm(ignore_na=False,
min_periods=period - 1,
span=period).mean()
emwa_three = emwa_two.ewm(ignore_na=False,
min_periods=period - 1,
span=period).mean()
return emwa_three.pct_change(periods=1).dropna()
def efficiency_ratio(df: pd.DataFrame,
period: int = 10,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
    Kaufman Efficiency Ratio (Efficiency Indicator).
    Ranges between 0 and 1; values near 1 indicate an efficient, strongly
    trending market, while values near 0 indicate choppy, directionless
    price action.
    :param df: The data frame to perform the calculation on.
:param period: The number of periods.
:param col: The column to use to do the calculation.
:return:
"""
change = df[col].diff(periods=period).abs()
vol = df[col].diff().abs().rolling(window=period).sum()
return pd.Series(change / vol).dropna()
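# Hedged illustration (not part of the original module): on a strictly rising
# series the net change over the window equals the sum of the absolute
# one-period changes, so the efficiency ratio above evaluates to 1.0; choppy
# price action drives it towards 0.
def _efficiency_ratio_example() -> pd.Series:
    trending = pd.DataFrame({pd_utils.CLOSE_COL: list(range(1, 16))})
    return efficiency_ratio(trending, period=10)  # all values == 1.0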
def kama(df: pd.DataFrame,
period: int = 20,
col: str = pd_utils.CLOSE_COL,
efficiency_ratio_periods: int = 10,
ema_fast: int = 2,
ema_slow: int = 30) -> pd.Series:
"""
Kaufman's Adaptive Moving Average.
:param df: The data frame.
:param period:
:param col: The column to use.
:param efficiency_ratio_periods: Number of periods to use for the
Efficiency Ratio.
:param ema_fast: Number of periods to use for the fastest EMA constant.
:param ema_slow: Number of periods to use for the slowest EMA constant.
:return:
"""
er = efficiency_ratio(df, efficiency_ratio_periods, col)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
# smoothing constant
# noinspection PyTypeChecker
sc = pd.Series((er * (fast_alpha - slow_alpha) + slow_alpha) ** 2)
sma_ = sma(df, period, col)
kama_ = []
for smooth, ma, price in zip(sc, sma_.shift(-1), df[col]):
try:
kama_.append(kama_[-1] + smooth * (price - kama_[-1]))
except (IndexError, TypeError):
if pd.notnull(ma):
kama_.append(ma + smooth * (price - ma))
else:
kama_.append(None)
return pd.Series(kama_, index=sma_.index, name='KAMA')
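# Hedged note (not part of the original module): kama() above uses the
# smoothing constant sc = (ER * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
# so sc is bounded by slow_alpha ** 2 when ER == 0 and fast_alpha ** 2 when
# ER == 1. The helper evaluates those bounds for the default 2/30 constants.
def _kama_smoothing_constant_bounds(ema_fast: int = 2, ema_slow: int = 30):
    fast_alpha = 2 / (ema_fast + 1)
    slow_alpha = 2 / (ema_slow + 1)
    return slow_alpha ** 2, fast_alpha ** 2  # approx. (0.0042, 0.4444)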
def zero_lag_ema(df: pd.DataFrame, period: int = 30,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
Zero Lag Exponential Moving Average.
:param df: The data frame.
:param period: Number of periods.
:param col: The column to use.
:return:
"""
    # diff() requires an integer number of periods
    lag = int((period - 1) / 2)
return pd.Series(df[col] + (df[col].diff(lag)),
name='zero_lag_ema').dropna()
def wma(df: pd.DataFrame, period: int = 30,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
Weighted Moving Average.
:param df:
:param period:
:param col:
:return:
"""
wma_ = []
for chunk in _chunks(df, period, col):
try:
wma_.append(_chunked_wma(chunk, period))
except AttributeError:
wma_.append(None)
wma_.reverse()
return pd.Series(wma_, index=df.index, name='wma')
def _chunks(df: Union[pd.DataFrame, pd.Series],
period: int,
col: str = pd_utils.CLOSE_COL):
"""
    Yield chunks of `period` rows, walking backwards from the most recent row;
    yields None when fewer than `period` rows remain.
:param df:
:param period:
:param col:
:return:
"""
df_rev = df[col].iloc[::-1]
for i in enumerate(df_rev):
chunk = df_rev.iloc[i[0]:i[0] + period]
if len(chunk) != period:
yield None
else:
yield chunk
def _chunked_wma(chunk, period) -> float:
denominator = (period * (period + 1)) / 2
ma = []
for price, i in zip(chunk.iloc[::-1].items(),
range(period + 1)[1:]):
ma.append(price[1] * (i / denominator))
return sum(ma)
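# Hedged worked example (not part of the original module): for period = 3 the
# denominator used by _chunked_wma() is 3 * 4 / 2 = 6, so the most recent
# price is weighted 3/6, the next 2/6 and the oldest 1/6; the weights sum to 1.
def _wma_weights_example(period: int = 3):
    denominator = (period * (period + 1)) / 2
    weights = [i / denominator for i in range(1, period + 1)]
    return weights, sum(weights)  # ([0.166..., 0.333..., 0.5], 1.0)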
def true_range(df: pd.DataFrame, period: int = 14) -> pd.Series:
"""
    Finds the true range an asset is trading within.
    The true range is the greatest of:
    Most recent period's high - most recent period's low.
Absolute value of the most recent period's high minus the previous close.
Absolute value of the most recent period's low minus the previous close.
:param df:
:param period:
:return:
"""
high_low = pd.Series(df[pd_utils.HIGH_COL].tail(period)
- df[pd_utils.LOW_COL].tail(period),
name='high_low')
high_close = pd.Series(df[pd_utils.HIGH_COL].tail(period)
- (df[pd_utils.CLOSE_COL].shift(-1)
.abs().tail(period)),
name='high_prev_close')
low_close = pd.Series(df[pd_utils.CLOSE_COL].shift(-1).tail(period)
- df[pd_utils.LOW_COL].abs().tail(period),
name='prev_close_low')
true_range = pd.concat([high_low, high_close, low_close], axis=1)
true_range_list = []
for row in true_range.itertuples():
# TODO: fix this so it doesn't throw an exception for weekends
try:
true_range_list.append(max(row.high_low,
row.high_prev_close,
row.prev_close_low))
except TypeError:
continue
return pd.Series(true_range_list,
index=df.index[-period:],
name='true_range').dropna()
def avg_true_range(df: pd.DataFrame, period=14) -> pd.Series:
"""
Moving average of an asset's true range.
:param df: The data frame with the OHLCV data.
:param period:
:return:
"""
tr = true_range(df, period * 2)
atr = tr.rolling(center=False,
window=period,
min_periods=period - 1).mean()
return pd.Series(atr, name='atr').dropna()
def smoothed_ma(df: pd.DataFrame,
period: int = 30,
col: str = pd_utils.CLOSE_COL) -> pd.Series:
"""
    Smoothed moving average (Wilder's smoothing): an exponentially weighted
    average with alpha = 1 / period, so historic prices retain weight rather
    than being dropped from a fixed window.
:param df:
:param period:
:param col:
:return:
"""
ma = df[col].ewm(alpha=1 / period).mean()
return pd.Series(ma, index=df.index, name='smoothed_ma')
def rsi(df: pd.DataFrame, period: int = 14, col: str = pd_utils.CLOSE_COL):
"""
Relative strength indicator.
RSI oscillates between 0 and 100 and traditionally
+70 is considered overbought and under 30 is oversold.
:param df:
:param period:
:param col:
:return:
"""
rsi_series = pd.DataFrame(index=df.index)
gain = [0]
loss = [0]
for row, shifted_row in zip(df[col], df[col].shift(-1)):
if row - shifted_row > 0:
gain.append(row - shifted_row)
loss.append(0)
elif row - shifted_row < 0:
gain.append(0)
loss.append(abs(row - shifted_row))
elif row - shifted_row == 0:
gain.append(0)
loss.append(0)
rsi_series['gain'] = gain
rsi_series['loss'] = loss
avg_gain = rsi_series['gain'].rolling(window=period).mean()
avg_loss = rsi_series['loss'].rolling(window=period).mean()
relative_strength = avg_gain / avg_loss
rsi_ = 100 - (100 / (1 + relative_strength))
return pd.Series(rsi_, index=df.index, name='rsi')
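# Hedged worked example (not part of the original module): with an average
# gain of 1.0 and an average loss of 0.5 the relative strength is 2.0, so
# RSI = 100 - 100 / (1 + 2) ~= 66.7, approaching but not yet inside the
# traditional overbought zone above 70.
def _rsi_from_averages(avg_gain: float = 1.0, avg_loss: float = 0.5) -> float:
    relative_strength = avg_gain / avg_loss
    return 100 - (100 / (1 + relative_strength))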
def macd_signal(df: pd.DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
col: str = pd_utils.CLOSE_COL) -> pd.DataFrame:
"""
When the MACD falls below the signal line this is a bearish signal,
and vice versa.
When security price diverges from MACD it signals the end of a trend.
    If the MACD rises dramatically, i.e. the shorter moving average pulls
    away from the slower moving average, it is a signal that the security is
    overbought and should come back to normal levels soon.
As with any signals this can be misleading and should be combined with
something to avoid being faked out.
    NOTE: be careful changing the default periods;
    the method won't break, but these are the 'traditional' values.
:param df:
:param period_fast: Traditionally 12.
:param period_slow: Traditionally 26.
:param signal: Traditionally 9.
:param col: The name of the column.
:return:
"""
ema_fast = pd.Series(df[col].ewm(ignore_na=False,
min_periods=period_fast - 1,
span=period_fast).mean(),
index=df.index)
ema_slow = pd.Series(df[col].ewm(ignore_na=False,
min_periods=period_slow - 1,
span=period_slow).mean(),
index=df.index)
macd_series = pd.Series(ema_fast - ema_slow, index=df.index, name='macd')
macd_signal_series = macd_series.ewm(ignore_na=False,
span=signal).mean()
macd_signal_series = pd.Series(macd_signal_series,
index=df.index,
name='macd_signal')
macd_df = pd.concat([macd_signal_series, macd_series], axis=1)
return pd.DataFrame(macd_df).dropna()
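# Hedged usage sketch (not part of the original module): shows how the two
# columns returned by macd_signal() are commonly compared to flag bullish
# periods (MACD above its signal line). Column names match the Series names
# assigned above.
def _macd_bullish_flags(df: pd.DataFrame) -> pd.Series:
    macd_df = macd_signal(df)
    return macd_df['macd'] > macd_df['macd_signal']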
def dmi(df: pd.DataFrame, period: int = 14):
"""
DMI also known as Average Directional Movement Index (ADX)
    This is a lagging indicator that only indicates a trend's strength rather
    than trend direction, so it is best coupled with a directional indicator
    to determine which way the trend is moving.
A strategy created by Alexander Elder states a buy signal is triggered
when the DMI peaks and starts to decline, when the positive dmi is above
the negative dmi.
A sell signal is triggered when dmi stops falling and goes flat.
:param df:
:param period:
:return:
"""
temp_df = pd.DataFrame()
temp_df['up_move'] = df[pd_utils.HIGH_COL].diff()
temp_df['down_move'] = df[pd_utils.LOW_COL].diff()
positive_dm = []
negative_dm = []
for row in temp_df.itertuples():
if row.up_move > row.down_move and row.up_move > 0:
positive_dm.append(row.up_move)
else:
positive_dm.append(0)
if row.down_move > row.up_move and row.down_move > 0:
negative_dm.append(row.down_move)
else:
negative_dm.append(0)
temp_df['positive_dm'] = positive_dm
temp_df['negative_dm'] = negative_dm
atr = avg_true_range(df, period=period * 6)
dir_plus = pd.Series(
100 * (temp_df['positive_dm'] / atr).ewm(span=period,
min_periods=period - 1).mean())
dir_minus = pd.Series(
100 * (temp_df['negative_dm'] / atr).ewm(span=period,
min_periods=period - 1).mean())
return pd.concat([dir_plus, dir_minus])
# noinspection PyTypeChecker
def bollinger_bands(df: pd.DataFrame,
period: int = 30,
col: str = pd_utils.CLOSE_COL):
"""
    Bollinger Bands: an SMA middle band with upper and lower bands two
    standard deviations away, plus the bandwidth and %B columns.
:param df:
:param period:
:param col:
:return:
"""
std_dev = df[col].std()
middle_band = sma(df, period=period, col=col)
upper_bband = pd.Series(middle_band + (2 * std_dev),
name='upper_bband')
lower_bband = pd.Series(middle_band - (2 * std_dev),
name='lower_bband')
percent_b = (df[col] - lower_bband) / (upper_bband - lower_bband)
b_bandwidth = pd.Series((upper_bband - lower_bband) / middle_band)
return pd.concat([upper_bband, middle_band, lower_bband, b_bandwidth,
percent_b], axis=1)
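# Hedged usage sketch (not part of the original module): %B, the last column
# returned by bollinger_bands() above, equals 1.0 when the close sits on the
# upper band and 0.0 on the lower band, so values outside [0, 1] flag closes
# outside the bands.
def _bollinger_percent_b(df: pd.DataFrame, period: int = 30) -> pd.Series:
    bands = bollinger_bands(df, period=period)
    return bands.iloc[:, -1]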
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to convert variables to constants in TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import tensor_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.saver import export_meta_graph
def _run_inline_graph_optimization(func):
"""Apply function inline optimization to the graph.
Returns the GraphDef after Grappler's function inlining optimization is
applied. This optimization does not work on models with control flow.
Args:
func: ConcreteFunction.
Returns:
GraphDef
"""
meta_graph = export_meta_graph(
graph_def=func.graph.as_graph_def(), graph=func.graph)
  # Clear the initializer_name for the variables collections, since it is not
  # needed once the model has been saved.
for name in [
"variables", "model_variables", "trainable_variables", "local_variables"
]:
raw_list = []
    for raw in meta_graph.collection_def[name].bytes_list.value:
variable = variable_pb2.VariableDef()
variable.ParseFromString(raw)
variable.ClearField("initializer_name")
raw_list.append(variable.SerializeToString())
meta_graph.collection_def[name].bytes_list.value[:] = raw_list
# Add a collection 'train_op' so that Grappler knows the outputs.
fetch_collection = meta_graph_pb2.CollectionDef()
for array in func.inputs + func.outputs:
fetch_collection.node_list.value.append(array.name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
# Initialize RewriterConfig with everything disabled except function inlining.
config = config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
rewrite_options.min_graph_nodes = -1 # do not skip small graphs
rewrite_options.optimizers.append("function")
return tf_optimizer.OptimizeGraph(config, meta_graph)
def _get_tensors_from_graph(graph, tensors):
"""Gets the Tensors in `graph` with the name of the tensors in `tensors`.
Args:
graph: TensorFlow Graph.
tensors: List of Tensors.
Returns:
List of Tensors.
"""
new_tensors = []
for orig_tensor in tensors:
new_tensor = graph.get_tensor_by_name(orig_tensor.name)
if new_tensor.shape.rank is None:
new_tensor.set_shape(orig_tensor.shape)
new_tensors.append(new_tensor)
return new_tensors
def convert_variables_to_constants_v2(func):
"""Replaces all the variables in a graph with constants of the same values.
TensorFlow 2.0 function for converting all Variable ops into Const ops holding
the same values. This makes it possible to describe the network fully with a
single GraphDef file, and allows the removal of a lot of ops related to
loading and saving the variables. This function runs Grappler's function
inlining optimization in order to return a single subgraph.
The current implementation only works for graphs that do not contain any
control flow or embedding related ops.
Args:
func: ConcreteFunction.
Returns:
ConcreteFunction containing a simplified version of the original.
"""
# TODO(nupurgarg): Replace ResourceGather with Gather.
# TODO(nupurgarg): Change attr for Variables in control flow and functions.
graph_def = _run_inline_graph_optimization(func)
# Identify the ReadVariableOps.
get_name = lambda name: name.split(":")[0]
map_name_to_node = {get_name(node.name): node for node in graph_def.node}
# TODO(b/125838789): Use `func.graph.captures`.
# Get mapping from input name to variable value.
tensor_data = {}
map_name_to_handle = {}
input_tensors = func.inputs[-len(func.captured_inputs):]
for var in func.graph.variables:
index = func.captured_inputs.index(var.handle)
tensor_name = get_name(input_tensors[index].name)
tensor_data[tensor_name] = var.numpy()
map_name_to_handle[tensor_name] = var.handle
# Get mapping from input name to value for non-variable placeholders.
map_name_to_value = {}
for name_tensor, value_tensor in zip(input_tensors, func.captured_inputs):
tensor_name = get_name(name_tensor.name)
if tensor_name not in map_name_to_handle:
map_name_to_value[tensor_name] = value_tensor
resource_identities = {}
placeholders = {}
converted_input_indices = set()
reference_variables = []
for node in graph_def.node:
if node.name in map_name_to_value:
# Get the dtype and data for the Placeholders whose values are stored as
# Tensors. This is the case for values that were originally Const ops.
tensor = map_name_to_value[node.name]
placeholders[node.name] = {
"dtype": node.attr["dtype"],
"data": tensor.numpy(),
}
converted_input_indices.add(
func.captured_inputs.index(map_name_to_value[node.name]))
# Collect the reference variables that cannot be lifted.
if node.op == "VariableV2":
reference_variables.append(node)
if node.op == "ReadVariableOp":
# Get name of Placeholder op associated with ReadVariableOp. There can be
# an Identity in between the ReadVariableOp and Placeholder. Store the
# Identity ops with the associated dtypes.
input_name = get_name(node.input[0])
while map_name_to_node[input_name].op == "Identity":
resource_identities[input_name] = node.attr["dtype"]
input_name = get_name(map_name_to_node[input_name].input[0])
if map_name_to_node[input_name].op != "Placeholder":
raise ValueError("Cannot find the Placeholder op that is an input "
"to the ReadVariableOp.")
# Build a map of Placeholder ops that are inputs to ReadVariableOps to the
# variable's dtype and data.
placeholders[input_name] = {
"dtype": node.attr["dtype"],
"data": tensor_data[input_name],
}
converted_input_indices.add(
func.captured_inputs.index(map_name_to_handle[input_name]))
# Reconstruct the graph with constants in place of variables.
output_graph_def = graph_pb2.GraphDef()
how_many_converted = 0
# Add identity node after the reference variable and get the tensor values
# for them.
if reference_variables:
reference_variable_tensors = []
with func.graph.as_default():
for node in reference_variables:
identity_node = array_ops.identity(
func.graph.as_graph_element(node.name + ":0"))
reference_variable_tensors.append(identity_node.name)
reference_variable_values = func.prune([], reference_variable_tensors)()
# Add values of reference variables as constant nodes.
for node, value in zip(reference_variables, reference_variable_values):
output_node = output_graph_def.node.add()
dtype = attr_value_pb2.AttrValue()
dtype.type = value.dtype.as_datatype_enum
output_node.op = "Const"
output_node.name = node.name
output_node.attr["dtype"].CopyFrom(dtype)
output_node.attr["value"].tensor.CopyFrom(
tensor_util.make_tensor_proto(value))
how_many_converted += 1
for input_node in graph_def.node:
    # Skip VariableV2 nodes, since their values are added by the identity nodes.
if input_node.op == "VariableV2":
continue
output_node = output_graph_def.node.add()
# Convert Placeholder ops to Const ops.
if input_node.name in placeholders:
dtype = placeholders[input_node.name]["dtype"]
data = placeholders[input_node.name]["data"]
output_node.op = "Const"
output_node.name = input_node.name
output_node.attr["dtype"].CopyFrom(dtype)
output_node.attr["value"].tensor.CopyFrom(
tensor_util.make_tensor_proto(
data, dtype=dtype.type, shape=data.shape))
how_many_converted += 1
# Change the dtype for Identity ops that are inputs to ReadVariableOps.
elif input_node.name in resource_identities:
output_node.CopyFrom(input_node)
output_node.attr["T"].CopyFrom(resource_identities[input_node.name])
# Convert ReadVariableOps into Identity ops.
elif input_node.op == "ReadVariableOp":
output_node.op = "Identity"
output_node.name = input_node.name
output_node.input.extend([input_node.input[0]])
output_node.attr["T"].CopyFrom(input_node.attr["dtype"])
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
else:
output_node.CopyFrom(input_node)
logging.info("Converted %d variables to const ops.", how_many_converted)
# Create a ConcreteFunction from the new GraphDef.
converted_inputs = set(
[input_tensors[index] for index in converted_input_indices])
not_converted_inputs = set(func.inputs).difference(converted_inputs)
not_converted_inputs_map = {
tensor.name: tensor for tensor in not_converted_inputs
}
new_input_names = [tensor.name for tensor in not_converted_inputs]
new_output_names = [tensor.name for tensor in func.outputs]
new_func = wrap_function.function_from_graph_def(output_graph_def,
new_input_names,
new_output_names)
  # Manually propagate shapes for input tensors where the shape is not
  # correctly propagated. Scalar shapes are lost when wrapping the function.
for input_tensor in new_func.inputs:
input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape)
return new_func
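# Hedged usage sketch (illustrative only, not part of the original module):
# freezes a tiny Keras model through convert_variables_to_constants_v2 defined
# above. Only the public TF 2.x API (tf.keras, tf.function, tf.TensorSpec) is
# assumed; the helper name is hypothetical.
def _example_freeze_keras_model():
  import tensorflow as tf  # local import; example only
  model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
  concrete = tf.function(lambda x: model(x)).get_concrete_function(
      tf.TensorSpec([None, 8], tf.float32))
  frozen = convert_variables_to_constants_v2(concrete)
  # The frozen graph holds Const nodes in place of the captured variables.
  return frozen.graph.as_graph_def()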
|
|
"""SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/__init__.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import imp
import sys
import re
import os
import shutil
import SCons.Builder
import SCons.Errors
import SCons.Node.FS
import SCons.Scanner
import SCons.Scanner.C
import SCons.Scanner.D
import SCons.Scanner.LaTeX
import SCons.Scanner.Prog
import SCons.Scanner.SWIG
DefaultToolpath=[]
CScanner = SCons.Scanner.C.CScanner()
DScanner = SCons.Scanner.D.DScanner()
LaTeXScanner = SCons.Scanner.LaTeX.LaTeXScanner()
PDFLaTeXScanner = SCons.Scanner.LaTeX.PDFLaTeXScanner()
ProgramScanner = SCons.Scanner.Prog.ProgramScanner()
SourceFileScanner = SCons.Scanner.Base({}, name='SourceFileScanner')
SWIGScanner = SCons.Scanner.SWIG.SWIGScanner()
CSuffixes = [".c", ".C", ".cxx", ".cpp", ".c++", ".cc",
".h", ".H", ".hxx", ".hpp", ".hh",
".F", ".fpp", ".FPP",
".m", ".mm",
".S", ".spp", ".SPP", ".sx"]
DSuffixes = ['.d']
IDLSuffixes = [".idl", ".IDL"]
LaTeXSuffixes = [".tex", ".ltx", ".latex"]
SWIGSuffixes = ['.i']
for suffix in CSuffixes:
SourceFileScanner.add_scanner(suffix, CScanner)
for suffix in DSuffixes:
SourceFileScanner.add_scanner(suffix, DScanner)
for suffix in SWIGSuffixes:
SourceFileScanner.add_scanner(suffix, SWIGScanner)
# FIXME: what should be done here? Two scanners scan the same extensions,
# but look for different files, e.g., "picture.eps" vs. "picture.pdf".
# The builders for DVI and PDF explicitly reference their scanners
# I think that means this is not needed???
for suffix in LaTeXSuffixes:
SourceFileScanner.add_scanner(suffix, LaTeXScanner)
SourceFileScanner.add_scanner(suffix, PDFLaTeXScanner)
class Tool(object):
def __init__(self, name, toolpath=[], **kw):
self.name = name
self.toolpath = toolpath + DefaultToolpath
# remember these so we can merge them into the call
self.init_kw = kw
module = self._tool_module()
self.generate = module.generate
self.exists = module.exists
if hasattr(module, 'options'):
self.options = module.options
def _tool_module(self):
# TODO: Interchange zipimport with normal initialization for better error reporting
oldpythonpath = sys.path
sys.path = self.toolpath + sys.path
try:
try:
file, path, desc = imp.find_module(self.name, self.toolpath)
try:
return imp.load_module(self.name, file, path, desc)
finally:
if file:
file.close()
except ImportError, e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
except ImportError:
pass
else:
for aPath in self.toolpath:
try:
importer = zipimport.zipimporter(aPath)
return importer.load_module(self.name)
except ImportError, e:
pass
finally:
sys.path = oldpythonpath
full_name = 'SCons.Tool.' + self.name
try:
return sys.modules[full_name]
except KeyError:
try:
smpath = sys.modules['SCons.Tool'].__path__
try:
file, path, desc = imp.find_module(self.name, smpath)
module = imp.load_module(full_name, file, path, desc)
setattr(SCons.Tool, self.name, module)
if file:
file.close()
return module
except ImportError, e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
importer = zipimport.zipimporter( sys.modules['SCons.Tool'].__path__[0] )
module = importer.load_module(full_name)
setattr(SCons.Tool, self.name, module)
return module
except ImportError, e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
except ImportError, e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
def __call__(self, env, *args, **kw):
if self.init_kw is not None:
# Merge call kws into init kws;
# but don't bash self.init_kw.
if kw is not None:
call_kw = kw
kw = self.init_kw.copy()
kw.update(call_kw)
else:
kw = self.init_kw
env.Append(TOOLS = [ self.name ])
if hasattr(self, 'options'):
import SCons.Variables
if 'options' not in env:
from SCons.Script import ARGUMENTS
env['options']=SCons.Variables.Variables(args=ARGUMENTS)
opts=env['options']
self.options(opts)
opts.Update(env)
self.generate(env, *args, **kw)
def __str__(self):
return self.name
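# Hedged usage sketch (illustrative only): constructs a Tool directly and
# applies it to a construction environment, much as env.Tool() does. The
# 'textfile' name is just an example of a bundled tool; any name that
# _tool_module() can resolve works the same way.
def _example_tool_usage():
    import SCons.Environment
    env = SCons.Environment.Environment(tools=[])
    t = Tool('textfile')
    if t.exists(env):
        t(env)  # equivalent to env.Tool('textfile')
    return env.get('TOOLS')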
##########################################################################
# Create common executable program / library / object builders
def createProgBuilder(env):
"""This is a utility function that creates the Program
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
program = env['BUILDERS']['Program']
except KeyError:
import SCons.Defaults
program = SCons.Builder.Builder(action = SCons.Defaults.LinkAction,
emitter = '$PROGEMITTER',
prefix = '$PROGPREFIX',
suffix = '$PROGSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'Object',
target_scanner = ProgramScanner)
env['BUILDERS']['Program'] = program
return program
def createStaticLibBuilder(env):
"""This is a utility function that creates the StaticLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
static_lib = env['BUILDERS']['StaticLibrary']
except KeyError:
action_list = [ SCons.Action.Action("$ARCOM", "$ARCOMSTR") ]
if env.Detect('ranlib'):
ranlib_action = SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR")
action_list.append(ranlib_action)
static_lib = SCons.Builder.Builder(action = action_list,
emitter = '$LIBEMITTER',
prefix = '$LIBPREFIX',
suffix = '$LIBSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'StaticObject')
env['BUILDERS']['StaticLibrary'] = static_lib
env['BUILDERS']['Library'] = static_lib
return static_lib
def _call_linker_cb(env, callback, args, result = None):
"""Returns the result of env['LINKCALLBACKS'][callback](*args)
if env['LINKCALLBACKS'] is a dictionary and env['LINKCALLBACKS'][callback]
is callable. If these conditions are not met, return the value provided as
the *result* argument. This function is mainly used for generating library
    info such as versioned suffixes, symlink maps, sonames, etc., by delegating
    the core job to callbacks configured by the current linker tool."""
Verbose = False
if Verbose:
print '_call_linker_cb: args=%r' % args
print '_call_linker_cb: callback=%r' % callback
try:
cbfun = env['LINKCALLBACKS'][callback]
except (KeyError, TypeError):
if Verbose:
print '_call_linker_cb: env["LINKCALLBACKS"][%r] not found or can not be used' % callback
pass
else:
if Verbose:
print '_call_linker_cb: env["LINKCALLBACKS"][%r] found' % callback
print '_call_linker_cb: env["LINKCALLBACKS"][%r]=%r' % (callback, cbfun)
if(callable(cbfun)):
if Verbose:
print '_call_linker_cb: env["LINKCALLBACKS"][%r] is callable' % callback
result = cbfun(env, *args)
return result
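# Hedged usage sketch (illustrative only): how a linker tool might register a
# callback that _call_linker_cb() dispatches to. The callback name follows the
# 'Versioned<libtype><infoname>' convention used by the generators below; the
# body is a simplified stand-in, not any real tool's implementation.
def _example_register_link_callback(env):
    def versioned_shlib_suffix(env, suffix, version):
        # e.g. '.so' + '0.1.2' -> '.so.0.1.2'
        return suffix + '.' + version
    env['LINKCALLBACKS'] = {'VersionedShLibSuffix': versioned_shlib_suffix}
    # Falls back to the plain '.so' if the callback were missing or not callable.
    return _call_linker_cb(env, 'VersionedShLibSuffix', ['.so', '0.1.2'], '.so')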
def _call_env_subst(env, string, *args, **kw):
kw2 = {}
for k in ('raw', 'target', 'source', 'conv', 'executor'):
try: kw2[k] = kw[k]
except KeyError: pass
return env.subst(string, *args, **kw2)
class _ShLibInfoSupport(object):
def get_libtype(self):
return 'ShLib'
def get_lib_prefix(self, env, *args, **kw):
return _call_env_subst(env,'$SHLIBPREFIX', *args, **kw)
def get_lib_suffix(self, env, *args, **kw):
return _call_env_subst(env,'$SHLIBSUFFIX', *args, **kw)
def get_lib_version(self, env, *args, **kw):
return _call_env_subst(env,'$SHLIBVERSION', *args, **kw)
def get_lib_noversionsymlinks(self, env, *args, **kw):
return _call_env_subst(env,'$SHLIBNOVERSIONSYMLINKS', *args, **kw)
class _LdModInfoSupport(object):
def get_libtype(self):
return 'LdMod'
def get_lib_prefix(self, env, *args, **kw):
return _call_env_subst(env,'$LDMODULEPREFIX', *args, **kw)
def get_lib_suffix(self, env, *args, **kw):
return _call_env_subst(env,'$LDMODULESUFFIX', *args, **kw)
def get_lib_version(self, env, *args, **kw):
return _call_env_subst(env,'$LDMODULEVERSION', *args, **kw)
def get_lib_noversionsymlinks(self, env, *args, **kw):
return _call_env_subst(env,'$LDMODULENOVERSIONSYMLINKS', *args, **kw)
class _ImpLibInfoSupport(object):
def get_libtype(self):
return 'ImpLib'
def get_lib_prefix(self, env, *args, **kw):
return _call_env_subst(env,'$IMPLIBPREFIX', *args, **kw)
def get_lib_suffix(self, env, *args, **kw):
return _call_env_subst(env,'$IMPLIBSUFFIX', *args, **kw)
def get_lib_version(self, env, *args, **kw):
version = _call_env_subst(env,'$IMPLIBVERSION', *args, **kw)
if not version:
try: lt = kw['implib_libtype']
except KeyError: pass
else:
if lt == 'ShLib':
version = _call_env_subst(env,'$SHLIBVERSION', *args, **kw)
elif lt == 'LdMod':
version = _call_env_subst(env,'$LDMODULEVERSION', *args, **kw)
return version
def get_lib_noversionsymlinks(self, env, *args, **kw):
disable = None
try: env['IMPLIBNOVERSIONSYMLINKS']
except KeyError:
try: lt = kw['implib_libtype']
except KeyError: pass
else:
if lt == 'ShLib':
disable = _call_env_subst(env,'$SHLIBNOVERSIONSYMLINKS', *args, **kw)
elif lt == 'LdMod':
disable = _call_env_subst(env,'$LDMODULENOVERSIONSYMLINKS', *args, **kw)
else:
disable = _call_env_subst(env,'$IMPLIBNOVERSIONSYMLINKS', *args, **kw)
return disable
class _LibInfoGeneratorBase(object):
"""Generator base class for library-related info such as suffixes for
    versioned libraries, symlink maps, sonames, etc. It handles the
    commonalities of SharedLibrary and LoadableModule.
"""
_support_classes = { 'ShLib' : _ShLibInfoSupport,
'LdMod' : _LdModInfoSupport,
'ImpLib' : _ImpLibInfoSupport }
def __init__(self, libtype, infoname):
self.set_libtype(libtype)
self.set_infoname(infoname)
def set_libtype(self, libtype):
try:
support_class = self._support_classes[libtype]
except KeyError:
raise ValueError('unsupported libtype %r' % libtype)
self._support = support_class()
def get_libtype(self):
return self._support.get_libtype()
def set_infoname(self, infoname):
self.infoname = infoname
def get_infoname(self):
return self.infoname
def get_lib_prefix(self, env, *args, **kw):
return self._support.get_lib_prefix(env,*args,**kw)
def get_lib_suffix(self, env, *args, **kw):
return self._support.get_lib_suffix(env,*args,**kw)
def get_lib_version(self, env, *args, **kw):
return self._support.get_lib_version(env,*args,**kw)
def get_lib_noversionsymlinks(self, env, *args, **kw):
return self._support.get_lib_noversionsymlinks(env,*args,**kw)
# Returns name of generator linker callback that shall be used to generate
# our info for a versioned library. For example, if our libtype is 'ShLib'
# and infoname is 'Prefix', it would return 'VersionedShLibPrefix'.
def get_versioned_lib_info_generator(self, **kw):
try: libtype = kw['generator_libtype']
except KeyError: libtype = self.get_libtype()
infoname = self.get_infoname()
return 'Versioned%s%s' % (libtype, infoname)
def generate_versioned_lib_info(self, env, args, result = None, **kw):
callback = self.get_versioned_lib_info_generator(**kw)
return _call_linker_cb(env, callback, args, result)
class _LibPrefixGenerator(_LibInfoGeneratorBase):
"""Library prefix generator, used as target_prefix in SharedLibrary and
LoadableModule builders"""
def __init__(self, libtype):
super(_LibPrefixGenerator, self).__init__(libtype, 'Prefix')
def __call__(self, env, sources = None, **kw):
Verbose = False
if sources and 'source' not in kw:
kw2 = kw.copy()
kw2['source'] = sources
else:
kw2 = kw
prefix = self.get_lib_prefix(env,**kw2)
if Verbose:
print "_LibPrefixGenerator: input prefix=%r" % prefix
version = self.get_lib_version(env, **kw2)
if Verbose:
print "_LibPrefixGenerator: version=%r" % version
if version:
prefix = self.generate_versioned_lib_info(env, [prefix, version], prefix, **kw2)
if Verbose:
print "_LibPrefixGenerator: return prefix=%r" % prefix
return prefix
ShLibPrefixGenerator = _LibPrefixGenerator('ShLib')
LdModPrefixGenerator = _LibPrefixGenerator('LdMod')
ImpLibPrefixGenerator = _LibPrefixGenerator('ImpLib')
class _LibSuffixGenerator(_LibInfoGeneratorBase):
"""Library suffix generator, used as target_suffix in SharedLibrary and
LoadableModule builders"""
def __init__(self, libtype):
super(_LibSuffixGenerator, self).__init__(libtype, 'Suffix')
def __call__(self, env, sources = None, **kw):
Verbose = False
if sources and 'source' not in kw:
kw2 = kw.copy()
kw2['source'] = sources
else:
kw2 = kw
suffix = self.get_lib_suffix(env, **kw2)
if Verbose:
print "_LibSuffixGenerator: input suffix=%r" % suffix
version = self.get_lib_version(env, **kw2)
if Verbose:
print "_LibSuffixGenerator: version=%r" % version
if version:
suffix = self.generate_versioned_lib_info(env, [suffix, version], suffix, **kw2)
if Verbose:
print "_LibSuffixGenerator: return suffix=%r" % suffix
return suffix
ShLibSuffixGenerator = _LibSuffixGenerator('ShLib')
LdModSuffixGenerator = _LibSuffixGenerator('LdMod')
ImpLibSuffixGenerator = _LibSuffixGenerator('ImpLib')
class _LibSymlinkGenerator(_LibInfoGeneratorBase):
"""Library symlink map generator. It generates a list of symlinks that
should be created by SharedLibrary or LoadableModule builders"""
def __init__(self, libtype):
super(_LibSymlinkGenerator, self).__init__(libtype, 'Symlinks')
def __call__(self, env, libnode, **kw):
Verbose = False
if libnode and 'target' not in kw:
kw2 = kw.copy()
kw2['target'] = libnode
else:
kw2 = kw
if Verbose:
print "_LibSymLinkGenerator: libnode=%r" % libnode.get_path()
symlinks = None
version = self.get_lib_version(env, **kw2)
disable = self.get_lib_noversionsymlinks(env, **kw2)
if Verbose:
print '_LibSymlinkGenerator: version=%r' % version
print '_LibSymlinkGenerator: disable=%r' % disable
if version and not disable:
prefix = self.get_lib_prefix(env,**kw2)
suffix = self.get_lib_suffix(env,**kw2)
symlinks = self.generate_versioned_lib_info(env, [libnode, version, prefix, suffix], **kw2)
if Verbose:
print '_LibSymlinkGenerator: return symlinks=%r' % StringizeLibSymlinks(symlinks)
return symlinks
ShLibSymlinkGenerator = _LibSymlinkGenerator('ShLib')
LdModSymlinkGenerator = _LibSymlinkGenerator('LdMod')
ImpLibSymlinkGenerator = _LibSymlinkGenerator('ImpLib')
class _LibNameGenerator(_LibInfoGeneratorBase):
"""Generates "unmangled" library name from a library file node.
    Generally, it is meant to revert the modifications done by the prefix/suffix
    generators (_LibPrefixGenerator/_LibSuffixGenerator) used by a library
    builder. For example, on gnulink the suffix generator used by the
    SharedLibrary builder appends $SHLIBVERSION to $SHLIBSUFFIX, producing a
    node name which ends with "$SHLIBSUFFIX.$SHLIBVERSION". Correspondingly,
    the implementation of _LibNameGenerator replaces "$SHLIBSUFFIX.$SHLIBVERSION"
    with "$SHLIBSUFFIX" in the node's basename. So, if $SHLIBSUFFIX is ".so",
    $SHLIBVERSION is "0.1.2" and the node path is "/foo/bar/libfoo.so.0.1.2",
    _LibNameGenerator shall return "libfoo.so". Other link tools may
    implement their own way of library name unmangling.
"""
def __init__(self, libtype):
super(_LibNameGenerator, self).__init__(libtype, 'Name')
def __call__(self, env, libnode, **kw):
"""Returns "demangled" library name"""
Verbose = False
if libnode and 'target' not in kw:
kw2 = kw.copy()
kw2['target'] = libnode
else:
kw2 = kw
if Verbose:
print "_LibNameGenerator: libnode=%r" % libnode.get_path()
version = self.get_lib_version(env, **kw2)
if Verbose:
print '_LibNameGenerator: version=%r' % version
name = None
if version:
prefix = self.get_lib_prefix(env,**kw2)
suffix = self.get_lib_suffix(env,**kw2)
name = self.generate_versioned_lib_info(env, [libnode, version, prefix, suffix], **kw2)
if not name:
name = os.path.basename(libnode.get_path())
if Verbose:
print '_LibNameGenerator: return name=%r' % name
return name
ShLibNameGenerator = _LibNameGenerator('ShLib')
LdModNameGenerator = _LibNameGenerator('LdMod')
ImpLibNameGenerator = _LibNameGenerator('ImpLib')
class _LibSonameGenerator(_LibInfoGeneratorBase):
"""Library soname generator. Returns library soname (e.g. libfoo.so.0) for
a given node (e.g. /foo/bar/libfoo.so.0.1.2)"""
def __init__(self, libtype):
super(_LibSonameGenerator, self).__init__(libtype, 'Soname')
def __call__(self, env, libnode, **kw):
"""Returns a SONAME based on a shared library's node path"""
Verbose = False
if libnode and 'target' not in kw:
kw2 = kw.copy()
kw2['target'] = libnode
else:
kw2 = kw
if Verbose:
print "_LibSonameGenerator: libnode=%r" % libnode.get_path()
soname = _call_env_subst(env, '$SONAME', **kw2)
if not soname:
version = self.get_lib_version(env,**kw2)
if Verbose:
print "_LibSonameGenerator: version=%r" % version
if version:
prefix = self.get_lib_prefix(env,**kw2)
suffix = self.get_lib_suffix(env,**kw2)
soname = self.generate_versioned_lib_info(env, [libnode, version, prefix, suffix], **kw2)
if not soname:
# fallback to library name (as returned by appropriate _LibNameGenerator)
soname = _LibNameGenerator(self.get_libtype())(env, libnode)
if Verbose:
print "_LibSonameGenerator: FALLBACK: soname=%r" % soname
if Verbose:
print "_LibSonameGenerator: return soname=%r" % soname
return soname
ShLibSonameGenerator = _LibSonameGenerator('ShLib')
LdModSonameGenerator = _LibSonameGenerator('LdMod')
def StringizeLibSymlinks(symlinks):
"""Converts list with pairs of nodes to list with pairs of node paths
(strings). Used mainly for debugging."""
if SCons.Util.is_List(symlinks):
try:
return [ (k.get_path(), v.get_path()) for k,v in symlinks ]
except (TypeError, ValueError):
return symlinks
else:
return symlinks
def EmitLibSymlinks(env, symlinks, libnode, **kw):
"""Used by emitters to handle (shared/versioned) library symlinks"""
Verbose = False
# nodes involved in process... all symlinks + library
nodes = list(set([ x for x,y in symlinks ] + [libnode]))
clean_targets = kw.get('clean_targets', [])
if not SCons.Util.is_List(clean_targets):
clean_targets = [ clean_targets ]
for link, linktgt in symlinks:
env.SideEffect(link, linktgt)
if(Verbose):
print "EmitLibSymlinks: SideEffect(%r,%r)" % (link.get_path(), linktgt.get_path())
clean_list = filter(lambda x : x != linktgt, nodes)
env.Clean(list(set([linktgt] + clean_targets)), clean_list)
if(Verbose):
print "EmitLibSymlinks: Clean(%r,%r)" % (linktgt.get_path(), map(lambda x : x.get_path(), clean_list))
def CreateLibSymlinks(env, symlinks):
"""Physically creates symlinks. The symlinks argument must be a list in
form [ (link, linktarget), ... ], where link and linktarget are SCons
nodes.
"""
Verbose = False
for link, linktgt in symlinks:
linktgt = link.get_dir().rel_path(linktgt)
link = link.get_path()
if(Verbose):
print "CreateLibSymlinks: preparing to add symlink %r -> %r" % (link, linktgt)
        # Delete the (previously created) symlink if it exists. Only symlinks
        # are deleted, to prevent accidental deletion of source files...
if env.fs.islink(link):
env.fs.unlink(link)
if(Verbose):
print "CreateLibSymlinks: removed old symlink %r" % link
# If a file or directory exists with the same name as link, an OSError
# will be thrown, which should be enough, I think.
env.fs.symlink(linktgt, link)
if(Verbose):
print "CreateLibSymlinks: add symlink %r -> %r" % (link, linktgt)
return 0
def LibSymlinksActionFunction(target, source, env):
for tgt in target:
symlinks = getattr(getattr(tgt,'attributes', None), 'shliblinks', None)
if symlinks:
CreateLibSymlinks(env, symlinks)
return 0
def LibSymlinksStrFun(target, source, env, *args):
cmd = None
for tgt in target:
symlinks = getattr(getattr(tgt,'attributes', None), 'shliblinks', None)
if symlinks:
if cmd is None: cmd = ""
if cmd: cmd += "\n"
cmd += "Create symlinks for: %r" % tgt.get_path()
try:
linkstr = ', '.join([ "%r->%r" %(k,v) for k,v in StringizeLibSymlinks(symlinks)])
except (KeyError, ValueError):
pass
else:
cmd += ": %s" % linkstr
return cmd
LibSymlinksAction = SCons.Action.Action(LibSymlinksActionFunction, LibSymlinksStrFun)
def createSharedLibBuilder(env):
"""This is a utility function that creates the SharedLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
shared_lib = env['BUILDERS']['SharedLibrary']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.ShLinkAction,
LibSymlinksAction ]
shared_lib = SCons.Builder.Builder(action = action_list,
emitter = "$SHLIBEMITTER",
prefix = ShLibPrefixGenerator,
suffix = ShLibSuffixGenerator,
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['SharedLibrary'] = shared_lib
return shared_lib
def createLoadableModuleBuilder(env):
"""This is a utility function that creates the LoadableModule
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
ld_module = env['BUILDERS']['LoadableModule']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.LdModuleLinkAction,
LibSymlinksAction ]
ld_module = SCons.Builder.Builder(action = action_list,
emitter = "$LDMODULEEMITTER",
prefix = LdModPrefixGenerator,
suffix = LdModSuffixGenerator,
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['LoadableModule'] = ld_module
return ld_module
def createObjBuilders(env):
"""This is a utility function that creates the StaticObject
and SharedObject Builders in an Environment if they
are not there already.
If they are there already, we return the existing ones.
This is a separate function because soooo many Tools
use this functionality.
The return is a 2-tuple of (StaticObject, SharedObject)
"""
try:
static_obj = env['BUILDERS']['StaticObject']
except KeyError:
static_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$OBJPREFIX',
suffix = '$OBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['StaticObject'] = static_obj
env['BUILDERS']['Object'] = static_obj
try:
shared_obj = env['BUILDERS']['SharedObject']
except KeyError:
shared_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$SHOBJPREFIX',
suffix = '$SHOBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['SharedObject'] = shared_obj
return (static_obj, shared_obj)
def createCFileBuilders(env):
"""This is a utility function that creates the CFile/CXXFile
Builders in an Environment if they
are not there already.
If they are there already, we return the existing ones.
This is a separate function because soooo many Tools
use this functionality.
The return is a 2-tuple of (CFile, CXXFile)
"""
try:
c_file = env['BUILDERS']['CFile']
except KeyError:
c_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CFILESUFFIX'})
env['BUILDERS']['CFile'] = c_file
env.SetDefault(CFILESUFFIX = '.c')
try:
cxx_file = env['BUILDERS']['CXXFile']
except KeyError:
cxx_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CXXFILESUFFIX'})
env['BUILDERS']['CXXFile'] = cxx_file
env.SetDefault(CXXFILESUFFIX = '.cc')
return (c_file, cxx_file)
##########################################################################
# Create common Java builders
def CreateJarBuilder(env):
try:
java_jar = env['BUILDERS']['Jar']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
java_jar = SCons.Builder.Builder(action = jar_com,
suffix = '$JARSUFFIX',
                                         src_suffix = '$JAVACLASSSUFFIX',
src_builder = 'JavaClassFile',
source_factory = fs.Entry)
env['BUILDERS']['Jar'] = java_jar
return java_jar
def CreateJavaHBuilder(env):
try:
java_javah = env['BUILDERS']['JavaH']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
java_javah_com = SCons.Action.Action('$JAVAHCOM', '$JAVAHCOMSTR')
java_javah = SCons.Builder.Builder(action = java_javah_com,
src_suffix = '$JAVACLASSSUFFIX',
target_factory = fs.Entry,
source_factory = fs.File,
src_builder = 'JavaClassFile')
env['BUILDERS']['JavaH'] = java_javah
return java_javah
def CreateJavaClassFileBuilder(env):
try:
java_class_file = env['BUILDERS']['JavaClassFile']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
java_class_file = SCons.Builder.Builder(action = javac_com,
emitter = {},
#suffix = '$JAVACLASSSUFFIX',
src_suffix = '$JAVASUFFIX',
src_builder = ['JavaFile'],
target_factory = fs.Entry,
source_factory = fs.File)
env['BUILDERS']['JavaClassFile'] = java_class_file
return java_class_file
def CreateJavaClassDirBuilder(env):
try:
java_class_dir = env['BUILDERS']['JavaClassDir']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
java_class_dir = SCons.Builder.Builder(action = javac_com,
emitter = {},
target_factory = fs.Dir,
source_factory = fs.Dir)
env['BUILDERS']['JavaClassDir'] = java_class_dir
return java_class_dir
def CreateJavaFileBuilder(env):
try:
java_file = env['BUILDERS']['JavaFile']
except KeyError:
java_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$JAVASUFFIX'})
env['BUILDERS']['JavaFile'] = java_file
env['JAVASUFFIX'] = '.java'
return java_file
class ToolInitializerMethod(object):
"""
This is added to a construction environment in place of a
method(s) normally called for a Builder (env.Object, env.StaticObject,
etc.). When called, it has its associated ToolInitializer
object search the specified list of tools and apply the first
one that exists to the construction environment. It then calls
whatever builder was (presumably) added to the construction
environment in place of this particular instance.
"""
def __init__(self, name, initializer):
"""
Note: we store the tool name as __name__ so it can be used by
the class that attaches this to a construction environment.
"""
self.__name__ = name
self.initializer = initializer
def get_builder(self, env):
"""
Returns the appropriate real Builder for this method name
after having the associated ToolInitializer object apply
the appropriate Tool module.
"""
builder = getattr(env, self.__name__)
self.initializer.apply_tools(env)
builder = getattr(env, self.__name__)
if builder is self:
# There was no Builder added, which means no valid Tool
# for this name was found (or possibly there's a mismatch
# between the name we were called by and the Builder name
# added by the Tool module).
return None
self.initializer.remove_methods(env)
return builder
def __call__(self, env, *args, **kw):
"""
"""
builder = self.get_builder(env)
if builder is None:
return [], []
return builder(*args, **kw)
class ToolInitializer(object):
"""
A class for delayed initialization of Tools modules.
Instances of this class associate a list of Tool modules with
a list of Builder method names that will be added by those Tool
modules. As part of instantiating this object for a particular
construction environment, we also add the appropriate
ToolInitializerMethod objects for the various Builder methods
that we want to use to delay Tool searches until necessary.
"""
def __init__(self, env, tools, names):
if not SCons.Util.is_List(tools):
tools = [tools]
if not SCons.Util.is_List(names):
names = [names]
self.env = env
self.tools = tools
self.names = names
self.methods = {}
for name in names:
method = ToolInitializerMethod(name, self)
self.methods[name] = method
env.AddMethod(method)
def remove_methods(self, env):
"""
Removes the methods that were added by the tool initialization
so we no longer copy and re-bind them when the construction
environment gets cloned.
"""
for method in self.methods.values():
env.RemoveMethod(method)
def apply_tools(self, env):
"""
Searches the list of associated Tool modules for one that
exists, and applies that to the construction environment.
"""
for t in self.tools:
tool = SCons.Tool.Tool(t)
if tool.exists(env):
env.Tool(tool)
return
# If we fall through here, there was no tool module found.
# This is where we can put an informative error message
# about the inability to find the tool. We'll start doing
# this as we cut over more pre-defined Builder+Tools to use
# the ToolInitializer class.
def Initializers(env):
ToolInitializer(env, ['install'], ['_InternalInstall', '_InternalInstallAs', '_InternalInstallVersionedLib'])
def Install(self, *args, **kw):
return self._InternalInstall(*args, **kw)
def InstallAs(self, *args, **kw):
return self._InternalInstallAs(*args, **kw)
def InstallVersionedLib(self, *args, **kw):
return self._InternalInstallVersionedLib(*args, **kw)
env.AddMethod(Install)
env.AddMethod(InstallAs)
env.AddMethod(InstallVersionedLib)
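# Hedged usage sketch (illustrative only): what the delayed initialization
# above buys a caller -- env.Install resolves the 'install' tool lazily, on
# first use, via ToolInitializerMethod. The destination and source names are
# placeholders.
def _example_delayed_install(env):
    Initializers(env)
    # The first call triggers ToolInitializer.apply_tools() for the 'install' tool.
    return env.Install('example_prefix', 'example_file')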
def FindTool(tools, env):
for tool in tools:
t = Tool(tool)
if t.exists(env):
return tool
return None
def FindAllTools(tools, env):
def ToolExists(tool, env=env):
return Tool(tool).exists(env)
return list(filter (ToolExists, tools))
def tool_list(platform, env):
other_plat_tools=[]
# XXX this logic about what tool to prefer on which platform
# should be moved into either the platform files or
# the tool files themselves.
# The search orders here are described in the man page. If you
# change these search orders, update the man page as well.
if str(platform) == 'win32':
"prefer Microsoft tools on Windows"
linkers = ['mslink', 'gnulink', 'ilink', 'linkloc', 'ilink32' ]
c_compilers = ['msvc', 'mingw', 'gcc', 'intelc', 'icl', 'icc', 'cc', 'bcc32' ]
cxx_compilers = ['msvc', 'intelc', 'icc', 'g++', 'c++', 'bcc32' ]
assemblers = ['masm', 'nasm', 'gas', '386asm' ]
fortran_compilers = ['gfortran', 'g77', 'ifl', 'cvf', 'f95', 'f90', 'fortran']
ars = ['mslib', 'ar', 'tlib']
other_plat_tools = ['msvs', 'midl']
elif str(platform) == 'os2':
"prefer IBM tools on OS/2"
linkers = ['ilink', 'gnulink', ]#'mslink']
c_compilers = ['icc', 'gcc',]# 'msvc', 'cc']
cxx_compilers = ['icc', 'g++',]# 'msvc', 'c++']
assemblers = ['nasm',]# 'masm', 'gas']
fortran_compilers = ['ifl', 'g77']
ars = ['ar',]# 'mslib']
elif str(platform) == 'irix':
"prefer MIPSPro on IRIX"
linkers = ['sgilink', 'gnulink']
c_compilers = ['sgicc', 'gcc', 'cc']
cxx_compilers = ['sgic++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
ars = ['sgiar']
elif str(platform) == 'sunos':
"prefer Forte tools on SunOS"
linkers = ['sunlink', 'gnulink']
c_compilers = ['suncc', 'gcc', 'cc']
cxx_compilers = ['sunc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['sunf95', 'sunf90', 'sunf77', 'f95', 'f90', 'f77',
'gfortran', 'g77', 'fortran']
ars = ['sunar']
elif str(platform) == 'hpux':
"prefer aCC tools on HP-UX"
linkers = ['hplink', 'gnulink']
c_compilers = ['hpcc', 'gcc', 'cc']
cxx_compilers = ['hpc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
ars = ['ar']
elif str(platform) == 'aix':
"prefer AIX Visual Age tools on AIX"
linkers = ['aixlink', 'gnulink']
c_compilers = ['aixcc', 'gcc', 'cc']
cxx_compilers = ['aixc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'aixf77', 'g77', 'fortran']
ars = ['ar']
elif str(platform) == 'darwin':
"prefer GNU tools on Mac OS X, except for some linkers and IBM tools"
linkers = ['applelink', 'gnulink']
c_compilers = ['gcc', 'cc']
cxx_compilers = ['g++', 'c++']
assemblers = ['as']
fortran_compilers = ['gfortran', 'f95', 'f90', 'g77']
ars = ['ar']
elif str(platform) == 'cygwin':
"prefer GNU tools on Cygwin, except for a platform-specific linker"
linkers = ['cyglink', 'mslink', 'ilink']
c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
assemblers = ['gas', 'nasm', 'masm']
fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
ars = ['ar', 'mslib']
else:
"prefer GNU tools on all other platforms"
linkers = ['gnulink', 'mslink', 'ilink']
c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
assemblers = ['gas', 'nasm', 'masm']
fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
ars = ['ar', 'mslib']
if not str(platform) == 'win32':
other_plat_tools += ['m4', 'rpm']
c_compiler = FindTool(c_compilers, env) or c_compilers[0]
# XXX this logic about what tool provides what should somehow be
# moved into the tool files themselves.
if c_compiler and c_compiler == 'mingw':
# MinGW contains a linker, C compiler, C++ compiler,
# Fortran compiler, archiver and assembler:
cxx_compiler = None
linker = None
assembler = None
fortran_compiler = None
ar = None
else:
# Don't use g++ if the C compiler has built-in C++ support:
if c_compiler in ('msvc', 'intelc', 'icc'):
cxx_compiler = None
else:
cxx_compiler = FindTool(cxx_compilers, env) or cxx_compilers[0]
linker = FindTool(linkers, env) or linkers[0]
assembler = FindTool(assemblers, env) or assemblers[0]
fortran_compiler = FindTool(fortran_compilers, env) or fortran_compilers[0]
ar = FindTool(ars, env) or ars[0]
d_compilers = ['dmd', 'gdc', 'ldc']
d_compiler = FindTool(d_compilers, env) or d_compilers[0]
other_tools = FindAllTools(other_plat_tools + [
#TODO: merge 'install' into 'filesystem' and
# make 'filesystem' the default
'filesystem',
'wix', #'midl', 'msvs',
# Parser generators
'lex', 'yacc',
# Foreign function interface
'rpcgen', 'swig',
# Java
'jar', 'javac', 'javah', 'rmic',
# TeX
'dvipdf', 'dvips', 'gs',
'tex', 'latex', 'pdflatex', 'pdftex',
# Archivers
'tar', 'zip',
# SourceCode factories
'BitKeeper', 'CVS', 'Perforce',
'RCS', 'SCCS', # 'Subversion',
], env)
tools = ([linker, c_compiler, cxx_compiler,
fortran_compiler, assembler, ar, d_compiler]
+ other_tools)
return [x for x in tools if x]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
from pandac.PandaModules import *
from DistributedNPCToonBase import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
import NPCToons
from direct.task.Task import Task
import TailorClothesGUI
from toontown.toonbase import TTLocalizer
import ToonDNA
from toontown.estate import ClosetGlobals
class DistributedNPCTailor(DistributedNPCToonBase):
def __init__(self, cr):
DistributedNPCToonBase.__init__(self, cr)
self.isLocalToon = 0
self.clothesGUI = None
self.av = None
self.oldStyle = None
self.browsing = 0
self.roomAvailable = 0
self.button = None
self.popupInfo = None
return
def disable(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupPurchaseGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.clothesGUI:
self.clothesGUI.exit()
self.clothesGUI.unload()
self.clothesGUI = None
if self.button != None:
self.button.destroy()
del self.button
self.cancelButton.destroy()
del self.cancelButton
del self.gui
self.counter.show()
del self.counter
if self.popupInfo:
self.popupInfo.destroy()
self.popupInfo = None
self.av = None
self.oldStyle = None
base.localAvatar.posCamera(0, 0)
DistributedNPCToonBase.disable(self)
return
def handleCollisionSphereEnter(self, collEntry):
base.cr.playGame.getPlace().fsm.request('purchase')
self.sendUpdate('avatarEnter', [])
def __handleUnexpectedExit(self):
self.notify.warning('unexpected exit')
self.av = None
self.oldStyle = None
return
def resetTailor(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupPurchaseGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.clothesGUI:
self.clothesGUI.hideButtons()
self.clothesGUI.exit()
self.clothesGUI.unload()
self.clothesGUI = None
if self.button != None:
self.button.destroy()
del self.button
self.cancelButton.destroy()
del self.cancelButton
del self.gui
self.counter.show()
del self.counter
self.show()
self.startLookAround()
self.detectAvatars()
self.clearMat()
if self.isLocalToon:
self.freeAvatar()
return Task.done
def setMovie(self, mode, npcId, avId, timestamp):
timeStamp = ClockDelta.globalClockDelta.localElapsedTime(timestamp)
self.remain = NPCToons.CLERK_COUNTDOWN_TIME - timeStamp
self.npcId = npcId
self.isLocalToon = avId == base.localAvatar.doId
if mode == NPCToons.PURCHASE_MOVIE_CLEAR:
return
if mode == NPCToons.PURCHASE_MOVIE_TIMEOUT:
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.isLocalToon:
self.ignore(self.purchaseDoneEvent)
self.ignore(self.swapEvent)
if self.popupInfo:
self.popupInfo.reparentTo(hidden)
if self.clothesGUI:
self.clothesGUI.resetClothes(self.oldStyle)
self.__handlePurchaseDone(timeout=1)
self.setChatAbsolute(TTLocalizer.STOREOWNER_TOOKTOOLONG, CFSpeech | CFTimeout)
self.resetTailor()
elif mode == NPCToons.PURCHASE_MOVIE_START or mode == NPCToons.PURCHASE_MOVIE_START_BROWSE or mode == NPCToons.PURCHASE_MOVIE_START_NOROOM:
if mode == NPCToons.PURCHASE_MOVIE_START:
self.browsing = 0
self.roomAvailable = 1
elif mode == NPCToons.PURCHASE_MOVIE_START_BROWSE:
self.browsing = 1
self.roomAvailable = 1
elif mode == NPCToons.PURCHASE_MOVIE_START_NOROOM:
self.browsing = 0
self.roomAvailable = 0
self.av = base.cr.doId2do.get(avId)
if self.av is None:
self.notify.warning('Avatar %d not found in doId' % avId)
return
else:
self.accept(self.av.uniqueName('disable'), self.__handleUnexpectedExit)
style = self.av.getStyle()
self.oldStyle = ToonDNA.ToonDNA()
self.oldStyle.makeFromNetString(style.makeNetString())
self.setupAvatars(self.av)
if self.isLocalToon:
camera.wrtReparentTo(render)
camera.lerpPosHpr(-5, 9, self.getHeight() - 0.5, -150, -2, 0, 1, other=self, blendType='easeOut', task=self.uniqueName('lerpCamera'))
if self.browsing == 0:
if self.roomAvailable == 0:
self.setChatAbsolute(TTLocalizer.STOREOWNER_NOROOM, CFSpeech | CFTimeout)
else:
self.setChatAbsolute(TTLocalizer.STOREOWNER_GREETING, CFSpeech | CFTimeout)
else:
self.setChatAbsolute(TTLocalizer.STOREOWNER_BROWSING, CFSpeech | CFTimeout)
if self.isLocalToon:
taskMgr.doMethodLater(3.0, self.popupPurchaseGUI, self.uniqueName('popupPurchaseGUI'))
print '-----------Starting tailor interaction-----------'
print 'avid: %s, gender: %s' % (self.av.doId, self.av.style.gender)
print 'current top = %s,%s,%s,%s and bot = %s,%s,' % (self.av.style.topTex,
self.av.style.topTexColor,
self.av.style.sleeveTex,
self.av.style.sleeveTexColor,
self.av.style.botTex,
self.av.style.botTexColor)
print 'topsList = %s' % self.av.getClothesTopsList()
print 'bottomsList = %s' % self.av.getClothesBottomsList()
print '-------------------------------------------------'
elif mode == NPCToons.PURCHASE_MOVIE_COMPLETE:
self.setChatAbsolute(TTLocalizer.STOREOWNER_GOODBYE, CFSpeech | CFTimeout)
if self.av and self.isLocalToon:
print '-----------ending tailor interaction-----------'
print 'avid: %s, gender: %s' % (self.av.doId, self.av.style.gender)
print 'current top = %s,%s,%s,%s and bot = %s,%s,' % (self.av.style.topTex,
self.av.style.topTexColor,
self.av.style.sleeveTex,
self.av.style.sleeveTexColor,
self.av.style.botTex,
self.av.style.botTexColor)
print 'topsList = %s' % self.av.getClothesTopsList()
print 'bottomsList = %s' % self.av.getClothesBottomsList()
print '-------------------------------------------------'
self.resetTailor()
elif mode == NPCToons.PURCHASE_MOVIE_NO_MONEY:
self.notify.warning('PURCHASE_MOVIE_NO_MONEY should not be called')
self.resetTailor()
return
def popupPurchaseGUI(self, task):
self.setChatAbsolute('', CFSpeech)
self.purchaseDoneEvent = 'purchaseDone'
self.swapEvent = 'swap'
self.acceptOnce(self.purchaseDoneEvent, self.__handlePurchaseDone)
self.accept(self.swapEvent, self.__handleSwap)
self.clothesGUI = TailorClothesGUI.TailorClothesGUI(self.purchaseDoneEvent, self.swapEvent, self.npcId)
self.clothesGUI.load()
self.clothesGUI.enter(self.av)
self.clothesGUI.showButtons()
self.gui = loader.loadModel('phase_3/models/gui/create_a_toon_gui')
if self.browsing == 0:
            self.button = DirectButton(
                relief=None,
                image=(self.gui.find('**/CrtAtoon_Btn1_UP'),
                       self.gui.find('**/CrtAtoon_Btn1_DOWN'),
                       self.gui.find('**/CrtAtoon_Btn1_RLLVR')),
                pos=(-0.15, 0, -0.85),
                command=self.__handleButton,
                text=('', TTLocalizer.MakeAToonDone, TTLocalizer.MakeAToonDone),
                text_font=ToontownGlobals.getInterfaceFont(),
                text_scale=0.08,
                text_pos=(0, -0.03),
                text_fg=(1, 1, 1, 1),
                text_shadow=(0, 0, 0, 1))
else:
self.button = None
        self.cancelButton = DirectButton(
            relief=None,
            image=(self.gui.find('**/CrtAtoon_Btn2_UP'),
                   self.gui.find('**/CrtAtoon_Btn2_DOWN'),
                   self.gui.find('**/CrtAtoon_Btn2_RLLVR')),
            pos=(0.15, 0, -0.85),
            command=self.__handleCancel,
            text=('', TTLocalizer.MakeAToonCancel, TTLocalizer.MakeAToonCancel),
            text_font=ToontownGlobals.getInterfaceFont(),
            text_scale=0.08,
            text_pos=(0, -0.03),
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1))
camera.setPosHpr(base.localAvatar, -4.16, 8.25, 2.47, -152.89, 0.0, 0.0)
self.counter = render.find('**/*mo1_TI_counter')
self.counter.hide()
self.hide()
return Task.done
def __handleButton(self):
messenger.send('next')
def __handleCancel(self):
self.clothesGUI.resetClothes(self.oldStyle)
messenger.send('last')
def __handleSwap(self):
self.d_setDNA(self.av.getStyle().makeNetString(), 0)
def __handlePurchaseDone(self, timeout = 0):
if self.clothesGUI.doneStatus == 'last' or timeout == 1:
self.d_setDNA(self.oldStyle.makeNetString(), 1)
else:
which = 0
if self.clothesGUI.topChoice != -1:
which = which | ClosetGlobals.SHIRT
if self.clothesGUI.bottomChoice != -1:
which = which | ClosetGlobals.SHORTS
print 'setDNA: which = %d, top = %d, bot = %d' % (which, self.clothesGUI.topChoice, self.clothesGUI.bottomChoice)
if self.roomAvailable == 0:
if self.isLocalToon:
if self.av.isClosetFull() or which & ClosetGlobals.SHIRT and which & ClosetGlobals.SHORTS:
self.__enterConfirmLoss(2, which)
self.clothesGUI.hideButtons()
self.button.hide()
self.cancelButton.hide()
else:
self.d_setDNA(self.av.getStyle().makeNetString(), 2, which)
else:
self.d_setDNA(self.av.getStyle().makeNetString(), 2, which)
def __enterConfirmLoss(self, finished, which):
if self.popupInfo == None:
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
cancelButtonImage = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))
self.popupInfo = DirectFrame(
parent=hidden,
relief=None,
state='normal',
text=TTLocalizer.STOREOWNER_CONFIRM_LOSS,
text_wordwrap=10,
textMayChange=0,
frameSize=(-1, 1, -1, 1),
text_pos=(0, -0.05),
geom=DGG.getDefaultDialogGeom(),
geom_color=ToontownGlobals.GlobalDialogColor,
geom_scale=(0.88, 1, 0.55),
geom_pos=(0, 0, -.18),
text_scale=0.08)
DirectButton(
self.popupInfo,
image=okButtonImage,
relief=None,
text=TTLocalizer.STOREOWNER_OK,
text_scale=0.05,
text_pos=(0.0, -0.1),
textMayChange=0,
pos=(-0.08, 0.0, -0.31),
command=self.__handleConfirmLossOK,
extraArgs=[finished, which])
DirectButton(
self.popupInfo,
image=cancelButtonImage,
relief=None,
text=TTLocalizer.STOREOWNER_CANCEL,
text_scale=0.05,
text_pos=(0.0, -0.1),
textMayChange=0,
pos=(0.08, 0.0, -0.31),
command=self.__handleConfirmLossCancel)
buttons.removeNode()
self.popupInfo.reparentTo(aspect2d)
def __handleConfirmLossOK(self, finished, which):
self.d_setDNA(self.av.getStyle().makeNetString(), finished, which)
self.popupInfo.reparentTo(hidden)
def __handleConfirmLossCancel(self):
self.d_setDNA(self.oldStyle.makeNetString(), 1)
self.popupInfo.reparentTo(hidden)
def d_setDNA(self, dnaString, finished, whichItems = ClosetGlobals.SHIRT | ClosetGlobals.SHORTS):
self.sendUpdate('setDNA', [dnaString, finished, whichItems])
def setCustomerDNA(self, avId, dnaString):
if avId != base.localAvatar.doId:
av = base.cr.doId2do.get(avId, None)
if av:
if self.av == av:
oldTorso = self.av.style.torso
self.av.style.makeFromNetString(dnaString)
if len(oldTorso) == 2 and len(self.av.style.torso) == 2 and self.av.style.torso[1] != oldTorso[1]:
self.av.swapToonTorso(self.av.style.torso, genClothes=0)
self.av.loop('neutral', 0)
self.av.generateToonClothes()
return
|