import json
import unittest
import mock
from custom_resource import BaseHandler, Defer, Failed, Responder, Success
class TestCase(unittest.TestCase):
def setUp(self):
self.upload_response_data_mock = mock.patch.object(Responder, "_upload_response_data")
self.upload_response_data_mock.start()
def tearDown(self):
self.upload_response_data_mock.stop()
def test_create_update_delete_required(self):
class Handler(BaseHandler):
pass
with self.assertRaisesRegexp(TypeError, "Can't instantiate abstract class Handler with abstract methods create, delete, update"):
Handler()
def test_create(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(create=lambda self, *args: Success("PhysicalResourceId", {"Meta": "Data"}))
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "SUCCESS",
"PhysicalResourceId": "PhysicalResourceId",
"Data": {
"Meta": "Data"
}
})
self.assertEqual(kwargs, {})
def test_update(self):
event = {
"RequestType": "Update",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(update=lambda self, *args: Success("PhysicalResourceId", {"Meta": "Data"}))
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "SUCCESS",
"PhysicalResourceId": "PhysicalResourceId",
"Data": {
"Meta": "Data"
}
})
self.assertEqual(kwargs, {})
def test_delete(self):
event = {
"RequestType": "Delete",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(delete=lambda self, *args: Success("PhysicalResourceId", {"Meta": "Data"}))
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "SUCCESS",
"PhysicalResourceId": "PhysicalResourceId",
"Data": {
"Meta": "Data"
}
})
self.assertEqual(kwargs, {})
def test_success(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(create=lambda self, *args: Success("PhysicalResourceId", {"Meta": "Data"}))
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "SUCCESS",
"PhysicalResourceId": "PhysicalResourceId",
"Data": {
"Meta": "Data"
}
})
self.assertEqual(kwargs, {})
def test_failed(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(create=lambda self, *args: Failed("PhysicalResourceId", "Broken"))
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "FAILED",
"PhysicalResourceId": "PhysicalResourceId",
"Reason": "Broken"
})
self.assertEqual(kwargs, {})
def test_defer(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(create=lambda self, *args: Defer())
handler(event, context=None)
Responder._upload_response_data.assert_not_called()
def test_exception(self):
def raise_exc(exc):
raise exc
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(create=lambda self, *args: raise_exc(Exception("Couldn't create")))
with self.assertRaisesRegexp(Exception, "Couldn't create"):
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "FAILED",
"PhysicalResourceId": "n/a",
"Reason": "Couldn't create"
})
self.assertEqual(kwargs, {})
def test_no_response(self):
class Handler(BaseHandler):
def create(self, event, context):
pass
update = None
delete = None
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
context = None
with self.assertRaisesRegexp(TypeError, "No response returned"):
Handler()(event, context)
def test_physical_resource_id_result(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(create=lambda self, *args: "PhysicalResourceId")
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "SUCCESS",
"PhysicalResourceId": "PhysicalResourceId",
"Data": {}
})
self.assertEqual(kwargs, {})
def test_physical_resource_id_and_data_result(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
handler = self.handler(create=lambda self, *args: ("PhysicalResourceId", {"Meta": "Data"}))
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "SUCCESS",
"PhysicalResourceId": "PhysicalResourceId",
"Data": {
"Meta": "Data"
}
})
self.assertEqual(kwargs, {})
def test_unknown_return_type(self):
class Handler(BaseHandler):
def create(self, event, context):
return object()
update = None
delete = None
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response"
}
context = None
with self.assertRaisesRegexp(TypeError, "Unexpected response .*"):
Handler()(event, context)
def test_schema_validation_pass(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response",
"ResourceProperties": {},
"OldResourceProperties": {}
}
handler = self.handler(
create=lambda self, *args: Success("PhysicalResourceId", {"Meta": "Data"}),
schema={}
)
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "SUCCESS",
"PhysicalResourceId": "PhysicalResourceId",
"Data": {
"Meta": "Data"
}
})
self.assertEqual(kwargs, {})
def test_schema_validation_resource_properties_fail(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response",
"ResourceProperties": {},
"OldResourceProperties": {
"Validating": True
}
}
handler = self.handler(
create=lambda self, *args: Success("PhysicalResourceId", {"Meta": "Data"}),
schema={
"required": ["Validating"],
"properties": {
"Validating": {
"enum": [True]
}
}
}
)
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "FAILED",
"PhysicalResourceId": "n/a",
"Reason": (
"'Validating' is a required property\n"
"\n"
"Failed validating 'required' in schema:\n"
" {'properties': {'Validating': {'enum': [True]}},\n"
" 'required': ['Validating']}\n"
"\n"
"On instance:\n"
" {}"
)
})
self.assertEqual(kwargs, {})
def test_schema_validation_old_resource_properties_fail(self):
event = {
"RequestType": "Create",
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"ResponseURL": "http://response",
"ResourceProperties": {
"Validating": True
},
"OldResourceProperties": {}
}
handler = self.handler(
create=lambda self, *args: Success("PhysicalResourceId", {"Meta": "Data"}),
schema={
"required": ["Validating"],
"properties": {
"Validating": {
"enum": [True]
}
}
}
)
handler(event, context=None)
(_, (url, data), kwargs), = Responder._upload_response_data.mock_calls
self.assertEqual(url, "http://response")
self.assertEqual(json.loads(data), {
"StackId": "1",
"RequestId": "2",
"LogicalResourceId": "3",
"Status": "FAILED",
"PhysicalResourceId": "n/a",
"Reason": (
"'Validating' is a required property\n"
"\n"
"Failed validating 'required' in schema:\n"
" {'properties': {'Validating': {'enum': [True]}},\n"
" 'required': ['Validating']}\n"
"\n"
"On instance:\n"
" {}"
)
})
self.assertEqual(kwargs, {})
def handler(self, create=None, update=None, delete=None, schema=None):
Handler = type("Handler", (BaseHandler,), {
"create": create,
"update": update,
"delete": delete,
"RESOURCE_PROPERTIES_SCHEMA": schema
})
return Handler()
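# Illustrative sketch (not part of the test suite): a concrete handler built on
# the same BaseHandler/Success/Failed API exercised by the tests above. The
# resource logic and names are hypothetical; only the method signatures and the
# accepted return types follow the behaviour verified here.
class ExampleHandler(BaseHandler):
    def create(self, event, context):
        props = event.get("ResourceProperties", {})
        if "Name" not in props:
            return Failed("n/a", "Missing required property: Name")
        # ... provision the underlying resource here ...
        return Success(props["Name"], {"Created": True})

    def update(self, event, context):
        # Returning a bare physical resource id (or a (physical_id, data) tuple)
        # is also accepted, as test_physical_resource_id_result shows.
        return "ExampleResource"

    def delete(self, event, context):
        # ... tear down the underlying resource here ...
        return Success("ExampleResource", {})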
|
|
""" Contains functions used to write specific server packets to byte streams """
import packetHelper
import dataTypes
import userHelper
import glob
import userRanks
import packetIDs
import slotStatuses
import matchModModes
import random
""" Login errors packets
(userID packets derivates) """
def loginFailed():
return packetHelper.buildPacket(packetIDs.server_userID, [[-1, dataTypes.sInt32]])
def forceUpdate():
return packetHelper.buildPacket(packetIDs.server_userID, [[-2, dataTypes.sInt32]])
def loginBanned():
return packetHelper.buildPacket(packetIDs.server_userID, [[-3, dataTypes.sInt32]])
def loginError():
return packetHelper.buildPacket(packetIDs.server_userID, [[-5, dataTypes.sInt32]])
def needSupporter():
return packetHelper.buildPacket(packetIDs.server_userID, [[-6, dataTypes.sInt32]])
""" Login packets """
def userID(uid):
return packetHelper.buildPacket(packetIDs.server_userID, [[uid, dataTypes.sInt32]])
def silenceEndTime(seconds):
return packetHelper.buildPacket(packetIDs.server_silenceEnd, [[seconds, dataTypes.uInt32]])
def protocolVersion(version = 19):
return packetHelper.buildPacket(packetIDs.server_protocolVersion, [[version, dataTypes.uInt32]])
def mainMenuIcon(icon):
return packetHelper.buildPacket(packetIDs.server_mainMenuIcon, [[icon, dataTypes.string]])
def userSupporterGMT(supporter, GMT):
result = 1
if supporter == True:
result += 4
if GMT == True:
result += 2
return packetHelper.buildPacket(packetIDs.server_supporterGMT, [[result, dataTypes.uInt32]])
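# Note: the value returned above is a small additive bit field, exactly as the
# code builds it: 1 is always set, 2 is added when GMT is true and 4 when
# supporter is true. An equivalent (illustrative) formulation with bitwise OR:
#
#   result = 1 | (2 if GMT else 0) | (4 if supporter else 0)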
def friendList(userID):
friendsData = []
# Get friend IDs from db
friends = userHelper.getFriendList(userID)
# Number of friends
friendsData.append([len(friends), dataTypes.uInt16])
# Add all friend user IDs to friendsData
for i in friends:
friendsData.append([i, dataTypes.sInt32])
return packetHelper.buildPacket(packetIDs.server_friendsList, friendsData)
def onlineUsers():
onlineUsersData = []
users = glob.tokens.tokens
# Number of online users
onlineUsersData.append([len(users), dataTypes.uInt16])
# Add every online user's ID to onlineUsersData
for _,value in users.items():
onlineUsersData.append([value.userID, dataTypes.sInt32])
return packetHelper.buildPacket(packetIDs.server_userPresenceBundle, onlineUsersData)
""" Users packets """
def userLogout(userID):
return packetHelper.buildPacket(packetIDs.server_userLogout, [[userID, dataTypes.sInt32], [0, dataTypes.byte]])
def userPanel(userID):
# Get user data
userToken = glob.tokens.getTokenFromUserID(userID)
username = userHelper.getUsername(userID)
timezone = 24 # TODO: Timezone
country = userToken.getCountry()
gameRank = userHelper.getGameRank(userID, userToken.gameMode)
latitude = userToken.getLatitude()
longitude = userToken.getLongitude()
# Get username color according to rank
# Admins, mods, supporters and normal users are supported
rank = userHelper.getRankPrivileges(userID)
if username == "FokaBot":
userRank = userRanks.MOD
elif rank == 4:
userRank = userRanks.ADMIN
elif rank == 3:
userRank = userRanks.MOD
elif rank == 2:
userRank = userRanks.SUPPORTER
else:
userRank = userRanks.NORMAL
return packetHelper.buildPacket(packetIDs.server_userPanel,
[
[userID, dataTypes.sInt32],
[username, dataTypes.string],
[timezone, dataTypes.byte],
[country, dataTypes.byte],
[userRank, dataTypes.byte],
[longitude, dataTypes.ffloat],
[latitude, dataTypes.ffloat],
[gameRank, dataTypes.uInt32]
])
def userStats(userID):
# Get userID's token from tokens list
userToken = glob.tokens.getTokenFromUserID(userID)
# Get stats from DB
# TODO: Caching system
rankedScore = userHelper.getRankedScore(userID, userToken.gameMode)
accuracy = userHelper.getAccuracy(userID, userToken.gameMode)/100
playcount = userHelper.getPlaycount(userID, userToken.gameMode)
totalScore = userHelper.getTotalScore(userID, userToken.gameMode)
gameRank = userHelper.getGameRank(userID, userToken.gameMode)
pp = int(userHelper.getPP(userID, userToken.gameMode))
return packetHelper.buildPacket(packetIDs.server_userStats,
[
[userID, dataTypes.uInt32],
[userToken.actionID, dataTypes.byte],
[userToken.actionText, dataTypes.string],
[userToken.actionMd5, dataTypes.string],
[userToken.actionMods, dataTypes.sInt32],
[userToken.gameMode, dataTypes.byte],
[0, dataTypes.sInt32],
[rankedScore, dataTypes.uInt64],
[accuracy, dataTypes.ffloat],
[playcount, dataTypes.uInt32],
[totalScore, dataTypes.uInt64],
[gameRank, dataTypes.uInt32],
[pp, dataTypes.uInt16]
])
""" Chat packets """
def sendMessage(fro, to, message):
return packetHelper.buildPacket(packetIDs.server_sendMessage, [[fro, dataTypes.string], [message, dataTypes.string], [to, dataTypes.string], [userHelper.getID(fro), dataTypes.sInt32]])
def channelJoinSuccess(userID, chan):
return packetHelper.buildPacket(packetIDs.server_channelJoinSuccess, [[chan, dataTypes.string]])
def channelInfo(chan):
channel = glob.channels.channels[chan]
return packetHelper.buildPacket(packetIDs.server_channelInfo, [[chan, dataTypes.string], [channel.description, dataTypes.string], [channel.getConnectedUsersCount(), dataTypes.uInt16]])
def channelInfoEnd():
return packetHelper.buildPacket(packetIDs.server_channelInfoEnd, [[0, dataTypes.uInt32]])
def channelKicked(chan):
return packetHelper.buildPacket(packetIDs.server_channelKicked, [[chan, dataTypes.string]])
""" Spectator packets """
def addSpectator(userID):
return packetHelper.buildPacket(packetIDs.server_spectatorJoined, [[userID, dataTypes.sInt32]])
def removeSpectator(userID):
return packetHelper.buildPacket(packetIDs.server_spectatorLeft, [[userID, dataTypes.sInt32]])
def spectatorFrames(data):
return packetHelper.buildPacket(packetIDs.server_spectateFrames, [[data, dataTypes.bbytes]])
def noSongSpectator(userID):
return packetHelper.buildPacket(packetIDs.server_spectatorCantSpectate, [[userID, dataTypes.sInt32]])
""" Multiplayer Packets """
def createMatch(matchID):
# Make sure the match exists
if matchID not in glob.matches.matches:
return None
# Get match binary data and build packet
match = glob.matches.matches[matchID]
return packetHelper.buildPacket(packetIDs.server_newMatch, match.getMatchData())
def updateMatch(matchID):
# Make sure the match exists
if matchID not in glob.matches.matches:
return None
# Get match binary data and build packet
match = glob.matches.matches[matchID]
return packetHelper.buildPacket(packetIDs.server_updateMatch, match.getMatchData())
def matchStart(matchID):
# Make sure the match exists
if matchID not in glob.matches.matches:
return None
# Get match binary data and build packet
match = glob.matches.matches[matchID]
return packetHelper.buildPacket(packetIDs.server_matchStart, match.getMatchData())
def disposeMatch(matchID):
return packetHelper.buildPacket(packetIDs.server_disposeMatch, [[matchID, dataTypes.uInt16]])
def matchJoinSuccess(matchID):
# Make sure the match exists
if matchID not in glob.matches.matches:
return None
# Get match binary data and build packet
match = glob.matches.matches[matchID]
data = packetHelper.buildPacket(packetIDs.server_matchJoinSuccess, match.getMatchData())
return data
def matchJoinFail():
return packetHelper.buildPacket(packetIDs.server_matchJoinFail)
def changeMatchPassword(newPassword):
return packetHelper.buildPacket(packetIDs.server_matchChangePassword, [[newPassword, dataTypes.string]])
def allPlayersLoaded():
return packetHelper.buildPacket(packetIDs.server_matchAllPlayersLoaded)
def playerSkipped(userID):
return packetHelper.buildPacket(packetIDs.server_matchPlayerSkipped, [[userID, dataTypes.sInt32]])
def allPlayersSkipped():
return packetHelper.buildPacket(packetIDs.server_matchSkip)
def matchFrames(slotID, data):
return packetHelper.buildPacket(packetIDs.server_matchScoreUpdate, [[data[7:11], dataTypes.bbytes], [slotID, dataTypes.byte], [data[12:], dataTypes.bbytes]])
def matchComplete():
return packetHelper.buildPacket(packetIDs.server_matchComplete)
def playerFailed(slotID):
return packetHelper.buildPacket(packetIDs.server_matchPlayerFailed, [[slotID, dataTypes.uInt32]])
def matchTransferHost():
return packetHelper.buildPacket(packetIDs.server_matchTransferHost)
""" Other packets """
def notification(message):
return packetHelper.buildPacket(packetIDs.server_notification, [[message, dataTypes.string]])
def jumpscare(message):
return packetHelper.buildPacket(packetIDs.server_jumpscare, [[message, dataTypes.string]])
def banchoRestart(msUntilReconnection):
return packetHelper.buildPacket(packetIDs.server_restart, [[msUntilReconnection, dataTypes.uInt32]])
""" WIP Packets """
def getAttention():
return packetHelper.buildPacket(packetIDs.server_getAttention)
def packet80():
return packetHelper.buildPacket(packetIDs.server_topBotnet)
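# Illustrative usage sketch (assumes packetHelper.buildPacket returns a
# bytes-like payload, which is not defined in this module): callers typically
# concatenate several builders into one response buffer, e.g.
#
#   response = b""
#   response += userID(1001)
#   response += silenceEndTime(0)
#   response += protocolVersion()
#   response += notification("Welcome back!")
#   # ... then write `response` to the client's output stream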
|
|
from __future__ import print_function
import argparse
import codecs
import json
import shutil
import time
from os import listdir
from os import makedirs
from os.path import isfile, join
from entity2vec.node2vec import Node2Vec
from entity2vec.sparql import Sparql
####################################################################################
## Generates a set of property-specific entity embeddings from a Knowledge Graph ###
####################################################################################
class Entity2Vec(Node2Vec):
def __init__(self, is_directed, preprocessing, is_weighted, p, q, walk_length, num_walks, dimensions, window_size,
workers, iterations, config, sparql, dataset, entities, default_graph, entity_class, feedback_file):
Node2Vec.__init__(self, is_directed, preprocessing, is_weighted, p, q, walk_length, num_walks, dimensions,
window_size, workers, iterations)
self.config_file = config
self.sparql = sparql
self.default_graph = default_graph
self.dataset = dataset
self.entities = entities
self.entity_class = entity_class
self.feedback_file = feedback_file
self._define_properties()
def _define_properties(self):
with codecs.open(self.config_file, 'r', encoding='utf-8') as config_read:
property_file = json.loads(config_read.read())
try:
self.properties = [i for i in property_file[self.dataset]]
self.properties.append('feedback')
except KeyError: # if no list of properties is specified, take them all
if self.sparql: # get all the properties from the sparql endpoint
sparql_query = Sparql(self.entities, self.config_file, self.dataset, self.sparql, self.default_graph,
self.entity_class)
self.properties = sparql_query.properties
self.properties.append('feedback') # add the feedback property that is not defined in the graph
else: # get everything you have in the folder
path_to_graphs = 'datasets/%s/graphs' % self.dataset
onlyfiles = [f for f in listdir(path_to_graphs) if isfile(join(path_to_graphs, f))]
self.properties = [file.replace('.edgelist', '') for file in onlyfiles]
if 'feedback' in self.properties: # feedback property always the last one of the list
self.properties.remove('feedback')
self.properties.append('feedback')
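# For reference, a minimal (hypothetical) config file compatible with the
# lookup above maps each dataset name to its list of properties, e.g.:
#
#   {
#       "movielens_1m": ["dbo:director", "dbo:starring"]
#   }
#
# Property names containing '/' are later shortened to their last path segment
# when building graph and embedding file names (see e2v_walks_learn below).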
def e2v_walks_learn(self):
n = self.num_walks
p = int(self.p)
q = int(self.q)
l = self.walk_length
d = self.dimensions
it = self.iter
win = self.window_size
try:
makedirs('emb/%s' % self.dataset)
except:
pass
# copy the feedback file into the graphs folder, if declared
if self.feedback_file:
print('Copying feedback file %s' % self.feedback_file)
shutil.copy2(self.feedback_file, "datasets/%s/graphs/feedback.edgelist" % self.dataset)
# iterate through properties
for prop_name in self.properties:
print(prop_name)
prop_short = prop_name
if '/' in prop_name:
prop_short = prop_name.split('/')[-1]
graph = "datasets/%s/graphs/%s.edgelist" % (self.dataset, prop_short)
try:
makedirs('emb/%s/%s' % (self.dataset, prop_short))
except:
pass
emb_output = "emb/%s/%s/num%d_p%d_q%d_l%d_d%d_iter%d_winsize%d.emd" % (self.dataset,
prop_short, n, p, q, l, d, it, win)
print('running with', graph)
super(Entity2Vec, self).run(graph, emb_output) # call the run function defined in parent class node2vec
# generate node2vec walks and learn embeddings for each property
def run(self):
if self.sparql:
sparql_query = Sparql(self.entities, self.config_file, self.dataset, self.sparql, self.default_graph,
self.entity_class)
sparql_query.get_property_graphs()
self.e2v_walks_learn() # run node2vec for each property-specific graph
@staticmethod
def parse_args():
"""
Parses the entity2vec arguments.
"""
parser = argparse.ArgumentParser(description="Run entity2vec.")
parser.add_argument('--walk_length', type=int, default=10,
help='Length of walk per source. Default is 10.')
parser.add_argument('--num_walks', type=int, default=500,
help='Number of walks per source. Default is 500.')
parser.add_argument('--p', type=float, default=1,
help='Return hyperparameter. Default is 1.')
parser.add_argument('--q', type=float, default=1,
help='Inout hyperparameter. Default is 1.')
parser.add_argument('--weighted', dest='weighted', action='store_true',
help='Boolean specifying (un)weighted. Default is unweighted.')
parser.add_argument('--unweighted', dest='unweighted', action='store_false')
parser.set_defaults(weighted=False)
parser.add_argument('--directed', dest='directed', action='store_true',
help='Graph is (un)directed. Default is directed.')
parser.set_defaults(directed=False)
parser.add_argument('--no_preprocessing', dest='preprocessing', action='store_false',
help='If set, do not precompute transition probabilities; compute them on the fly instead.')
parser.set_defaults(preprocessing=True)
parser.add_argument('--dimensions', type=int, default=500,
help='Number of dimensions. Default is 500.')
parser.add_argument('--window-size', type=int, default=10,
help='Context size for optimization. Default is 10.')
parser.add_argument('--iter', default=5, type=int,
help='Number of epochs in SGD')
parser.add_argument('--workers', type=int, default=8,
help='Number of parallel workers. Default is 8.')
parser.add_argument('--config_file', nargs='?', default='config/properties.json',
help='Path to configuration file')
parser.add_argument('--dataset', nargs='?', default='movielens_1m',
help='Dataset')
parser.add_argument('--sparql', dest='sparql',
help='Whether to download the graphs from a SPARQL endpoint')
parser.set_defaults(sparql=False)
parser.add_argument('--entities', dest='entities', default="all",
help='A specific list of entities for which the embeddings have to be computed')
parser.add_argument('--default_graph', dest='default_graph', default=False,
help='Default graph to query when using a Sparql endpoint')
parser.add_argument('--entity_class', dest='entity_class', help='entity class', default=False)
parser.add_argument('--feedback_file', dest='feedback_file', default=False,
help='Path to a DAT file that contains all the user-item pairs')
return parser.parse_args()
if __name__ == '__main__':
start_time = time.time()
args = Entity2Vec.parse_args()
print('Parameters:\n')
print('walk length = %d\n' % args.walk_length)
print('number of walks per entity = %d\n' % args.num_walks)
print('p = %s\n' % args.p)
print('q = %s\n' % args.q)
print('weighted = %s\n' % args.weighted)
print('directed = %s\n' % args.directed)
print('preprocessing = %s\n' % args.preprocessing)
print('dimensions = %s\n' % args.dimensions)
print('iterations = %s\n' % args.iter)
print('window size = %s\n' % args.window_size)
print('workers = %s\n' % args.workers)
print('config_file = %s\n' % args.config_file)
print('sparql endpoint = %s\n' % args.sparql)
print('dataset = %s\n' % args.dataset)
print('entities = %s\n' % args.entities)
print('default graph = %s\n' % args.default_graph)
print('entity class = %s\n' % args.entity_class)
print('feedback file = %s\n' % args.feedback_file)
e2v = Entity2Vec(args.directed, args.preprocessing, args.weighted, args.p, args.q, args.walk_length, args.num_walks,
args.dimensions, args.window_size, args.workers, args.iter, args.config_file, args.sparql,
args.dataset, args.entities, args.default_graph, args.entity_class, args.feedback_file)
e2v.run()
print("--- %s seconds ---" % (time.time() - start_time))
|
|
# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants as l3_const
from neutron.common import exceptions as n_exc
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DEVICE_OWNER_DVR_INTERFACE = l3_const.DEVICE_OWNER_DVR_INTERFACE
DEVICE_OWNER_DVR_SNAT = l3_const.DEVICE_OWNER_ROUTER_SNAT
FLOATINGIP_AGENT_INTF_KEY = l3_const.FLOATINGIP_AGENT_INTF_KEY
DEVICE_OWNER_AGENT_GW = l3_const.DEVICE_OWNER_AGENT_GW
SNAT_ROUTER_INTF_KEY = l3_const.SNAT_ROUTER_INTF_KEY
router_distributed_opts = [
cfg.BoolOpt('router_distributed',
default=False,
help=_("System-wide flag to determine the type of router "
"that tenants can create. Only admin can override.")),
]
cfg.CONF.register_opts(router_distributed_opts)
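# With the option registered above, operators can flip the default from the
# service configuration file (illustrative snippet; standard oslo.config
# [DEFAULT] section):
#
#   [DEFAULT]
#   router_distributed = True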
class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
l3_attrs_db.ExtraAttributesMixin):
"""Mixin class to enable DVR support."""
router_device_owners = (
l3_db.L3_NAT_db_mixin.router_device_owners +
(DEVICE_OWNER_DVR_INTERFACE,))
extra_attributes = (
l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
'name': "distributed",
'default': cfg.CONF.router_distributed
}])
def _create_router_db(self, context, router, tenant_id):
"""Create a router db object with dvr additions."""
router['distributed'] = _is_distributed_router(router)
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._create_router_db(
context, router, tenant_id)
self._process_extra_attr_router_create(context, router_db, router)
return router_db
def _validate_router_migration(self, router_db, router_res):
"""Allow centralized -> distributed state transition only."""
if (router_db.extra_attributes.distributed and
router_res.get('distributed') is False):
LOG.info(_("Centralizing distributed router %s "
"is not supported"), router_db['id'])
raise NotImplementedError()
def _update_distributed_attr(
self, context, router_id, router_db, data, gw_info):
"""Update the model to support the dvr case of a router."""
if not attributes.is_attr_set(gw_info) and data.get('distributed'):
admin_ctx = context.elevated()
filters = {'device_id': [router_id],
'device_owner': [l3_const.DEVICE_OWNER_ROUTER_INTF]}
ports = self._core_plugin.get_ports(admin_ctx, filters=filters)
for p in ports:
port_db = self._core_plugin._get_port(admin_ctx, p['id'])
port_db.update({'device_owner': DEVICE_OWNER_DVR_INTERFACE})
def _update_router_db(self, context, router_id, data, gw_info):
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._update_router_db(
context, router_id, data, gw_info)
self._validate_router_migration(router_db, data)
# FIXME(swami): need to add migration status so that the scheduler
# can pick the migration request and move stuff over. For now
# only the distributed flag and router interface's owner change.
# Instead of complaining on _validate_router_migration, let's
# succeed here and complete the task in a follow-up patch
router_db.extra_attributes.update(data)
self._update_distributed_attr(
context, router_id, router_db, data, gw_info)
return router_db
def _delete_current_gw_port(self, context, router_id, router, new_network):
super(L3_NAT_with_dvr_db_mixin,
self)._delete_current_gw_port(context, router_id,
router, new_network)
if router.extra_attributes.distributed:
self.delete_csnat_router_interface_ports(
context.elevated(), router)
def _create_gw_port(self, context, router_id, router, new_network):
super(L3_NAT_with_dvr_db_mixin,
self)._create_gw_port(context, router_id,
router, new_network)
if router.extra_attributes.distributed and router.gw_port:
snat_p_list = self.create_snat_intf_ports_if_not_exists(
context.elevated(), router['id'])
if not snat_p_list:
LOG.debug("SNAT interface ports not created: %s", snat_p_list)
def _get_device_owner(self, context, router=None):
"""Get device_owner for the specified router."""
router_is_uuid = isinstance(router, basestring)
if router_is_uuid:
router = self._get_router(context, router)
if _is_distributed_router(router):
return DEVICE_OWNER_DVR_INTERFACE
return super(L3_NAT_with_dvr_db_mixin,
self)._get_device_owner(context, router)
def _get_interface_ports_for_network(self, context, network_id):
router_intf_qry = (context.session.query(models_v2.Port).
filter_by(network_id=network_id))
return (router_intf_qry.
filter(models_v2.Port.device_owner.in_(
[l3_const.DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_DVR_INTERFACE])))
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
previous_router_id = floatingip_db.router_id
port_id, internal_ip_address, router_id = (
self._check_and_get_fip_assoc(context, fip, floatingip_db))
agt_gw_port_check = False
admin_ctx = context.elevated()
if (not ('port_id' in fip and fip['port_id'])) and (
floatingip_db['fixed_port_id'] is not None):
port_db = self._core_plugin._get_port(
context, floatingip_db['fixed_port_id'])
LOG.debug("VM Port info: %s", port_db)
fip_hostid = self.get_vm_port_hostid(context, port_db['id'])
if fip_hostid:
agt_gw_port_check = self.check_fips_availability_on_host(
admin_ctx, fip['id'], fip_hostid)
floatingip_db.update({'fixed_ip_address': internal_ip_address,
'fixed_port_id': port_id,
'router_id': router_id,
'last_known_router_id': previous_router_id})
if agt_gw_port_check:
LOG.debug('Deleting the Agent GW Port')
self.delete_floatingip_agent_gateway_port(admin_ctx, fip_hostid)
def add_router_interface(self, context, router_id, interface_info):
add_by_port, add_by_sub = self._validate_interface_info(interface_info)
router = self._get_router(context, router_id)
device_owner = self._get_device_owner(context, router)
if add_by_port:
port = self._add_interface_by_port(
context, router_id, interface_info['port_id'], device_owner)
elif add_by_sub:
port = self._add_interface_by_subnet(
context, router_id, interface_info['subnet_id'], device_owner)
if router.extra_attributes.distributed and router.gw_port:
self.add_csnat_router_interface_port(
context.elevated(), router_id, port['network_id'],
port['fixed_ips'][0]['subnet_id'])
return self.notify_router_interface_action(
context, router_id, port['tenant_id'], port['id'],
port['fixed_ips'][0]['subnet_id'], 'add')
def remove_router_interface(self, context, router_id, interface_info):
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise n_exc.BadRequest(resource='router', msg=msg)
port_id = interface_info.get('port_id')
subnet_id = interface_info.get('subnet_id')
router = self._get_router(context, router_id)
device_owner = self._get_device_owner(context, router)
if port_id:
port, subnet = self._remove_interface_by_port(
context, router_id, port_id, subnet_id, device_owner)
elif subnet_id:
port, subnet = self._remove_interface_by_subnet(
context, router_id, subnet_id, device_owner)
if router.extra_attributes.distributed and router.gw_port:
self.delete_csnat_router_interface_ports(
context.elevated(), router, subnet_id=subnet_id)
return self.notify_router_interface_action(
context, router_id, port['tenant_id'], port['id'],
subnet['id'], 'remove')
def get_snat_sync_interfaces(self, context, router_ids):
"""Query router interfaces that relate to list of router_ids."""
if not router_ids:
return []
filters = {'device_id': router_ids,
'device_owner': [DEVICE_OWNER_DVR_SNAT]}
interfaces = self._core_plugin.get_ports(context, filters)
LOG.debug("Return the SNAT ports: %s", interfaces)
if interfaces:
self._populate_subnet_for_ports(context, interfaces)
return interfaces
def _build_routers_list(self, context, routers, gw_ports):
# Perform a single query up front for all routers
router_ids = [r['id'] for r in routers]
snat_binding = l3_dvrsched_db.CentralizedSnatL3AgentBinding
query = (context.session.query(snat_binding).
filter(snat_binding.router_id.in_(router_ids))).all()
bindings = dict((b.router_id, b) for b in query)
for rtr in routers:
gw_port_id = rtr['gw_port_id']
if gw_port_id:
rtr['gw_port'] = gw_ports[gw_port_id]
if 'enable_snat' in rtr[l3.EXTERNAL_GW_INFO]:
rtr['enable_snat'] = (
rtr[l3.EXTERNAL_GW_INFO]['enable_snat'])
binding = bindings.get(rtr['id'])
if not binding:
rtr['gw_port_host'] = None
LOG.debug('No snat is bound to router %s', rtr['id'])
continue
rtr['gw_port_host'] = binding.l3_agent.host
return routers
def _process_routers(self, context, routers):
routers_dict = {}
for router in routers:
routers_dict[router['id']] = router
router_ids = [router['id']]
if router['gw_port_id']:
snat_router_intfs = self.get_snat_sync_interfaces(context,
router_ids)
LOG.debug("SNAT ports returned: %s ", snat_router_intfs)
router[SNAT_ROUTER_INTF_KEY] = snat_router_intfs
return routers_dict
def _process_floating_ips(self, context, routers_dict, floating_ips):
for floating_ip in floating_ips:
router = routers_dict.get(floating_ip['router_id'])
if router:
router_floatingips = router.get(l3_const.FLOATINGIP_KEY, [])
floatingip_agent_intfs = []
if router['distributed']:
floating_ip['host'] = self.get_vm_port_hostid(
context, floating_ip['port_id'])
LOG.debug("Floating IP host: %s", floating_ip['host'])
fip_agent = self._get_agent_by_type_and_host(
context, l3_const.AGENT_TYPE_L3,
floating_ip['host'])
LOG.debug("FIP Agent : %s ", fip_agent['id'])
floatingip_agent_intfs = self.get_fip_sync_interfaces(
context, fip_agent['id'])
LOG.debug("FIP Agent ports: %s", floatingip_agent_intfs)
router_floatingips.append(floating_ip)
router[l3_const.FLOATINGIP_KEY] = router_floatingips
router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = (
floatingip_agent_intfs)
def get_fip_sync_interfaces(self, context, fip_agent_id):
"""Query router interfaces that relate to list of router_ids."""
if not fip_agent_id:
return []
filters = {'device_id': [fip_agent_id],
'device_owner': [DEVICE_OWNER_AGENT_GW]}
interfaces = self._core_plugin.get_ports(context.elevated(), filters)
LOG.debug("Return the FIP ports: %s ", interfaces)
if interfaces:
self._populate_subnet_for_ports(context, interfaces)
return interfaces
def get_sync_data(self, context, router_ids=None, active=None):
routers, interfaces, floating_ips = self._get_router_info_list(
context, router_ids=router_ids, active=active,
device_owners=[l3_const.DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_DVR_INTERFACE])
# Add the port binding host to the floatingip dictionary
for fip in floating_ips:
fip['host'] = self.get_vm_port_hostid(context, fip['port_id'])
routers_dict = self._process_routers(context, routers)
self._process_floating_ips(context, routers_dict, floating_ips)
self._process_interfaces(routers_dict, interfaces)
return routers_dict.values()
def get_vm_port_hostid(self, context, port_id, port=None):
"""Return the portbinding host_id."""
vm_port_db = port or self._core_plugin.get_port(context, port_id)
allowed_device_owners = ("neutron:LOADBALANCER", DEVICE_OWNER_AGENT_GW)
device_owner = vm_port_db['device_owner'] if vm_port_db else ""
if (device_owner in allowed_device_owners or
device_owner.startswith("compute:")):
return vm_port_db[portbindings.HOST_ID]
def get_agent_gw_ports_exist_for_network(
self, context, network_id, host, agent_id):
"""Return agent gw port if exist, or None otherwise."""
if not network_id:
LOG.debug("Network not specified")
return
filters = {
'network_id': [network_id],
'device_id': [agent_id],
'device_owner': [DEVICE_OWNER_AGENT_GW]
}
ports = self._core_plugin.get_ports(context, filters)
if ports:
return ports[0]
def check_fips_availability_on_host(self, context, fip_id, host_id):
"""Query all floating_ips and filter by particular host."""
fip_count_on_host = 0
with context.session.begin(subtransactions=True):
routers = self._get_sync_routers(context, router_ids=None)
router_ids = [router['id'] for router in routers]
floating_ips = self._get_sync_floating_ips(context, router_ids)
# Check for the active floatingip in the host
for fip in floating_ips:
f_host = self.get_vm_port_hostid(context, fip['port_id'])
if f_host == host_id:
fip_count_on_host += 1
# If fip_count_on_host is greater than 1 or equal to zero, no action is
# taken. If it is exactly 1, this is the last active floating IP on the
# host, so the agent gateway port can be deleted.
if fip_count_on_host == 1:
return True
return False
def delete_floatingip_agent_gateway_port(self, context, host_id):
"""Function to delete the FIP agent gateway port on host."""
# delete any fip agent gw port
device_filter = {'device_owner': [DEVICE_OWNER_AGENT_GW]}
ports = self._core_plugin.get_ports(context,
filters=device_filter)
for p in ports:
if self.get_vm_port_hostid(context, p['id'], p) == host_id:
self._core_plugin._delete_port(context, p['id'])
return
def create_fip_agent_gw_port_if_not_exists(
self, context, network_id, host):
"""Function to return the FIP Agent GW port.
This function will create a FIP Agent GW port
if required. If the port already exists, it
will return the existing port and will not
create a new one.
"""
l3_agent_db = self._get_agent_by_type_and_host(
context, l3_const.AGENT_TYPE_L3, host)
if l3_agent_db:
LOG.debug("Agent ID exists: %s", l3_agent_db['id'])
f_port = self.get_agent_gw_ports_exist_for_network(
context, network_id, host, l3_agent_db['id'])
if not f_port:
LOG.info(_('Agent Gateway port does not exist,'
' so create one: %s'), f_port)
agent_port = self._core_plugin.create_port(
context,
{'port': {'tenant_id': '',
'network_id': network_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'device_id': l3_agent_db['id'],
'device_owner': DEVICE_OWNER_AGENT_GW,
'admin_state_up': True,
'name': ''}})
if agent_port:
self._populate_subnet_for_ports(context, [agent_port])
return agent_port
msg = _("Unable to create the Agent Gateway Port")
raise n_exc.BadRequest(resource='router', msg=msg)
else:
self._populate_subnet_for_ports(context, [f_port])
return f_port
def get_snat_interface_ports_for_router(self, context, router_id):
"""Return all existing snat_router_interface ports."""
filters = {'device_id': [router_id],
'device_owner': [DEVICE_OWNER_DVR_SNAT]}
return self._core_plugin.get_ports(context, filters)
def add_csnat_router_interface_port(
self, context, router_id, network_id, subnet_id, do_pop=True):
"""Add SNAT interface to the specified router and subnet."""
snat_port = self._core_plugin.create_port(
context,
{'port': {'tenant_id': '',
'network_id': network_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': [{'subnet_id': subnet_id}],
'device_id': router_id,
'device_owner': DEVICE_OWNER_DVR_SNAT,
'admin_state_up': True,
'name': ''}})
if not snat_port:
msg = _("Unable to create the SNAT Interface Port")
raise n_exc.BadRequest(resource='router', msg=msg)
elif do_pop:
return self._populate_subnet_for_ports(context, [snat_port])
return snat_port
def create_snat_intf_ports_if_not_exists(
self, context, router_id):
"""Function to return the snat interface port list.
This function will return the snat interface port list
if it exists. If the ports do not exist, it will create
new ones and then return the list.
"""
port_list = self.get_snat_interface_ports_for_router(
context, router_id)
if port_list:
self._populate_subnet_for_ports(context, port_list)
return port_list
port_list = []
filters = {
'device_id': [router_id],
'device_owner': [DEVICE_OWNER_DVR_INTERFACE]}
int_ports = self._core_plugin.get_ports(context, filters)
LOG.info(_('SNAT interface port list does not exist,'
' so create one: %s'), port_list)
for intf in int_ports:
if intf.get('fixed_ips'):
# Pass the subnet for the port to make sure the IPs
# are assigned on the right subnet if multiple
# subnets exist
snat_port = self.add_csnat_router_interface_port(
context, router_id, intf['network_id'],
intf['fixed_ips'][0]['subnet_id'], do_pop=False)
port_list.append(snat_port)
if port_list:
self._populate_subnet_for_ports(context, port_list)
return port_list
def dvr_vmarp_table_update(self, context, port_id, action):
"""Notify the L3 agent of VM ARP table changes.
Provide the details of the VM ARP to the L3 agent when
a Nova instance gets created or deleted.
"""
port_dict = self._core_plugin._get_port(context, port_id)
# Check this is a valid VM port
if ("compute:" not in port_dict['device_owner'] or
not port_dict['fixed_ips']):
return
ip_address = port_dict['fixed_ips'][0]['ip_address']
subnet = port_dict['fixed_ips'][0]['subnet_id']
filters = {'fixed_ips': {'subnet_id': [subnet]}}
ports = self._core_plugin.get_ports(context, filters=filters)
for port in ports:
if port['device_owner'] == DEVICE_OWNER_DVR_INTERFACE:
router_id = port['device_id']
router_dict = self._get_router(context, router_id)
if router_dict.extra_attributes.distributed:
arp_table = {'ip_address': ip_address,
'mac_address': port_dict['mac_address'],
'subnet_id': subnet}
if action == "add":
notify_action = self.l3_rpc_notifier.add_arp_entry
elif action == "del":
notify_action = self.l3_rpc_notifier.del_arp_entry
notify_action(context, router_id, arp_table)
return
def delete_csnat_router_interface_ports(self, context,
router, subnet_id=None):
# Each csnat router interface port is associated
# with a subnet, so we need to pass the subnet id to
# delete the right ports.
device_filter = {
'device_id': [router['id']],
'device_owner': [DEVICE_OWNER_DVR_SNAT]}
c_snat_ports = self._core_plugin.get_ports(
context, filters=device_filter)
for p in c_snat_ports:
if subnet_id is None:
self._core_plugin.delete_port(context,
p['id'],
l3_port_check=False)
else:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
LOG.debug("Subnet matches: %s", subnet_id)
self._core_plugin.delete_port(context,
p['id'],
l3_port_check=False)
def _is_distributed_router(router):
"""Return True if router to be handled is distributed."""
try:
# See if router is a DB object first
requested_router_type = router.extra_attributes.distributed
except AttributeError:
# if not, try to see if it is a request body
requested_router_type = router.get('distributed')
if attributes.is_attr_set(requested_router_type):
return requested_router_type
return cfg.CONF.router_distributed
|
|
from scipy import ndimage
from collections import Counter
from showattendtell.core.vggnet import Vgg19
from showattendtell.core.utils import *
from showattendtell.core import utils
import tensorflow as tf
import numpy as np
import pandas as pd
import hickle
import os
import json
def _process_caption_data(caption_file, image_dir, max_length):
with open(caption_file) as f:
caption_data = json.load(f)
# id_to_filename is a dictionary such as {image_id: file_name}
id_to_filename = {image['id']: image['file_name'] for image in caption_data['images']}
# data is a list of dictionaries, each containing 'caption', 'file_name' and 'image_id' keys.
data = []
for annotation in caption_data['annotations']:
image_id = annotation['image_id']
annotation['file_name'] = os.path.join(image_dir, id_to_filename[image_id])
data += [annotation]
# convert to pandas dataframe (for later visualization or debugging)
caption_data = pd.DataFrame.from_dict(data)
del caption_data['id']
caption_data.sort_values(by='image_id', inplace=True)
caption_data = caption_data.reset_index(drop=True)
del_idx = []
for i, caption in enumerate(caption_data['caption']):
caption = caption.replace('.','').replace(',','').replace("'","").replace('"','')
caption = caption.replace('&','and').replace('(','').replace(")","").replace('-',' ')
caption = " ".join(caption.split()) # replace multiple spaces
caption_data.set_value(i, 'caption', caption.lower())
if len(caption.split(" ")) > max_length:
del_idx.append(i)
# delete captions if size is larger than max_length
print "The number of captions before deletion: %d" %len(caption_data)
caption_data = caption_data.drop(caption_data.index[del_idx])
caption_data = caption_data.reset_index(drop=True)
print "The number of captions after deletion: %d" %len(caption_data)
return caption_data
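# For reference, the caption file read above is expected to follow the MS-COCO
# annotation layout (only the fields actually used are shown; file names are
# illustrative):
#
#   {
#     "images":      [{"id": 123, "file_name": "COCO_train2014_0000123.jpg"}, ...],
#     "annotations": [{"id": 1, "image_id": 123, "caption": "a dog ..."}, ...]
#   }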
def _build_vocab(annotations, threshold=1):
counter = Counter()
max_len = 0
for i, caption in enumerate(annotations['caption']):
words = caption.split(' ') # caption contains only lower-case words
for w in words:
counter[w] +=1
if len(caption.split(" ")) > max_len:
max_len = len(caption.split(" "))
vocab = [word for word in counter if counter[word] >= threshold]
print ('Filtered %d words to %d words with word count threshold %d.' % (len(counter), len(vocab), threshold))
word_to_idx = {u'<NULL>': 0, u'<START>': 1, u'<END>': 2}
idx = 3
for word in vocab:
word_to_idx[word] = idx
idx += 1
print "Max length of caption: ", max_len
return word_to_idx
def _build_caption_vector(annotations, word_to_idx, max_length=15):
n_examples = len(annotations)
captions = np.ndarray((n_examples,max_length+2)).astype(np.int32)
for i, caption in enumerate(annotations['caption']):
words = caption.split(" ") # caption contrains only lower-case words
cap_vec = []
cap_vec.append(word_to_idx['<START>'])
for word in words:
if word in word_to_idx:
cap_vec.append(word_to_idx[word])
cap_vec.append(word_to_idx['<END>'])
# pad short caption with the special null token '<NULL>' to make it fixed-size vector
if len(cap_vec) < (max_length + 2):
for j in range(max_length + 2 - len(cap_vec)):
cap_vec.append(word_to_idx['<NULL>'])
captions[i, :] = np.asarray(cap_vec)
print "Finished building caption vectors"
return captions
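# Worked illustration of the layout produced above (toy vocabulary): with
# max_length=5 and word_to_idx = {'<NULL>': 0, '<START>': 1, '<END>': 2,
# 'a': 3, 'dog': 4}, the caption "a dog" is encoded as
#   [1, 3, 4, 2, 0, 0, 0]    # <START> a dog <END> <NULL> <NULL> <NULL>
# i.e. a fixed-size vector of length max_length + 2.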
def _build_file_names(annotations):
image_file_names = []
id_to_idx = {}
idx = 0
image_ids = annotations['image_id']
file_names = annotations['file_name']
for image_id, file_name in zip(image_ids, file_names):
if not image_id in id_to_idx:
id_to_idx[image_id] = idx
image_file_names.append(file_name)
idx += 1
file_names = np.asarray(image_file_names)
return file_names, id_to_idx
def _build_image_idxs(annotations, id_to_idx):
image_idxs = np.ndarray(len(annotations), dtype=np.int32)
image_ids = annotations['image_id']
for i, image_id in enumerate(image_ids):
image_idxs[i] = id_to_idx[image_id]
return image_idxs
def main():
# batch size for extracting feature vectors from vggnet.
batch_size = 100
# maximum caption length (in words); captions longer than max_length are deleted.
max_length = 15
# words that occur fewer than word_count_threshold times in the training set are excluded from the vocabulary.
word_count_threshold = 1
# vgg model path
vgg_model_path = './data/imagenet-vgg-verydeep-19.mat'
caption_file = 'data/annotations/captions_train2014.json'
image_dir = 'image/%2014_resized/'
# about 80000 images and 400000 captions for train dataset
train_dataset = _process_caption_data(caption_file='data/annotations/captions_train2014.json',
image_dir='image/train2014_resized/',
max_length=max_length)
# about 40000 images and 200000 captions
val_dataset = _process_caption_data(caption_file='data/annotations/captions_val2014.json',
image_dir='image/val2014_resized/',
max_length=max_length)
# about 4000 images and 20000 captions for val / test dataset
val_cutoff = int(0.1 * len(val_dataset))
test_cutoff = int(0.2 * len(val_dataset))
print 'Finished processing caption data'
save_pickle(train_dataset, 'data/train/train.annotations.pkl')
save_pickle(val_dataset[:val_cutoff], 'data/val/val.annotations.pkl')
save_pickle(val_dataset[val_cutoff:test_cutoff].reset_index(drop=True), 'data/test/test.annotations.pkl')
for split in ['train', 'val', 'test']:
annotations = load_pickle('./data/%s/%s.annotations.pkl' % (split, split))
if split == 'train':
word_to_idx = _build_vocab(annotations=annotations, threshold=word_count_threshold)
save_pickle(word_to_idx, './data/%s/word_to_idx.pkl' % split)
captions = _build_caption_vector(annotations=annotations, word_to_idx=word_to_idx, max_length=max_length)
save_pickle(captions, './data/%s/%s.captions.pkl' % (split, split))
file_names, id_to_idx = _build_file_names(annotations)
save_pickle(file_names, './data/%s/%s.file.names.pkl' % (split, split))
image_idxs = _build_image_idxs(annotations, id_to_idx)
save_pickle(image_idxs, './data/%s/%s.image.idxs.pkl' % (split, split))
# prepare reference captions to compute bleu scores later
image_ids = {}
feature_to_captions = {}
i = -1
for caption, image_id in zip(annotations['caption'], annotations['image_id']):
if not image_id in image_ids:
image_ids[image_id] = 0
i += 1
feature_to_captions[i] = []
feature_to_captions[i].append(caption.lower() + ' .')
save_pickle(feature_to_captions, './data/%s/%s.references.pkl' % (split, split))
print "Finished building %s caption dataset" %split
# extract conv5_3 feature vectors
vggnet = Vgg19(vgg_model_path)
vggnet.build()
with tf.Session(config=utils.config) as sess:
tf.initialize_all_variables().run()
for split in ['train', 'val', 'test']:
anno_path = './data/%s/%s.annotations.pkl' % (split, split)
save_path = './data/%s/%s.features.hkl' % (split, split)
annotations = load_pickle(anno_path)
image_path = list(annotations['file_name'].unique())
n_examples = len(image_path)
all_feats = np.ndarray([n_examples, 196, 512], dtype=np.float32)
for start, end in zip(range(0, n_examples, batch_size),
range(batch_size, n_examples + batch_size, batch_size)):
image_batch_file = image_path[start:end]
image_batch = np.array(map(lambda x: ndimage.imread(x, mode='RGB'), image_batch_file)).astype(
np.float32)
feats = sess.run(vggnet.features, feed_dict={vggnet.images: image_batch})
all_feats[start:end, :] = feats
print ("Processed %d %s features.." % (end, split))
# use hickle to save huge feature vectors
hickle.dump(all_feats, save_path)
print ("Saved %s.." % (save_path))
if __name__ == "__main__":
main()
|
|
"""The tests for the automation component."""
import unittest
from blumate.bootstrap import _setup_component
import blumate.components.automation as automation
from blumate.const import ATTR_ENTITY_ID
from tests.common import get_test_home_assistant
class TestAutomation(unittest.TestCase):
"""Test the event automation."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.components.append('group')
self.calls = []
def record_call(service):
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_service_data_not_a_dict(self):
"""Test service data not dict."""
assert not _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'data': 100,
}
}
})
def test_service_specify_data(self):
"""Test service data."""
assert _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'data_template': {
'some': '{{ trigger.platform }} - '
'{{ trigger.event.event_type }}'
},
}
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('event - test_event', self.calls[0].data['some'])
def test_service_specify_entity_id(self):
"""Test service data."""
assert _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'entity_id': 'hello.world'
}
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(['hello.world'],
self.calls[0].data.get(ATTR_ENTITY_ID))
def test_service_specify_entity_id_list(self):
"""Test service data."""
assert _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'entity_id': ['hello.world', 'hello.world2']
}
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(['hello.world', 'hello.world2'],
self.calls[0].data.get(ATTR_ENTITY_ID))
def test_two_triggers(self):
"""Test triggers."""
assert _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': [
{
'platform': 'event',
'event_type': 'test_event',
},
{
'platform': 'state',
'entity_id': 'test.entity',
}
],
'action': {
'service': 'test.automation',
}
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set('test.entity', 'hello')
self.hass.pool.block_till_done()
self.assertEqual(2, len(self.calls))
def test_two_conditions_with_and(self):
"""Test two and conditions."""
entity_id = 'test.entity'
assert _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': [
{
'platform': 'event',
'event_type': 'test_event',
},
],
'condition': [
{
'condition': 'state',
'entity_id': entity_id,
'state': '100'
},
{
'condition': 'numeric_state',
'entity_id': entity_id,
'below': 150
}
],
'action': {
'service': 'test.automation',
}
}
})
self.hass.states.set(entity_id, 100)
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, 101)
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, 151)
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_two_conditions_with_or(self):
"""Test two or conditions."""
entity_id = 'test.entity'
assert _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': [
{
'platform': 'event',
'event_type': 'test_event',
},
],
'condition_type': 'OR',
'condition': [
{
'platform': 'state',
'entity_id': entity_id,
'state': '200'
},
{
'platform': 'numeric_state',
'entity_id': entity_id,
'below': 150
}
],
'action': {
'service': 'test.automation',
}
}
})
self.hass.states.set(entity_id, 200)
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, 100)
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(2, len(self.calls))
self.hass.states.set(entity_id, 250)
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(2, len(self.calls))
def test_using_trigger_as_condition(self):
"""Test triggers as condition."""
entity_id = 'test.entity'
assert _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': [
{
'platform': 'state',
'entity_id': entity_id,
'from': '120',
'state': '100'
},
{
'platform': 'numeric_state',
'entity_id': entity_id,
'below': 150
}
],
'condition': 'use_trigger_values',
'action': {
'service': 'test.automation',
}
}
})
self.hass.states.set(entity_id, 100)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, 120)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, 100)
self.hass.pool.block_till_done()
self.assertEqual(2, len(self.calls))
self.hass.states.set(entity_id, 151)
self.hass.pool.block_till_done()
self.assertEqual(2, len(self.calls))
def test_using_trigger_as_condition_with_invalid_condition(self):
"""Event is not a valid condition."""
entity_id = 'test.entity'
self.hass.states.set(entity_id, 100)
assert _setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': [
{
'platform': 'event',
'event_type': 'test_event',
},
{
'platform': 'numeric_state',
'entity_id': entity_id,
'below': 150
}
],
'condition': 'use_trigger_values',
'action': {
'service': 'test.automation',
}
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_automation_list_setting(self):
"""Event is not a valid condition."""
self.assertTrue(_setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: [{
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
}
}, {
'trigger': {
'platform': 'event',
'event_type': 'test_event_2',
},
'action': {
'service': 'test.automation',
}
}]
}))
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.bus.fire('test_event_2')
self.hass.pool.block_till_done()
self.assertEqual(2, len(self.calls))
def test_automation_calling_two_actions(self):
"""Test if we can call two actions from automation definition."""
self.assertTrue(_setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': [{
'service': 'test.automation',
'data': {'position': 0},
}, {
'service': 'test.automation',
'data': {'position': 1},
}],
}
}))
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
assert len(self.calls) == 2
assert self.calls[0].data['position'] == 0
assert self.calls[1].data['position'] == 1
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
import six
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack.compute import shelve as shelve_v21
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
class ShelvePolicyTestV21(test.NoDBTestCase):
plugin = shelve_v21
def setUp(self):
super(ShelvePolicyTestV21, self).setUp()
self.controller = self.plugin.ShelveController()
self.req = fakes.HTTPRequest.blank('')
@mock.patch('nova.api.openstack.common.get_instance')
def test_shelve_locked_server(self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
self.stub_out('nova.compute.api.API.shelve',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self.controller._shelve,
self.req, uuidsentinel.fake, {})
@mock.patch('nova.api.openstack.common.get_instance')
@mock.patch('nova.objects.instance.Instance.save')
def test_shelve_task_state_race(self, mock_save, get_instance_mock):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
vm_state=vm_states.ACTIVE, task_state=None)
instance.launched_at = instance.created_at
get_instance_mock.return_value = instance
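        # Simulate a concurrent request changing the task state between the
        # fetch and the save, which the controller should surface as a 409.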
mock_save.side_effect = exception.UnexpectedTaskStateError(
instance_uuid=instance.uuid, expected=None,
actual=task_states.SHELVING)
ex = self.assertRaises(webob.exc.HTTPConflict, self.controller._shelve,
self.req, uuidsentinel.fake, body={'shelve': {}})
self.assertIn('Conflict updating instance', six.text_type(ex))
mock_save.assert_called_once_with(expected_task_state=[None])
@mock.patch('nova.api.openstack.common.get_instance')
def test_unshelve_locked_server(self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
self.stub_out('nova.compute.api.API.unshelve',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self.controller._unshelve,
self.req, uuidsentinel.fake, body={'unshelve': {}})
@mock.patch('nova.api.openstack.common.get_instance')
def test_shelve_offload_locked_server(self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
self.stub_out('nova.compute.api.API.shelve_offload',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._shelve_offload,
self.req, uuidsentinel.fake, {})
class ShelvePolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ShelvePolicyEnforcementV21, self).setUp()
self.controller = shelve_v21.ShelveController()
self.req = fakes.HTTPRequest.blank('')
@mock.patch('nova.api.openstack.common.get_instance')
def test_shelve_restricted_by_role(self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rules = {'os_compute_api:os-shelve:shelve': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden, self.controller._shelve,
self.req, uuidsentinel.fake, {})
@mock.patch('nova.api.openstack.common.get_instance')
def test_shelve_policy_failed_with_other_project(self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rule_name = "os_compute_api:os-shelve:shelve"
self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
# Change the project_id in request context.
self.req.environ['nova.context'].project_id = 'other-project'
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._shelve, self.req, fakes.FAKE_UUID,
body={'shelve': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.shelve')
@mock.patch('nova.api.openstack.common.get_instance')
def test_shelve_overridden_policy_pass_with_same_project(self,
get_instance_mock,
shelve_mock):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
project_id=self.req.environ['nova.context'].project_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:os-shelve:shelve"
self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
self.controller._shelve(self.req, fakes.FAKE_UUID, body={'shelve': {}})
shelve_mock.assert_called_once_with(self.req.environ['nova.context'],
instance)
@mock.patch('nova.api.openstack.common.get_instance')
def test_shelve_overridden_policy_failed_with_other_user_in_same_project(
self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rule_name = "os_compute_api:os-shelve:shelve"
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
# Change the user_id in request context.
self.req.environ['nova.context'].user_id = 'other-user'
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._shelve, self.req,
fakes.FAKE_UUID, body={'shelve': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.shelve')
@mock.patch('nova.api.openstack.common.get_instance')
def test_shelve_overridden_policy_pass_with_same_user(self,
get_instance_mock,
shelve_mock):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
user_id=self.req.environ['nova.context'].user_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:os-shelve:shelve"
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
self.controller._shelve(self.req, fakes.FAKE_UUID, body={'shelve': {}})
shelve_mock.assert_called_once_with(self.req.environ['nova.context'],
instance)
def test_shelve_offload_restricted_by_role(self):
rules = {'os_compute_api:os-shelve:shelve_offload': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.controller._shelve_offload, self.req,
uuidsentinel.fake, {})
def test_shelve_offload_policy_failed(self):
rule_name = "os_compute_api:os-shelve:shelve_offload"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._shelve_offload, self.req, fakes.FAKE_UUID,
body={'shelve_offload': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_unshelve_restricted_by_role(self):
rules = {'os_compute_api:os-shelve:unshelve': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden, self.controller._unshelve,
self.req, uuidsentinel.fake, body={'unshelve': {}})
def test_unshelve_policy_failed(self):
rule_name = "os_compute_api:os-shelve:unshelve"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._unshelve, self.req, fakes.FAKE_UUID,
body={'unshelve': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class UnshelveServerControllerTestV277(test.NoDBTestCase):
"""Server controller test for microversion 2.77
Add availability_zone parameter to unshelve a shelved-offloaded server of
2.77 microversion.
"""
wsgi_api_version = '2.77'
def setUp(self):
super(UnshelveServerControllerTestV277, self).setUp()
self.controller = shelve_v21.ShelveController()
self.req = fakes.HTTPRequest.blank(
'/%s/servers/a/action' % fakes.FAKE_PROJECT_ID,
use_admin_context=True, version=self.wsgi_api_version)
# These tests don't care about ports with QoS bandwidth resources.
self.stub_out('nova.api.openstack.common.'
'instance_has_port_with_resource_request',
lambda *a, **kw: False)
def fake_get_instance(self):
ctxt = self.req.environ['nova.context']
return fake_instance.fake_instance_obj(
ctxt, uuid=fakes.FAKE_UUID, vm_state=vm_states.SHELVED_OFFLOADED)
@mock.patch('nova.api.openstack.common.get_instance')
def test_unshelve_with_az_pre_2_77_failed(self, mock_get_instance):
"""Make sure specifying an AZ before microversion 2.77 is ignored."""
instance = self.fake_get_instance()
mock_get_instance.return_value = instance
body = {
'unshelve': {
'availability_zone': 'us-east'
}}
self.req.body = jsonutils.dump_as_bytes(body)
self.req.api_version_request = (api_version_request.
APIVersionRequest('2.76'))
with mock.patch.object(self.controller.compute_api,
'unshelve') as mock_unshelve:
self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
mock_unshelve.assert_called_once_with(
self.req.environ['nova.context'], instance, new_az=None)
@mock.patch('nova.compute.api.API.unshelve')
@mock.patch('nova.api.openstack.common.get_instance')
def test_unshelve_with_none_pre_2_77_success(
self, mock_get_instance, mock_unshelve):
"""Make sure we can unshelve server with None
before microversion 2.77.
"""
instance = self.fake_get_instance()
mock_get_instance.return_value = instance
body = {'unshelve': None}
self.req.body = jsonutils.dump_as_bytes(body)
self.req.api_version_request = (api_version_request.
APIVersionRequest('2.76'))
self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
mock_unshelve.assert_called_once_with(
self.req.environ['nova.context'], instance, new_az=None)
@mock.patch('nova.compute.api.API.unshelve')
@mock.patch('nova.api.openstack.common.get_instance')
def test_unshelve_with_empty_dict_with_v2_77_failed(
self, mock_get_instance, mock_unshelve):
"""Make sure we cannot unshelve server with empty dict."""
instance = self.fake_get_instance()
mock_get_instance.return_value = instance
body = {'unshelve': {}}
self.req.body = jsonutils.dump_as_bytes(body)
exc = self.assertRaises(exception.ValidationError,
self.controller._unshelve,
self.req, fakes.FAKE_UUID,
body=body)
self.assertIn("\'availability_zone\' is a required property",
six.text_type(exc))
def test_invalid_az_name_with_int(self):
body = {
'unshelve': {
'availability_zone': 1234
}}
self.req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError,
self.controller._unshelve,
self.req, fakes.FAKE_UUID,
body=body)
def test_no_az_value(self):
body = {
'unshelve': {
'availability_zone': None
}}
self.req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError,
self.controller._unshelve,
self.req, fakes.FAKE_UUID,
body=body)
def test_unshelve_with_additional_param(self):
body = {
'unshelve': {
'availability_zone': 'us-east',
'additional_param': 1
}}
self.req.body = jsonutils.dump_as_bytes(body)
exc = self.assertRaises(
exception.ValidationError,
self.controller._unshelve, self.req,
fakes.FAKE_UUID, body=body)
self.assertIn("Additional properties are not allowed",
six.text_type(exc))
|
|
# -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
Python 3)
"""
from __future__ import absolute_import, division
import inspect
import os
import platform
import socket
import string
import struct
import sys
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
if platform.python_implementation() == 'PyPy':
_PYPY = True
else:
_PYPY = False
def _shouldEnableNewStyle():
"""
Returns whether or not we should enable the new-style conversion of
old-style classes. It inspects the environment for C{TWISTED_NEWSTYLE},
accepting an empty string, C{no}, C{false}, C{False}, and C{0} as falsey
values and everything else as a truthy value.
@rtype: L{bool}
"""
value = os.environ.get('TWISTED_NEWSTYLE', '')
if value in ['', 'no', 'false', 'False', '0']:
return False
else:
return True
_EXPECT_NEWSTYLE = _PY3 or _shouldEnableNewStyle()
def currentframe(n=0):
"""
In Python 3, L{inspect.currentframe} does not take a stack-level argument.
Restore that functionality from Python 2 so we don't have to re-implement
the C{f_back}-walking loop in places where it's called.
@param n: The number of stack levels above the caller to walk.
@type n: L{int}
@return: a frame, n levels up the stack from the caller.
@rtype: L{types.FrameType}
"""
f = inspect.currentframe()
for x in range(n + 1):
f = f.f_back
return f
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
illegalChars = [x for x in addr if x not in string.hexdigits + ':.']
if illegalChars:
raise ValueError("Illegal characters: %r" %
(''.join(illegalChars),))
parts = addr.split(':')
elided = parts.count('')
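        # Splitting on ':' turns a '::' elision into empty strings; counting
        # them tells us where the run of zero groups has to be expanded.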
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
try:
socket.AF_INET6
except AttributeError:
socket.AF_INET6 = 'AF_INET6'
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
adict = dict
if _PY3:
# These are actually useless in Python 2 as well, but we need to go
# through deprecation process there (ticket #5895):
del adict, inet_pton, inet_ntop
set = set
frozenset = frozenset
try:
from functools import reduce
except ImportError:
reduce = reduce
def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
with open(filename, "rbU") as fin:
source = fin.read()
code = compile(source, filename, "exec")
exec(code, globals, locals)
try:
cmp = cmp
except NameError:
def cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
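# Illustrative sketch (not part of the original module): a class defining only
# __cmp__ can be decorated with @comparable to gain the rich comparison
# methods it needs on Python 3, e.g.:
#
#     @comparable
#     class Version(object):
#         def __init__(self, n):
#             self.n = n
#         def __cmp__(self, other):
#             if not isinstance(other, Version):
#                 return NotImplemented
#             return cmp(self.n, other.n)
#
#     Version(1) < Version(2)   # True on Python 2 and Python 3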
if _PY3:
unicode = str
long = int
else:
unicode = unicode
long = long
def ioType(fileIshObject, default=unicode):
"""
Determine the type which will be returned from the given file object's
read() and accepted by its write() method as an argument.
In other words, determine whether the given file is 'opened in text mode'.
@param fileIshObject: Any object, but ideally one which resembles a file.
@type fileIshObject: L{object}
@param default: A default value to return when the type of C{fileIshObject}
cannot be determined.
@type default: L{type}
    @return: There are 4 possible return values:
1. L{unicode}, if the file is unambiguously opened in text mode.
2. L{bytes}, if the file is unambiguously opened in binary mode.
3. L{basestring}, if we are on python 2 (the L{basestring} type
does not exist on python 3) and the file is opened in binary
mode, but has an encoding and can therefore accept both bytes
and text reliably for writing, but will return L{bytes} from
read methods.
4. The C{default} parameter, if the given type is not understood.
@rtype: L{type}
"""
if isinstance(fileIshObject, TextIOBase):
# If it's for text I/O, then it's for text I/O.
return unicode
if isinstance(fileIshObject, IOBase):
# If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
return bytes
encoding = getattr(fileIshObject, 'encoding', None)
import codecs
if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
# On StreamReaderWriter, the 'encoding' attribute has special meaning;
# it is unambiguously unicode.
if encoding:
return unicode
else:
return bytes
if not _PY3:
# Special case: if we have an encoding file, we can *give* it unicode,
# but we can't expect to *get* unicode.
if isinstance(fileIshObject, file):
if encoding is not None:
return basestring
else:
return bytes
from cStringIO import InputType, OutputType
from StringIO import StringIO
if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
return bytes
return default
def nativeString(s):
"""
Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
encoding if conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
"""
if not isinstance(s, (bytes, unicode)):
raise TypeError("%r is neither bytes nor unicode" % s)
if _PY3:
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
else:
if isinstance(s, unicode):
return s.encode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.decode("ascii")
return s
def _matchingString(constantString, inputString):
"""
Some functions, such as C{os.path.join}, operate on string arguments which
may be bytes or text, and wish to return a value of the same type. In
those cases you may wish to have a string constant (in the case of
C{os.path.join}, that constant would be C{os.path.sep}) involved in the
parsing or processing, that must be of a matching type in order to use
string operations on it. L{_matchingString} will take a constant string
(either L{bytes} or L{unicode}) and convert it to the same type as the
input string. C{constantString} should contain only characters from ASCII;
to ensure this, it will be encoded or decoded regardless.
@param constantString: A string literal used in processing.
@type constantString: L{unicode} or L{bytes}
@param inputString: A byte string or text string provided by the user.
@type inputString: L{unicode} or L{bytes}
@return: C{constantString} converted into the same type as C{inputString}
@rtype: the type of C{inputString}
"""
if isinstance(constantString, bytes):
otherType = constantString.decode("ascii")
else:
otherType = constantString.encode("ascii")
if type(constantString) == type(inputString):
return constantString
else:
return otherType
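# For example (illustrative, not part of the original module):
# _matchingString(u'/', b'foo/bar') returns b'/', while
# _matchingString(u'/', u'foo/bar') returns u'/'.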
if _PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or L{None} indicating a new traceback.
"""
if _PY3:
from io import StringIO as NativeStringIO
else:
from io import BytesIO as NativeStringIO
# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
def iterbytes(originalBytes):
for i in range(len(originalBytes)):
yield originalBytes[i:i+1]
def intToBytes(i):
return ("%d" % i).encode("ascii")
# Ideally we would use memoryview, but it has a number of differences from
# the Python 2 buffer() that make that impractical
# (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
# PyArg_ParseTuple differences.)
def lazyByteSlice(object, offset=0, size=None):
"""
Return a copy of the given bytes-like object.
If an offset is given, the copy starts at that offset. If a size is
given, the copy will only be of that length.
@param object: C{bytes} to be copied.
@param offset: C{int}, starting index of copy.
@param size: Optional, if an C{int} is given limit the length of copy
to this size.
"""
if size is None:
return object[offset:]
else:
return object[offset:(offset + size)]
def networkString(s):
if not isinstance(s, unicode):
raise TypeError("Can only convert text to bytes on Python 3")
return s.encode('ascii')
else:
def iterbytes(originalBytes):
return originalBytes
def intToBytes(i):
return b"%d" % i
lazyByteSlice = buffer
def networkString(s):
if not isinstance(s, str):
raise TypeError("Can only pass-through bytes on Python 2")
# Ensure we're limited to ASCII subset:
s.decode('ascii')
return s
iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as ASCII-encoded Arabic numerals.
In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.
This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:
networkString("Hello %d" % (n,))
@param s: A native string to convert to bytes if necessary.
@type s: C{str}
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
@rtype: C{bytes}
"""
try:
StringType = basestring
except NameError:
# Python 3+
StringType = str
try:
from types import InstanceType
except ImportError:
# Python 3+
InstanceType = object
try:
from types import FileType
except ImportError:
# Python 3+
FileType = IOBase
if _PY3:
import urllib.parse as urllib_parse
from html import escape
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
from http import cookiejar as cookielib
else:
import urlparse as urllib_parse
from cgi import escape
from urllib import quote as urlquote
from urllib import unquote as urlunquote
import cookielib
# Dealing with the differences in items/iteritems
if _PY3:
def iteritems(d):
return d.items()
def itervalues(d):
return d.values()
def items(d):
return list(d.items())
xrange = range
izip = zip
else:
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def items(d):
return d.items()
xrange = xrange
from itertools import izip
izip # shh pyflakes
iteritems.__doc__ = """
Return an iterable of the items of C{d}.
@type d: L{dict}
@rtype: iterable
"""
itervalues.__doc__ = """
Return an iterable of the values of C{d}.
@type d: L{dict}
@rtype: iterable
"""
items.__doc__ = """
Return a list of the items of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
def _keys(d):
"""
Return a list of the keys of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
if _PY3:
return list(d.keys())
else:
return d.keys()
def bytesEnviron():
"""
Return a L{dict} of L{os.environ} where all text-strings are encoded into
L{bytes}.
This function is POSIX only; environment variables are always text strings
on Windows.
"""
if not _PY3:
# On py2, nothing to do.
return dict(os.environ)
target = dict()
for x, y in os.environ.items():
target[os.environ.encodekey(x)] = os.environ.encodevalue(y)
return target
def _constructMethod(cls, name, self):
"""
Construct a bound method.
@param cls: The class that the method should be bound to.
@type cls: L{types.ClassType} or L{type}.
@param name: The name of the method.
@type name: native L{str}
@param self: The object that the method is bound to.
@type self: any object
@return: a bound method
@rtype: L{types.MethodType}
"""
func = cls.__dict__[name]
if _PY3:
return _MethodType(func, self)
return _MethodType(func, self, cls)
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.compat",
"OrderedDict")
if _PY3:
from base64 import encodebytes as _b64encodebytes
from base64 import decodebytes as _b64decodebytes
else:
from base64 import encodestring as _b64encodebytes
from base64 import decodestring as _b64decodebytes
def _bytesChr(i):
"""
Like L{chr} but always works on ASCII, returning L{bytes}.
@param i: The ASCII code point to return.
@type i: L{int}
@rtype: L{bytes}
"""
if _PY3:
return bytes([i])
else:
return chr(i)
try:
from sys import intern
except ImportError:
intern = intern
def _coercedUnicode(s):
"""
Coerce ASCII-only byte strings into unicode for Python 2.
In Python 2 C{unicode(b'bytes')} returns a unicode string C{'bytes'}. In
Python 3, the equivalent C{str(b'bytes')} will return C{"b'bytes'"}
instead. This function mimics the behavior for Python 2. It will decode the
byte string as ASCII. In Python 3 it simply raises a L{TypeError} when
passing a byte string. Unicode strings are returned as-is.
@param s: The string to coerce.
@type s: L{bytes} or L{unicode}
@raise UnicodeError: The input L{bytes} is not ASCII decodable.
@raise TypeError: The input is L{bytes} on Python 3.
"""
if isinstance(s, bytes):
if _PY3:
raise TypeError("Expected str not %r (bytes)" % (s,))
else:
return s.decode('ascii')
else:
return s
def _maybeMBCS(s):
"""
Convert the string C{s} to a L{unicode} string, if required.
@param s: The string to convert.
@type s: L{bytes} or L{unicode}
@return: The string, decoded using MBCS if needed.
@rtype: L{unicode}
@raises UnicodeDecodeError: If passed a byte string that cannot be decoded
using MBCS.
"""
assert sys.platform == "win32", "only reasonable on Windows"
assert type(s) in [bytes, unicode], str(type(s)) + " is not a string"
if isinstance(s, bytes):
return s.decode('mbcs')
return s
if _PY3:
unichr = chr
raw_input = input
else:
unichr = unichr
raw_input = raw_input
def _bytesRepr(bytestring):
"""
Provide a repr for a byte string that begins with 'b' on both
Python 2 and 3.
@param bytestring: The string to repr.
@type bytestring: L{bytes}
@raise TypeError: The input is not L{bytes}.
@return: The repr with a leading 'b'.
@rtype: L{bytes}
"""
if not isinstance(bytestring, bytes):
raise TypeError("Expected bytes not %r" % (bytestring,))
if _PY3:
return repr(bytestring)
else:
return 'b' + repr(bytestring)
__all__ = [
"reraise",
"execfile",
"frozenset",
"reduce",
"set",
"cmp",
"comparable",
"OrderedDict",
"nativeString",
"NativeStringIO",
"networkString",
"unicode",
"iterbytes",
"intToBytes",
"lazyByteSlice",
"StringType",
"InstanceType",
"FileType",
"items",
"iteritems",
"itervalues",
"xrange",
"urllib_parse",
"bytesEnviron",
"escape",
"urlquote",
"urlunquote",
"cookielib",
"_keys",
"_b64encodebytes",
"_b64decodebytes",
"_bytesChr",
"_coercedUnicode",
"_bytesRepr",
"intern",
"unichr",
"raw_input",
"_maybeMBCS",
]
|
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan, Yugang <yugang.fan@intel.com>
import os
import glob
import time
import sys
import json
import subprocess
import commands
import platform
from optparse import OptionParser
import build_android
import build_cordova
import build_embeddingapi
import build_extension
import build_deb
import build_msi
import build_ios
import build_iot
import varshop
import utils
import resource_only
reload(sys)
if platform.system() == "Windows":
sys.setdefaultencoding('gbk')
else:
sys.setdefaultencoding('utf8')
TOOL_VERSION = "v0.1"
VERSION_FILE = "VERSION"
PKG_TYPES = [
"apk",
"apk-aio",
"cordova-aio",
"cordova",
"embeddingapi",
"deb",
"msi",
"ios",
"iot",
"iot-aio"]
PKG_BLACK_LIST = []
PACK_TYPES = ["ant", "gradle", "maven"]
CORDOVA_PACK_TYPES = ["npm", "local"]
PKG_NAME = None
BUILD_ROOT_SRC_PKG = None
BUILD_ROOT_SRC_PKG_APP = None
BUILD_ROOT_SRC_SUB_APP = None
BUILD_ROOT_PKG = None
BUILD_ROOT_PKG_APP = None
# Variables which store in config.py
BUILD_PARAMETERS = None
BUILD_ROOT = None
BUILD_ROOT_SRC = None
BUILD_TIME = time.strftime('%Y%m%d', time.localtime(time.time()))
CROSSWALK_BRANCH = ""
CROSSWALK_VERSION = ""
DEFAULT_CMD_TIMEOUT = 600
PKG_MODES = ["shared", "embedded", "lite"]
PKG_ARCHS = ["x86", "arm", "x86_64", "arm64"]
def updateCopylistPrefix(src_default, dest_default, src_sub, dest_sub):
src_new = ""
dest_new = ""
PACK_TOOL_TAG = "PACK-TOOL-ROOT"
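    # A copylist entry starting with the PACK-TOOL-ROOT tag is resolved against
    # the pack tools folder (source side) or the build root (dest side); plain
    # entries are simply joined onto the default src/dest folders.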
if src_sub[0:len(PACK_TOOL_TAG)] == PACK_TOOL_TAG:
src_new = src_sub.replace(PACK_TOOL_TAG, BUILD_PARAMETERS.pkgpacktools)
else:
src_new = os.path.join(src_default, src_sub)
if dest_sub[0:len(PACK_TOOL_TAG)] == PACK_TOOL_TAG:
dest_new = dest_sub.replace(PACK_TOOL_TAG, BUILD_ROOT)
else:
dest_new = os.path.join(dest_default, dest_sub)
return (src_new, dest_new)
def buildSRC(src=None, dest=None, build_json=None):
if not os.path.exists(src):
LOG.info("+Src dir does not exist, skip build src process ...")
return True
if not utils.doCopy(src, dest):
return False
if "blacklist" in build_json:
if build_json["blacklist"].count("") > 0:
build_json["blacklist"].remove("")
black_file_list = []
for i_black in build_json["blacklist"]:
black_file_list = black_file_list + \
glob.glob(os.path.join(dest, i_black))
black_file_list = list(set(black_file_list))
if not utils.doRemove(black_file_list):
return False
if "copylist" in build_json:
for i_s_key in build_json["copylist"].keys():
if i_s_key and build_json["copylist"][i_s_key]:
(src_updated, dest_updated) = updateCopylistPrefix(
src, dest, i_s_key, build_json["copylist"][i_s_key])
if not utils.doCopy(src_updated, dest_updated):
return False
return True
def exitHandler(return_code=1):
LOG.info("+Cleaning build root folder ...")
if not BUILD_PARAMETERS.bnotclean and os.path.exists(BUILD_ROOT):
if not utils.doRemove([BUILD_ROOT]):
LOG.error("Fail to clean build root, exit ...")
sys.exit(1)
if return_code == 0:
LOG.info("================ DONE ================")
else:
LOG.error(
"================ Found Something Wrong !!! ================")
sys.exit(return_code)
def prepareBuildRoot():
LOG.info("+Preparing build root folder ...")
global BUILD_ROOT
global BUILD_ROOT_SRC
global BUILD_ROOT_SRC_PKG
global BUILD_ROOT_SRC_PKG_APP
global BUILD_ROOT_SRC_SUB_APP
global BUILD_ROOT_PKG
global BUILD_ROOT_PKG_APP
while True:
BUILD_ROOT = os.path.join("/tmp", utils.getRandomStr())
if os.path.exists(BUILD_ROOT):
continue
else:
break
BUILD_ROOT_SRC = os.path.join(BUILD_ROOT, PKG_NAME)
BUILD_ROOT_SRC_PKG = os.path.join(BUILD_ROOT, "pkg")
BUILD_ROOT_SRC_PKG_APP = os.path.join(BUILD_ROOT, "pkg-app")
BUILD_ROOT_SRC_SUB_APP = os.path.join(BUILD_ROOT, "sub-app")
BUILD_ROOT_PKG = os.path.join(BUILD_ROOT, "pkg", "opt", PKG_NAME)
BUILD_ROOT_PKG_APP = os.path.join(BUILD_ROOT, "pkg-app", "opt", PKG_NAME)
if not utils.doCopy(BUILD_PARAMETERS.srcdir, BUILD_ROOT_SRC):
return False
else:
utils.replaceUserString(
BUILD_ROOT_SRC,
'*',
'TESTER-HOME-DIR',
"/home/%s" %
BUILD_PARAMETERS.user)
if not utils.doRemove(
glob.glob(os.path.join(BUILD_ROOT_SRC, "%s*.zip" % PKG_NAME))):
return False
return True
def packAPP(build_json=None, app_src=None, app_dest=None, app_name=None):
LOG.info("Packing %s(%s)" % (app_name, app_src))
if not os.path.exists(app_dest):
try:
os.makedirs(app_dest)
except Exception as e:
LOG.error("Fail to init package install dest dir: %s" % e)
return False
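    # Dispatch to the matching packer based on the requested pkg type and the
    # app type declared in the build json.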
    app_type = utils.safelyGetValue(build_json, 'app-type')
    if utils.checkContains(BUILD_PARAMETERS.pkgtype, "APK") and app_type == "EXTENSION":
if not build_extension.packExtension(build_json, app_src, app_dest, app_name):
return False
if not build_android.packAPK(build_json, app_src, app_dest, app_name):
return False
elif utils.checkContains(BUILD_PARAMETERS.pkgtype, "APK") and app_tpye != "EMBEDDINGAPI":
if not build_android.packAPK(build_json, app_src, app_dest, app_name):
return False
elif utils.checkContains(BUILD_PARAMETERS.pkgtype, "CORDOVA"):
if not build_cordova.packCordova(build_json, app_src, app_dest, app_name):
return False
elif utils.checkContains(BUILD_PARAMETERS.pkgtype, "EMBEDDINGAPI") or app_tpye == "EMBEDDINGAPI":
app_version = None
if "_" in app_name:
index_flag = app_name.index("_")
app_version = app_name[index_flag + 1:]
if app_version:
utils.replaceUserString(
app_src,
'AndroidManifest.xml',
'org.xwalk.embedding.test',
"org.xwalk.embedding.test." +
app_version)
utils.replaceUserString(
app_src,
'AndroidManifest.xml',
'org.xwalk.embedding.asynctest',
"org.xwalk.embedding.asynctest." +
app_version)
utils.replaceUserString(
app_src,
'AndroidManifest.xml',
'EmbeddingApiTestUnit',
"EmbeddingApiTestUnit" +
app_version)
utils.replaceUserString(
app_src,
'AndroidManifest.xml',
'EmbeddingApiAsynctestUnit',
"EmbeddingApiAsynctestUnit" +
app_version)
if app_version != "v6":
utils.replaceUserString(
app_src,
'AndroidManifest.xml',
'<provider android:name=\"org.xwalk.embedding.base.TestContentProvider\"' +
' android:authorities=\"org.xwalk.embedding.base.TestContentProvider\" />',
"")
main_dest = os.path.join(app_src, "src/org/xwalk/embedding")
utils.replaceUserString(
main_dest,
'MainActivity.java',
'org.xwalk.embedding.test',
"org.xwalk.embedding.test." +
app_version)
utils.replaceUserString(
main_dest,
'MainActivity.java',
'org.xwalk.embedding.asynctest',
"org.xwalk.embedding.asynctest." +
app_version)
if BUILD_PARAMETERS.packtype and utils.checkContains(
BUILD_PARAMETERS.packtype, "GRADLE"):
if not build_embeddingapi.packEmbeddingAPI_gradle(
build_json, app_src, app_dest, app_name, app_version):
return False
elif BUILD_PARAMETERS.packtype and utils.checkContains(BUILD_PARAMETERS.packtype, "MAVEN"):
if not build_embeddingapi.packEmbeddingAPI_maven(
build_json, app_src, app_dest, app_name, app_version):
return False
else:
if not build_embeddingapi.packEmbeddingAPI_ant(
build_json, app_src, app_dest, app_name, app_version):
return False
elif utils.checkContains(BUILD_PARAMETERS.pkgtype, "DEB"):
if not build_deb.packDeb(build_json, app_src, app_dest, app_name):
return False
elif utils.checkContains(BUILD_PARAMETERS.pkgtype, "MSI"):
if not build_msi.packMsi(build_json, app_src, app_dest, app_name):
return False
elif utils.checkContains(BUILD_PARAMETERS.pkgtype, "ios"):
if not build_ios.packIOS(build_json, app_src, app_dest, app_name):
return False
elif utils.checkContains(BUILD_PARAMETERS.pkgtype, "iot"):
if not build_iot.packIoT(build_json, app_src, app_dest, app_name):
return False
else:
LOG.error("Got wrong pkg type: %s" % BUILD_PARAMETERS.pkgtype)
return False
LOG.info("Success to pack APP: %s" % app_name)
return True
def createIndexFile(index_file_path=None, hosted_app=None):
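    # Write a tiny HTML page that immediately redirects to the packed suite's
    # webrunner index (served over HTTP for hosted apps, locally otherwise).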
try:
if hosted_app:
index_url = "http://127.0.0.1:8080/opt/%s/webrunner/index.html?" \
"testsuite=../tests.xml&testprefix=../../.." % PKG_NAME
else:
index_url = "opt/%s/webrunner/index.html?testsuite=../tests.xml" \
"&testprefix=../../.." % PKG_NAME
html_content = "<!doctype html><head><meta http-equiv='Refresh' " \
"content='1; url=%s'></head>" % index_url
index_file = open(index_file_path, "w")
index_file.write(html_content)
index_file.close()
except Exception as e:
LOG.error("Fail to create index.html for top-app: %s" % e)
return False
LOG.info("Success to create index file %s" % index_file_path)
return True
def buildSubAPP(app_dir=None, build_json=None, app_dest_default=None):
app_dir_inside = utils.safelyGetValue(build_json, "app-dir")
if app_dir_inside:
app_dir = app_dir_inside
LOG.info("+Building sub APP(s) from %s ..." % app_dir)
app_dir = os.path.join(BUILD_ROOT_SRC, app_dir)
app_name = utils.safelyGetValue(build_json, "app-name")
if not app_name:
app_name = os.path.basename(app_dir)
app_src = os.path.join(BUILD_ROOT_SRC_SUB_APP, app_name)
if buildSRC(app_dir, app_src, build_json):
app_dest = utils.safelyGetValue(build_json, "install-path")
if app_dest:
app_dest = os.path.join(app_dest_default, app_dest)
else:
app_dest = app_dest_default
if utils.safelyGetValue(build_json, "all-apps") == "true":
app_dirs = os.listdir(app_src)
apps_num = 0
for i_app_dir in app_dirs:
if os.path.isdir(os.path.join(app_src, i_app_dir)):
i_app_name = os.path.basename(i_app_dir)
if not packAPP(
build_json, os.path.join(app_src, i_app_name),
app_dest, i_app_name):
return False
else:
apps_num = apps_num + 1
if apps_num > 0:
LOG.info("Totally packed %d apps in %s" % (apps_num, app_dir))
return True
else:
return packAPP(build_json, app_src, app_dest, app_name)
return False
def buildPKGAPP(build_json=None):
LOG.info("+Building package APP ...")
if utils.safelyGetValue(build_json, "apk-type") == "MANIFEST":
if not os.path.exists(os.path.join(BUILD_ROOT_SRC, "manifest.json")):
LOG.error("Not found manifest.json in suite folder, please check!")
sys.exit(1)
if not utils.doCopy(os.path.join(BUILD_ROOT_SRC, "manifest.json"),
os.path.join(BUILD_ROOT_SRC_PKG_APP, "manifest.json")):
return False
if os.path.exists(os.path.join(BUILD_ROOT_SRC, "icon.png")):
if not utils.doCopy(os.path.join(BUILD_ROOT_SRC, "icon.png"),
os.path.join(BUILD_ROOT_SRC_PKG_APP, "icon.png")):
return False
if os.path.exists(os.path.join(BUILD_ROOT_SRC, "icon.ico")):
if not utils.doCopy(os.path.join(BUILD_ROOT_SRC, "icon.ico"),
os.path.join(BUILD_ROOT_SRC_PKG_APP, "icon.ico")):
return False
hosted_app = False
if utils.safelyGetValue(build_json, "hosted-app") == "true":
hosted_app = True
if not createIndexFile(
os.path.join(BUILD_ROOT_SRC_PKG_APP, "index.html"), hosted_app):
return False
if not hosted_app:
if "blacklist" not in build_json:
build_json.update({"blacklist": []})
build_json["blacklist"].extend(PKG_BLACK_LIST)
if not buildSRC(BUILD_ROOT_SRC, BUILD_ROOT_PKG_APP, build_json):
return False
if "subapp-list" in build_json:
for i_sub_app in build_json["subapp-list"].keys():
if not buildSubAPP(
i_sub_app, build_json["subapp-list"][i_sub_app],
BUILD_ROOT_PKG_APP):
return False
if not packAPP(
build_json, BUILD_ROOT_SRC_PKG_APP, BUILD_ROOT_PKG, PKG_NAME):
return False
return True
def buildPKG(build_json=None):
if "blacklist" not in build_json:
build_json.update({"blacklist": []})
build_json["blacklist"].extend(PKG_BLACK_LIST)
if not buildSRC(BUILD_ROOT_SRC, BUILD_ROOT_PKG, build_json):
return False
if BUILD_PARAMETERS.docrootonly:
return True
if "subapp-list" in build_json:
for i_sub_app in build_json["subapp-list"].keys():
if not buildSubAPP(
i_sub_app, build_json["subapp-list"][i_sub_app],
BUILD_ROOT_PKG):
return False
if "pkg-app" in build_json:
if not buildPKGAPP(build_json["pkg-app"]):
return False
return True
def main():
global LOG
LOG = utils.getLogger("pack-tool")
try:
usage = "Usage: ./pack.py -t apk -m shared -a x86"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-c",
"--cfg",
dest="pkgcfg",
help="specify the path of config json file")
opts_parser.add_option(
"-t",
"--type",
dest="pkgtype",
help="specify the pkg type, e.g. apk, cordova ...")
opts_parser.add_option(
"-m",
"--mode",
dest="pkgmode",
help="specify the apk mode, not for embeddingapi, e.g. shared, embedded")
opts_parser.add_option(
"-a",
"--arch",
dest="pkgarch",
help="specify the apk arch, not for embeddingapi, e.g. x86, arm")
opts_parser.add_option(
"-d",
"--dest",
dest="destdir",
help="specify the installation folder for packed package")
opts_parser.add_option(
"-s",
"--src",
dest="srcdir",
help="specify the path of pkg resource for packing")
opts_parser.add_option(
"--tools",
dest="pkgpacktools",
help="specify the parent folder of pack tools")
opts_parser.add_option(
"--notclean",
dest="bnotclean",
action="store_true",
help="disable the build root clean after the packing")
opts_parser.add_option(
"-v",
"--version",
dest="bversion",
action="store_true",
help="show this tool's version")
opts_parser.add_option(
"-u",
"--user",
dest="user",
help="specify the user in inst.py")
opts_parser.add_option(
"--sub-version",
dest="subversion",
help="specify the embeddingapi, cordova sub version, e.g. v1, v2, v3 ...")
opts_parser.add_option(
"--pkg-version",
dest="pkgversion",
help="specify the crosswalk version, e.g. 18.48.477.13 " \
"or the absolute path of the specific crosswalk binary")
opts_parser.add_option(
"--pack-type",
dest="packtype",
help="specify the pack type, e.g. gradle, maven")
opts_parser.add_option(
"--notdebug",
dest="bnotdebug",
action="store_true",
help="specify the packed pkg is not debug mode")
opts_parser.add_option(
"--resource-only",
dest="resourceonly",
action="store_true",
help="only restore resources to project root")
opts_parser.add_option(
"--docroot-only",
dest = "docrootonly",
action = "store_true",
default = False,
help = "pack docroot only for webtestingservice")
if len(sys.argv) == 1:
sys.argv.append("-h")
global BUILD_PARAMETERS
(BUILD_PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
LOG.error("Got wrong options: %s, exit ..." % e)
sys.exit(1)
if BUILD_PARAMETERS.bversion:
print "Version: %s" % TOOL_VERSION
sys.exit(0)
if not BUILD_PARAMETERS.srcdir:
BUILD_PARAMETERS.srcdir = os.getcwd()
BUILD_PARAMETERS.srcdir = os.path.expanduser(BUILD_PARAMETERS.srcdir)
if not BUILD_PARAMETERS.user:
BUILD_PARAMETERS.user = "app"
if not os.path.exists(
os.path.join(BUILD_PARAMETERS.srcdir, "..", "..", VERSION_FILE)):
if not os.path.exists(
os.path.join(BUILD_PARAMETERS.srcdir, "..", VERSION_FILE)):
if not os.path.exists(
os.path.join(BUILD_PARAMETERS.srcdir, VERSION_FILE)):
LOG.info(
"Not found pkg version file, try to use option --pkg-version")
pkg_version_file_path = None
else:
pkg_version_file_path = os.path.join(
BUILD_PARAMETERS.srcdir, VERSION_FILE)
else:
pkg_version_file_path = os.path.join(
BUILD_PARAMETERS.srcdir, "..", VERSION_FILE)
else:
pkg_version_file_path = os.path.join(
BUILD_PARAMETERS.srcdir, "..", "..", VERSION_FILE)
try:
pkg_main_version = 0
pkg_release_version = 1
if BUILD_PARAMETERS.pkgversion:
LOG.info("Using %s as pkg version " % BUILD_PARAMETERS.pkgversion)
pkg_main_version = BUILD_PARAMETERS.pkgversion
CROSSWALK_BRANCH = "master"
else:
if pkg_version_file_path is not None:
LOG.info("Using pkg version file: %s" % pkg_version_file_path)
with open(pkg_version_file_path, "rt") as pkg_version_file:
pkg_version_raw = pkg_version_file.read()
pkg_version_json = json.loads(pkg_version_raw)
pkg_main_version = pkg_version_json["main-version"]
pkg_release_version = pkg_version_json["release-version"]
CROSSWALK_BRANCH = pkg_version_json["crosswalk-branch"]
except Exception as e:
LOG.error("Fail to read pkg version file: %s, exit ..." % e)
sys.exit(1)
CROSSWALK_VERSION = pkg_main_version
if not BUILD_PARAMETERS.pkgtype:
LOG.error("No pkg type provided, exit ...")
sys.exit(1)
elif not BUILD_PARAMETERS.pkgtype in PKG_TYPES:
LOG.error("Wrong pkg type, only support: %s, exit ..." %
PKG_TYPES)
sys.exit(1)
if BUILD_PARAMETERS.resourceonly:
LOG.info("Starting copy resource only")
resource_only.copy_resource(BUILD_PARAMETERS.pkgtype)
sys.exit(0)
if BUILD_PARAMETERS.pkgtype == "apk" or \
BUILD_PARAMETERS.pkgtype == "apk-aio":
if not BUILD_PARAMETERS.pkgmode:
LOG.error("No pkg mode option provided, exit ...")
sys.exit(1)
elif not BUILD_PARAMETERS.pkgmode in PKG_MODES:
LOG.error(
"Wrong pkg mode option provided, only support:%s, exit ..." %
PKG_MODES)
sys.exit(1)
if not BUILD_PARAMETERS.pkgarch:
LOG.error("No pkg arch option provided, exit ...")
sys.exit(1)
elif not BUILD_PARAMETERS.pkgarch in PKG_ARCHS:
LOG.error(
"Wrong pkg arch option provided, only support:%s, exit ..." %
PKG_ARCHS)
sys.exit(1)
if BUILD_PARAMETERS.pkgtype == "apk-aio" or \
BUILD_PARAMETERS.pkgtype == "cordova-aio":
if not BUILD_PARAMETERS.destdir or not os.path.exists(
BUILD_PARAMETERS.destdir):
LOG.error("No all-in-one installation dest dir found, exit ...")
sys.exit(1)
elif not BUILD_PARAMETERS.destdir:
BUILD_PARAMETERS.destdir = BUILD_PARAMETERS.srcdir
BUILD_PARAMETERS.destdir = os.path.expanduser(BUILD_PARAMETERS.destdir)
if not BUILD_PARAMETERS.pkgpacktools:
BUILD_PARAMETERS.pkgpacktools = os.path.join(
BUILD_PARAMETERS.srcdir, "..", "..", "tools")
BUILD_PARAMETERS.pkgpacktools = os.path.expanduser(
BUILD_PARAMETERS.pkgpacktools)
config_json = None
if BUILD_PARAMETERS.pkgcfg:
config_json_file_path = BUILD_PARAMETERS.pkgcfg
else:
config_json_file_path = os.path.join(
BUILD_PARAMETERS.srcdir, "suite.json")
try:
LOG.info("Using config json file: %s" % config_json_file_path)
with open(config_json_file_path, "rt") as config_json_file:
config_raw = config_json_file.read()
config_json = json.loads(config_raw)
except Exception as e:
LOG.error("Fail to read config json file: %s, exit ..." % e)
sys.exit(1)
global PKG_NAME
PKG_NAME = utils.safelyGetValue(config_json, "pkg-name")
if not PKG_NAME:
PKG_NAME = os.path.basename(BUILD_PARAMETERS.srcdir)
LOG.warning(
"Fail to read pkg name from json, "
"using src dir name as pkg name ...")
LOG.info("================= %s (%s-%s) ================" %
(PKG_NAME, pkg_main_version, pkg_release_version))
if not utils.safelyGetValue(config_json, "pkg-list"):
LOG.error("Fail to read pkg-list, exit ...")
sys.exit(1)
pkg_json = None
global parameters_type
parameters_type = None
if BUILD_PARAMETERS.pkgtype == "cordova" or BUILD_PARAMETERS.pkgtype == "cordova-aio":
if BUILD_PARAMETERS.pkgarch and not BUILD_PARAMETERS.pkgarch in PKG_ARCHS:
LOG.error("Wrong pkg-arch, only support: %s, exit ..." %
PKG_ARCHS)
sys.exit(1)
if BUILD_PARAMETERS.pkgmode and not BUILD_PARAMETERS.pkgmode in PKG_MODES:
LOG.error("Wrong pkg-mode, only support: %s, exit ..." %
PKG_MODES)
sys.exit(1)
if BUILD_PARAMETERS.packtype and not BUILD_PARAMETERS.packtype in CORDOVA_PACK_TYPES:
LOG.error("cordova packtype can only be npm, local")
sys.exit(1)
if BUILD_PARAMETERS.pkgtype == "embeddingapi":
if BUILD_PARAMETERS.packtype and not BUILD_PARAMETERS.packtype in PACK_TYPES:
LOG.error("embeddingapi packtype can only be gradle, maven or ant")
sys.exit(1)
if BUILD_PARAMETERS.subversion:
BUILD_PARAMETERS.pkgtype = BUILD_PARAMETERS.pkgtype + \
BUILD_PARAMETERS.subversion
all_pkg_string = "".join(config_json["pkg-list"].keys())
if parameters_type and parameters_type in all_pkg_string:
for i_pkg in config_json["pkg-list"].keys():
i_pkg_list = i_pkg.replace(" ", "").split(",")
if parameters_type in i_pkg_list:
pkg_json = config_json["pkg-list"][i_pkg]
break
elif BUILD_PARAMETERS.pkgtype in all_pkg_string:
for i_pkg in config_json["pkg-list"].keys():
i_pkg_list = i_pkg.replace(" ", "").split(",")
if BUILD_PARAMETERS.pkgtype in i_pkg_list:
pkg_json = config_json["pkg-list"][i_pkg]
break
if pkg_json == config_json['pkg-list'].get('apk') and BUILD_PARAMETERS.subversion is not None:
pkg_json = config_json["pkg-list"][BUILD_PARAMETERS.subversion]
if not pkg_json:
LOG.error("Fail to read pkg json, exit ...")
sys.exit(1)
if not prepareBuildRoot():
exitHandler(1)
if "pkg-blacklist" in config_json:
PKG_BLACK_LIST.extend(config_json["pkg-blacklist"])
try:
varshop.setValue("BUILD_PARAMETERS", BUILD_PARAMETERS)
varshop.setValue("BUILD_ROOT", BUILD_ROOT)
varshop.setValue("BUILD_ROOT_SRC", BUILD_ROOT_SRC)
varshop.setValue("BUILD_TIME", BUILD_TIME)
varshop.setValue("CROSSWALK_BRANCH", CROSSWALK_BRANCH)
varshop.setValue("CROSSWALK_VERSION", CROSSWALK_VERSION)
varshop.setValue("DEFAULT_CMD_TIMEOUT", DEFAULT_CMD_TIMEOUT)
varshop.setValue("PKG_MODES", PKG_MODES)
varshop.setValue("PKG_ARCHS", PKG_ARCHS)
except Exception as e:
LOG.error("Fail to set global vars: %s, exit ..." % e)
sys.exit(1)
if not buildPKG(pkg_json):
exitHandler(1)
LOG.info("+Building package ...")
if BUILD_PARAMETERS.pkgtype == "apk-aio" or \
BUILD_PARAMETERS.pkgtype == "cordova-aio" or \
BUILD_PARAMETERS.pkgtype == "iot-aio":
pkg_file_list = os.listdir(os.path.join(BUILD_ROOT, "pkg"))
for i_file in pkg_file_list:
if not utils.doCopy(
os.path.join(BUILD_ROOT, "pkg", i_file),
os.path.join(BUILD_PARAMETERS.destdir, i_file)):
exitHandler(1)
elif BUILD_PARAMETERS.pkgtype == "embeddingapi" and BUILD_PARAMETERS.subversion:
pkg_file = os.path.join(
BUILD_PARAMETERS.destdir,
"%s-%s-%s-%s.%s.zip" %
(PKG_NAME,
pkg_main_version,
pkg_release_version,
BUILD_PARAMETERS.subversion,
BUILD_PARAMETERS.pkgtype))
LOG.info("pkg_file: %s" % pkg_file)
if not utils.zipDir(os.path.join(BUILD_ROOT, "pkg"), pkg_file):
exitHandler(1)
elif BUILD_PARAMETERS.pkgtype.startswith("embeddingapi") and BUILD_PARAMETERS.packtype:
pkg_file = os.path.join(
BUILD_PARAMETERS.destdir,
"%s-%s-%s.%s-%s.zip" %
(PKG_NAME,
pkg_main_version,
pkg_release_version,
BUILD_PARAMETERS.pkgtype,
BUILD_PARAMETERS.packtype))
if not utils.zipDir(os.path.join(BUILD_ROOT, "pkg"), pkg_file):
exitHandler(1)
else:
pkg_file = os.path.join(
BUILD_PARAMETERS.destdir,
"%s-%s-%s.%s.zip" %
(PKG_NAME,
pkg_main_version,
pkg_release_version,
BUILD_PARAMETERS.pkgtype))
if not utils.zipDir(os.path.join(BUILD_ROOT, "pkg"), pkg_file):
exitHandler(1)
if __name__ == "__main__":
main()
exitHandler(0)
|
|
# -*- coding: utf-8 -*-
# Copyright 2006 - 2012 Philipp Wollermann
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
## Model classes
class Domain(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255, unique=True)
ns1 = models.CharField(max_length=255, default="ns.inwx.de.")
ns2 = models.CharField(max_length=255, default="ns2.inwx.de.", blank=True)
ns3 = models.CharField(max_length=255, default="ns3.inwx.de.", blank=True)
ns4 = models.CharField(max_length=255, blank=True)
ns5 = models.CharField(max_length=255, blank=True)
ns6 = models.CharField(max_length=255, blank=True)
mx1 = models.CharField(max_length=255, blank=True)
mx2 = models.CharField(max_length=255, blank=True)
mx3 = models.CharField(max_length=255, blank=True)
jabber = models.CharField(max_length=255, blank=True)
is_gafyd = models.BooleanField(default=False)
serial = models.IntegerField(default=0, blank=True, editable=False)
active = models.BooleanField(default=True)
zonehash = models.CharField(max_length=128, blank=True, editable=False)
def __unicode__(self):
return u"%s" % (self.name)
def save(self, *args, **kwargs):
from datetime import date
t = date.today().strftime("%Y%m%d")
# Do we already have a serial?
if self.serial == 0:
# No
c = 1
else:
# Yes, but is it from today?
d = str(self.serial)[:8]
if (t != d):
# No
c = 1
else:
# Yes
c = int(str(self.serial)[8:]) + 1
self.serial = "%s%02i" % (t, c)
super(Domain, self).save(*args, **kwargs)
class Meta:
ordering = ["name"]
verbose_name = _(u"Domain")
verbose_name_plural = _(u"Domains")
class NSEntry(models.Model):
NSENTRY_TYPE_CHOICES = (
("A", "A"),
("AAAA", "AAAA"),
("CNAME", "CNAME"),
("MX", "MX"),
("TXT", "TXT"),
("SRV", "SRV"),
("NS", "NS"),
)
id = models.AutoField(primary_key=True)
host = models.CharField(max_length=64, blank=True)
domain = models.ForeignKey(Domain)
type = models.CharField(max_length=16, choices=NSENTRY_TYPE_CHOICES, default="A")
value = models.CharField(max_length=512)
port = models.IntegerField(null=True, blank=True)
weight = models.IntegerField(null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
def __unicode__(self):
return u"%s.%s %s %s" % (self.host, self.domain, self.type, self.value)
def fqdn(self):
return (u"%s.%s." % (self.host, self.domain)).strip(".")
class Meta:
ordering = ["domain__name", "type", "host"]
unique_together = (("host", "domain", "type"),)
verbose_name = _(u"Custom DNS record")
verbose_name_plural = _(u"Custom DNS records")
class IPAddress(models.Model):
id = models.AutoField(primary_key=True)
ip = models.IPAddressField(unique=True)
def __unicode__(self):
return u"%s" % (self.ip,)
class Meta:
ordering = ["ip"]
verbose_name = _(u"IP address")
verbose_name_plural = _(u"IP addresses")
CHOICES_FORCE_WWW = (
("strip", _(u"strip www subdomain")),
("prepend", _(u"force www subdomain")),
("ignore", _(u"both is okay")),
)
class VirtualHost(models.Model):
id = models.AutoField(primary_key=True)
owner = models.ForeignKey(User)
name = models.CharField(max_length=255, blank=True, verbose_name=_(u"Hostname"), help_text=_(u"Sometimes also called 'subdomain' ..."))
domain = models.ForeignKey(Domain)
ipport = models.ForeignKey(IPAddress, verbose_name=_(u"IP"))
active = models.BooleanField(default=True)
force_www = models.CharField(max_length=16, choices=CHOICES_FORCE_WWW, default="ignore", verbose_name=_(u"Force WWW"))
ssl_enabled = models.BooleanField(default=False, verbose_name=_(u"SSL enabled"))
ssl_force = models.BooleanField(default=False, verbose_name=_(u"Force SSL"))
ssl_cert = models.CharField(max_length=250, blank=True, verbose_name=_(u"SSL certificate"))
ssl_key = models.CharField(max_length=250, blank=True, verbose_name=_(u"SSL Private Key"))
enable_php = models.BooleanField(default=True, verbose_name=_(u"Enable PHP"))
apache_config = models.TextField(blank=True, verbose_name=_(u"Apache config"))
apache_enabled = models.BooleanField(default=True, verbose_name=_(u"Apache enabled"))
nginx_config = models.TextField(blank=True, verbose_name=_(u"nginx config"))
php_config = models.ForeignKey("PHPConfig", null=True, blank=True)
sortkey = models.CharField(max_length=255, blank=True, editable=False)
def __unicode__(self):
return u"%s.%s" % (self.name, self.domain)
def save(self, *args, **kwargs):
self.sortkey = "%s-%s" % (self.domain.name.replace(".", "-"), self.name.replace(".", "-"))
super(VirtualHost, self).save(*args, **kwargs)
def fqdn(self):
return (u"%s.%s." % (self.name, self.domain)).strip(".")
fqdn.short_description = "FQDN"
class Meta:
ordering = ["sortkey", "domain__name", "name"]
unique_together = (("name", "domain"),)
verbose_name = _(u"Webspace")
verbose_name_plural = _(u"Webspaces")
class Alias(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=64, blank=True, verbose_name=_(u"Hostname"))
domain = models.ForeignKey(Domain)
target = models.CharField(max_length=255)
www_alias = models.BooleanField(default=True)
active = models.BooleanField(default=True)
def __unicode__(self):
return u"%s.%s -> %s" % (self.name, self.domain.name, self.target)
def fqdn(self):
return (u"%s.%s." % (self.name, self.domain)).strip(".")
class Meta:
ordering = ["domain__name", "name"]
unique_together = (("name", "domain"),)
verbose_name = _(u"HTTP forwarding")
verbose_name_plural = _(u"HTTP forwardings")
class DirectAlias(models.Model):
id = models.AutoField(primary_key=True)
host = models.ForeignKey(VirtualHost)
name = models.CharField(max_length=255, blank=True, verbose_name=_(u"Hostname"), help_text=_(u"Sometimes also called 'subdomain' ..."))
domain = models.ForeignKey(Domain)
active = models.BooleanField(default=True)
def fqdn(self):
return (u"%s.%s." % (self.name, self.domain)).strip(".")
class Meta:
verbose_name = _(u"Direct alias")
verbose_name_plural = _(u"Direct aliases")
class Mailbox(models.Model):
id = models.AutoField(primary_key=True)
mail = models.CharField(max_length=75, verbose_name=_(u"Username"), help_text=_(u"This is the username, the part before the @ sign!"))
domain = models.ForeignKey(Domain, help_text=_(u"Which domain should become part of the e-mail address? (This is the part after the @ sign!)"))
password = models.CharField(max_length=256)
quota = models.IntegerField(verbose_name=_(u"Quota"), help_text=_(u"Specify the quota of this mail account in megabytes"))
active = models.BooleanField(default=True)
def __unicode__(self):
return u"%s@%s (%s)" % (self.mail, self.domain, self.quota)
class Meta:
ordering = ["domain", "mail"]
unique_together = (("mail", "domain"),)
verbose_name = _(u"Mailbox")
verbose_name_plural = _(u"Mailboxes")
class Forwarding(models.Model):
id = models.AutoField(primary_key=True)
source = models.CharField(max_length=75, verbose_name=_(u"Username"), help_text=_(u"This is the username, the part before the @ sign! Leave blank for catch-all."), blank=True)
domain = models.ForeignKey(Domain, help_text=_(u"Which domain should become part of the e-mail address? (This is the part after the @ sign!)"))
target = models.CharField(max_length=8000, verbose_name=_(u"Destination address"), help_text=_(u"To which destination address shall the mail be forwarded?"))
active = models.BooleanField(default=True)
def email(self):
return u"%s@%s" % (self.source, self.domain)
email.short_description = _(u"E-Mail Address")
def __unicode__(self):
return u"%s -> %s" % (self.email(), self.target)
class Meta:
ordering = ["domain", "source"]
unique_together = (("source", "domain"),)
verbose_name = _(u"E-Mail forwarding")
verbose_name_plural = _(u"E-Mail forwardings")
class PHPConfig(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=75, unique=True, verbose_name=_(u"Config name"))
short_open_tag = models.BooleanField(verbose_name=_(u"Short open tag"), help_text=_(u"""
<strong>This directive determines whether or not PHP will recognize code between
<? and ?> tags as PHP source which should be processed as such.</strong><br /><br />
It's been recommended for several years that you not use the short tag "short cut" and
    instead use the full <?php and ?> tag combination. With the widespread use
of XML and use of these tags by other languages, the server can become easily
confused and end up parsing the wrong code in the wrong context. But because
this short cut has been a feature for such a long time, it's currently still
supported for backwards compatibility, but we recommend you don't use them.<br /><br />
see <a href="http://php.net/short-open-tag">http://php.net/short-open-tag</a>
"""))
max_execution_time = models.IntegerField(verbose_name=_(u"Max. execution time"), help_text=_(u"""
<strong>Maximum execution time of each script, in seconds</strong><br /><br />
see <a href="http://php.net/max-execution-time">http://php.net/max-execution-time</a>
"""))
max_input_time = models.IntegerField(verbose_name=_(u"Max. input time"), help_text=_(u"""
<strong>Maximum amount of time each script may spend parsing request data.</strong><br /><br />
    It's a good idea to limit this time on production servers in order to eliminate unexpectedly
long running scripts.<br /><br />
see <a href="http://php.net/max-input-time">http://php.net/max-input-time</a>
"""))
memory_limit = models.CharField(max_length=20, verbose_name=_(u"Memory limit"), help_text=_(u"""
<strong>Maximum amount of memory a script may consume (128MB)</strong><br /><br />
see <a href="http://php.net/memory-limit">http://php.net/memory-limit</a>
"""))
post_max_size = models.CharField(max_length=20, verbose_name=_(u"POST request max. size"), help_text=_(u"""
<strong>Maximum size of POST data that PHP will accept.</strong><br /><br />
see <a href="http://php.net/post-max-size">http://php.net/post-max-size</a>
"""))
upload_max_filesize = models.CharField(max_length=20, verbose_name=_(u"File-upload max. filesize"), help_text=_(u"""
<strong>Maximum allowed size for uploaded files.</strong><br /><br />
see <a href="http://php.net/upload-max-filesize">http://php.net/upload-max-filesize</a>
"""))
allow_call_time_pass_reference = models.BooleanField(verbose_name=_(u"Allow call time pass reference"), help_text=_(u"""
<strong>This directive allows you to enable and disable warnings which PHP will issue
if you pass a value by reference at function call time.</strong><br /><br />
Passing values by
reference at function call time is a deprecated feature which will be removed
from PHP at some point in the near future. The acceptable method for passing a
    value by reference to a function is by declaring the reference in the function's
definition, not at call time. This directive does not disable this feature, it
only determines whether PHP will warn you about it or not. These warnings
    should be enabled in development environments only.<br /><br />
see <a href="http://php.net/allow-call-time-pass-reference">http://php.net/allow-call-time-pass-reference</a>
"""))
error_reporting = models.CharField(max_length=100, verbose_name=_(u"Error reporting"), help_text=_(u"""
<strong>This directive informs PHP of which errors, warnings and notices you would like
it to take action for.</strong><br /><br />
The recommended way of setting values for this
directive is through the use of the error level constants and bitwise
operators. The error level constants are below here for convenience as well as
some common settings and their meanings.<br /><br />
<strong>Error Level Constants:</strong><br /><br />
E_ALL: All errors and warnings (includes E_STRICT as of PHP 6.0.0)<br /><br />
E_ERROR: fatal run-time errors<br /><br />
E_RECOVERABLE_ERROR: almost fatal run-time errors<br /><br />
E_WARNING: run-time warnings (non-fatal errors)<br /><br />
E_PARSE: compile-time parse errors<br /><br />
E_NOTICE: run-time notices (these are warnings which often result from a bug in your code, but it's possible that it was
    intentional, e.g., using an uninitialized variable and relying on the fact it's automatically initialized to an empty string)<br /><br />
E_STRICT: run-time notices, enable to have PHP suggest changes to your code which will ensure the best interoperability and forward compatibility of your code<br /><br />
E_CORE_ERROR: fatal errors that occur during PHP's initial startup<br /><br />
E_CORE_WARNING: warnings (non-fatal errors) that occur during PHP's initial startup<br /><br />
E_COMPILE_ERROR: fatal compile-time errors<br /><br />
E_COMPILE_WARNING: compile-time warnings (non-fatal errors)<br /><br />
E_USER_ERROR: user-generated error message<br /><br />
E_USER_WARNING: user-generated warning message<br /><br />
E_USER_NOTICE: user-generated notice message<br /><br />
E_DEPRECATED: warn about code that will not work in future versions of PHP<br /><br />
E_USER_DEPRECATED: user-generated deprecation warnings<br /><br />
<strong>Common Values:</strong><br /><br />
E_ALL & ~E_NOTICE (Show all errors, except for notices and coding standards warnings.)<br /><br />
E_ALL & ~E_NOTICE | E_STRICT (Show all errors, except for notices)<br /><br />
E_COMPILE_ERROR|E_RECOVERABLE_ERROR|E_ERROR|E_CORE_ERROR (Show only errors)<br /><br />
E_ALL | E_STRICT (Show all errors, warnings and notices including coding standards.)<br /><br />
"""))
display_errors = models.BooleanField(verbose_name=_(u"Display errors"), help_text=_(u"""
<strong>This directive controls whether or not and where PHP will output errors,
    notices and warnings to.</strong><br /><br />
Error output is very useful during development, but
it could be very dangerous in production environments. Depending on the code
which is triggering the error, sensitive information could potentially leak
out of your application such as database usernames and passwords or worse.
It's recommended that errors be logged on production servers rather than
having the errors sent to STDOUT.<br /><br />
see <a href="http://php.net/display-errors">http://php.net/display-errors</a>
"""))
display_startup_errors = models.BooleanField(verbose_name=_(u"Display start-up errors"), help_text=_(u"""
    <strong>The display of errors which occur during PHP's startup sequence is handled
separately from display_errors.</strong><br /><br />
PHP's default behavior is to suppress those
errors from clients. Turning the display of startup errors on can be useful in
debugging configuration problems. But, it's strongly recommended that you
leave this setting off on production servers.<br /><br />
see <a href="http://php.net/display-startup-errors">http://php.net/display-startup-errors</a>
"""))
log_errors = models.BooleanField(verbose_name=_(u"Log errors to file"), help_text=_(u"""
<strong>Besides displaying errors, PHP can also log errors to locations such as a
server-specific log, STDERR, or a location specified by the error_log
directive found below.</strong><br /><br />
    While errors should not be displayed on production
    servers, they should still be monitored, and logging is a great way to do that.<br /><br />
see <a href="http://php.net/log-errors">http://php.net/log-errors</a>
"""))
track_errors = models.BooleanField(verbose_name=_(u"Track errors in variable"), help_text=_(u"""
<strong>Store the last error/warning message in $php_errormsg (boolean).</strong><br /><br />
Setting this value
to On can assist in debugging and is appropriate for development servers. It should
however be disabled on production servers.<br /><br />
see <a href="http://php.net/track-errors">http://php.net/track-errors</a>
"""))
html_errors = models.BooleanField(verbose_name=_(u"Link to documentation on error"), help_text=_(u"""
<strong>When PHP displays or logs an error, it has the capability of inserting html
links to documentation related to that error.</strong><br /><br />
This directive controls whether
those HTML links appear in error messages or not. For performance and security
reasons, it's recommended you disable this on production servers.<br /><br />
see <a href="http://php.net/html-errors">http://php.net/html-errors</a>
"""))
session_bug_compat_42 = models.BooleanField(verbose_name=_(u"Stay compatible with session bug (~ PHP 4.2)"), help_text=_(u"""
<strong>PHP 4.2 and less have an undocumented feature/bug that allows you to
    initialize a session variable in the global scope, even when register_globals
is disabled.</strong><br /><br />
PHP 4.3 and later will warn you, if this feature is used.
You can disable the feature and the warning separately. At this time,
the warning is only displayed, if bug_compat_42 is enabled. This feature
introduces some serious security problems if not handled correctly. It's
recommended that you do not use this feature on production servers. But you
should enable this on development servers and enable the warning as well. If you
do not enable the feature on development servers, you won't be warned when it's
used and debugging errors caused by this can be difficult to track down.<br /><br />
see <a href="http://php.net/session.bug-compat-42">http://php.net/session.bug-compat-42</a>
"""))
session_bug_compat_warn = models.BooleanField(verbose_name=_(u"Warn on use of session bug (~ PHP 4.2)"), help_text=_(u"""
<strong>This setting controls whether or not you are warned by PHP when initializing a
session value into the global space.</strong><br /><br />
session.bug_compat_42 must be enabled before
these warnings can be issued by PHP. See the directive above for more information.<br /><br />
see <a href="http://php.net/session.bug-compat-warn">http://php.net/session.bug-compat-warn</a>
"""))
def __unicode__(self):
return u"%s" % (self.name,)
class Meta:
ordering = ["name"]
verbose_name = _(u"PHP config")
verbose_name_plural = _(u"PHP configs")
class PHPExtension(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=75, unique=True, verbose_name=_(u"Extension module"))
enabled = models.BooleanField(default=False)
def __unicode__(self):
return u"%s" % (self.name,)
class Meta:
ordering = ["name"]
verbose_name = _(u"PHP extension")
verbose_name_plural = _(u"PHP extensions")
class ServerConfig(models.Model):
id = models.AutoField(primary_key=True)
active = models.BooleanField(unique=True, default=True)
default_php_config = models.ForeignKey(PHPConfig)
class FTPUser(models.Model):
id = models.AutoField(primary_key=True)
owner = models.ForeignKey(User, help_text=_(u"The user, from which the FTP user inherits its permissions."))
password = models.CharField(max_length=256, help_text=_(u"The password for the FTP user."))
suffix = models.CharField(max_length=16, help_text=_(u"The suffix, which gets appended to the username specified above."))
home = models.CharField(max_length=512, help_text=_(u"An absolute path to the home directory of the FTP user, like: /home/myuser/www/mysite.de/htdocs"))
def username(self):
return u"%s-%s" % (self.owner.username, self.suffix)
def save(self, *args, **kwargs):
# Encrypt password if still in clear-text
if not self.password.startswith("crypt$$1$"):
from django.contrib.auth.models import get_hexdigest
import random
algo = 'crypt'
salt = "$1$%s$" % (get_hexdigest("sha1", str(random.random()), str(random.random()))[:5],)
salt_and_hsh = get_hexdigest(algo, salt, self.password)
self.password = '%s$%s' % (algo, salt_and_hsh)
super(FTPUser, self).save(*args, **kwargs)
def __unicode__(self):
return u"%s" % (self.username())
class Meta:
ordering = ["owner", "suffix"]
verbose_name = _(u"FTP user")
verbose_name_plural = _(u"FTP users")
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
import yaml
from tempest.common import cred_provider
from tempest import config
from tempest import exceptions
from tempest.openstack.common import lockutils
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
def read_accounts_yaml(path):
    # safe_load is sufficient for the plain account entries and avoids
    # constructing arbitrary objects; the file handle is closed promptly.
    with open(path, 'r') as yaml_file:
        accounts = yaml.safe_load(yaml_file)
    return accounts
class Accounts(cred_provider.CredentialProvider):
def __init__(self, name):
super(Accounts, self).__init__(name)
if os.path.isfile(CONF.auth.test_accounts_file):
accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
self.use_default_creds = False
else:
accounts = {}
self.use_default_creds = True
self.hash_dict = self.get_hash_dict(accounts)
self.accounts_dir = os.path.join(CONF.lock_path, 'test_accounts')
self.isolated_creds = {}
@classmethod
def get_hash_dict(cls, accounts):
hash_dict = {}
for account in accounts:
temp_hash = hashlib.md5()
temp_hash.update(str(account))
hash_dict[temp_hash.hexdigest()] = account
return hash_dict
def is_multi_user(self):
        # Default credentials are not a valid option with the locking Accounts provider
if self.use_default_creds:
raise exceptions.InvalidConfiguration(
"Account file %s doesn't exist" % CONF.auth.test_accounts_file)
else:
return len(self.hash_dict) > 1
def is_multi_tenant(self):
return self.is_multi_user()
def _create_hash_file(self, hash_string):
        path = os.path.join(self.accounts_dir, hash_string)
if not os.path.isfile(path):
open(path, 'w').close()
return True
return False
@lockutils.synchronized('test_accounts_io', external=True)
    def _get_free_hash(self, hashes):
        if not os.path.isdir(self.accounts_dir):
            os.mkdir(self.accounts_dir)
            # Create File from first hash (since none are in use)
            self._create_hash_file(hashes[0])
            return hashes[0]
        for _hash in hashes:
            res = self._create_hash_file(_hash)
            if res:
                return _hash
        msg = 'Insufficient number of users provided'
        raise exceptions.InvalidConfiguration(msg)
def _get_creds(self):
if self.use_default_creds:
raise exceptions.InvalidConfiguration(
"Account file %s doesn't exist" % CONF.auth.test_accounts_file)
free_hash = self._get_free_hash(self.hash_dict.keys())
return self.hash_dict[free_hash]
@lockutils.synchronized('test_accounts_io', external=True)
def remove_hash(self, hash_string):
hash_path = os.path.join(self.accounts_dir, hash_string)
if not os.path.isfile(hash_path):
            LOG.warning('Expected an account lock file %s to remove, but '
                        'one did not exist', hash_path)
else:
os.remove(hash_path)
if not os.listdir(self.accounts_dir):
os.rmdir(self.accounts_dir)
def get_hash(self, creds):
for _hash in self.hash_dict:
# Comparing on the attributes that were read from the YAML
if all([getattr(creds, k) == self.hash_dict[_hash][k] for k in
creds.get_init_attributes()]):
return _hash
raise AttributeError('Invalid credentials %s' % creds)
def remove_credentials(self, creds):
_hash = self.get_hash(creds)
self.remove_hash(_hash)
def get_primary_creds(self):
if self.isolated_creds.get('primary'):
return self.isolated_creds.get('primary')
creds = self._get_creds()
primary_credential = cred_provider.get_credentials(**creds)
self.isolated_creds['primary'] = primary_credential
return primary_credential
def get_alt_creds(self):
if self.isolated_creds.get('alt'):
return self.isolated_creds.get('alt')
creds = self._get_creds()
alt_credential = cred_provider.get_credentials(**creds)
self.isolated_creds['alt'] = alt_credential
return alt_credential
def clear_isolated_creds(self):
for creds in self.isolated_creds.values():
self.remove_credentials(creds)
def get_admin_creds(self):
msg = ('If admin credentials are available tenant_isolation should be'
' used instead')
raise NotImplementedError(msg)
class NotLockingAccounts(Accounts):
"""Credentials provider which always returns the first and second
configured accounts as primary and alt users.
This credential provider can be used in case of serial test execution
to preserve the current behaviour of the serial tempest run.
"""
def _unique_creds(self, cred_arg=None):
"""Verify that the configured credentials are valid and distinct """
if self.use_default_creds:
try:
user = self.get_primary_creds()
alt_user = self.get_alt_creds()
return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
except exceptions.InvalidCredentials as ic:
msg = "At least one of the configured credentials is " \
"not valid: %s" % ic.message
raise exceptions.InvalidConfiguration(msg)
else:
# TODO(andreaf) Add a uniqueness check here
return len(self.hash_dict) > 1
def is_multi_user(self):
return self._unique_creds('username')
def is_multi_tenant(self):
return self._unique_creds('tenant_id')
def get_creds(self, id):
try:
# No need to sort the dict as within the same python process
# the HASH seed won't change, so subsequent calls to keys()
# will return the same result
_hash = self.hash_dict.keys()[id]
except IndexError:
msg = 'Insufficient number of users provided'
raise exceptions.InvalidConfiguration(msg)
return self.hash_dict[_hash]
def get_primary_creds(self):
if self.isolated_creds.get('primary'):
return self.isolated_creds.get('primary')
if not self.use_default_creds:
creds = self.get_creds(0)
primary_credential = cred_provider.get_credentials(**creds)
else:
primary_credential = cred_provider.get_configured_credentials(
'user')
self.isolated_creds['primary'] = primary_credential
return primary_credential
def get_alt_creds(self):
if self.isolated_creds.get('alt'):
return self.isolated_creds.get('alt')
if not self.use_default_creds:
creds = self.get_creds(1)
alt_credential = cred_provider.get_credentials(**creds)
else:
alt_credential = cred_provider.get_configured_credentials(
'alt_user')
self.isolated_creds['alt'] = alt_credential
return alt_credential
def clear_isolated_creds(self):
self.isolated_creds = {}
def get_admin_creds(self):
return cred_provider.get_configured_credentials(
"identity_admin", fill_in=False)
|
|
from __future__ import unicode_literals
import datetime
from django.apps import apps
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, management
from django.core.urlresolvers import reverse
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug, ProxyImprovement,
ProxyProxyBug, ProxyTrackerUser, State, StateProxy, StatusPerson,
TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
def test_same_manager_queries(self):
"""
The MyPerson model should be generating the same database queries as
the Person model (when the same manager is used in each case).
"""
my_person_sql = MyPerson.other.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
person_sql = Person.objects.order_by("name").query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertEqual(my_person_sql, person_sql)
def test_inheritance_new_table(self):
"""
        The StatusPerson model should have its own table (it's using ORM-level
inheritance).
"""
sp_sql = StatusPerson.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
p_sql = Person.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
def test_basic_proxy(self):
"""
Creating a Person makes them accessible through the MyPerson proxy.
"""
person = Person.objects.create(name="Foo McBar")
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
def test_no_proxy(self):
"""
Person is not proxied by StatusPerson subclass.
"""
Person.objects.create(name="Foo McBar")
self.assertEqual(list(StatusPerson.objects.all()), [])
def test_basic_proxy_reverse(self):
"""
A new MyPerson also shows up as a standard Person.
"""
MyPerson.objects.create(name="Bazza del Frob")
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status="low", name="homer")
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, ["homer"])
def test_correct_type_proxy_of_proxy(self):
"""
Correct type when querying a proxy of proxy
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
def test_proxy_included_in_ancestors(self):
"""
Proxy models are included in the ancestors for a model's DoesNotExist
and MultipleObjectsReturned
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(
Person.DoesNotExist,
MyPersonProxy.objects.get,
name='Zathras'
)
self.assertRaises(
Person.MultipleObjectsReturned,
MyPersonProxy.objects.get,
id__lt=max_id + 1
)
self.assertRaises(
Person.DoesNotExist,
StatusPerson.objects.get,
name='Zathras'
)
StatusPerson.objects.create(name='Bazza Jr.')
StatusPerson.objects.create(name='Foo Jr.')
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(
Person.MultipleObjectsReturned,
StatusPerson.objects.get,
id__lt=max_id + 1
)
def test_abc(self):
"""
All base classes must be non-abstract
"""
def build_abc():
class NoAbstract(Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_abc)
def test_no_cbc(self):
"""
The proxy must actually have one concrete base class
"""
def build_no_cbc():
class TooManyBases(Person, Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_cbc)
def test_no_base_classes(self):
def build_no_base_classes():
class NoBaseClasses(models.Model):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_base_classes)
def test_new_fields(self):
class NoNewFields(Person):
newfield = models.BooleanField()
class Meta:
proxy = True
# don't register this model in the app_cache for the current app,
# otherwise the check fails when other tests are being run.
app_label = 'no_such_app'
errors = NoNewFields.check()
expected = [
checks.Error(
"Proxy model 'NoNewFields' contains model fields.",
hint=None,
obj=None,
id='models.E017',
)
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
def test_swappable(self):
# The models need to be removed after the test in order to prevent bad
# interactions with the flush operation in other tests.
_old_models = apps.app_configs['proxy_models'].models.copy()
try:
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class AlternateModel(models.Model):
pass
# You can't proxy a swapped model
with self.assertRaises(TypeError):
class ProxyModel(SwappableModel):
class Meta:
proxy = True
finally:
apps.app_configs['proxy_models'].models = _old_models
apps.all_models['proxy_models'] = _old_models
apps.clear_cache()
def test_myperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in MyPerson.objects.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in MyPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'fred'])
def test_otherperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in OtherPerson.objects.all()]
self.assertEqual(resp, ['barney', 'wilma'])
resp = [p.name for p in OtherPerson.excluder.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in OtherPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'wilma'])
def test_permissions_created(self):
from django.contrib.auth.models import Permission
try:
Permission.objects.get(name="May display users information")
except Permission.DoesNotExist:
self.fail("The permission 'May display users information' has not been created")
def test_proxy_model_signals(self):
"""
Test save signals for proxy models
"""
output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append('%s %s save' % (model, event))
return _handler
h1 = make_handler('MyPerson', 'pre')
h2 = make_handler('MyPerson', 'post')
h3 = make_handler('Person', 'pre')
h4 = make_handler('Person', 'post')
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
MyPerson.objects.create(name="dino")
self.assertEqual(output, [
'MyPerson pre save',
'MyPerson post save'
])
output = []
h5 = make_handler('MyPersonProxy', 'pre')
h6 = make_handler('MyPersonProxy', 'post')
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
MyPersonProxy.objects.create(name="pebbles")
self.assertEqual(output, [
'MyPersonProxy pre save',
'MyPersonProxy post save'
])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
def test_content_type(self):
ctype = ContentType.objects.get_for_model
self.assertIs(ctype(Person), ctype(OtherPerson))
def test_user_userproxy_userproxyproxy(self):
User.objects.create(name='Bruce')
resp = [u.name for u in User.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxyProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_proxy_for_model(self):
self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
def test_concrete_model(self):
self.assertEqual(User, UserProxyProxy._meta.concrete_model)
def test_proxy_delete(self):
"""
Proxy objects can be deleted
"""
User.objects.create(name='Bruce')
u2 = UserProxy.objects.create(name='George')
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce', 'George'])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_select_related(self):
"""
We can still use `select_related()` to include related models in our
querysets.
"""
country = Country.objects.create(name='Australia')
State.objects.create(name='New South Wales', country=country)
resp = [s.name for s in State.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
resp = [s.name for s in StateProxy.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
self.assertEqual(StateProxy.objects.get(name='New South Wales').name,
'New South Wales')
resp = StateProxy.objects.select_related().get(name='New South Wales')
self.assertEqual(resp.name, 'New South Wales')
def test_filter_proxy_relation_reverse(self):
tu = TrackerUser.objects.create(name='Contributor', status='contrib')
ptu = ProxyTrackerUser.objects.get()
issue = Issue.objects.create(assignee=tu)
self.assertEqual(tu.issues.get(), issue)
self.assertEqual(ptu.issues.get(), issue)
self.assertQuerysetEqual(
TrackerUser.objects.filter(issues=issue),
[tu], lambda x: x
)
self.assertQuerysetEqual(
ProxyTrackerUser.objects.filter(issues=issue),
[ptu], lambda x: x
)
def test_proxy_bug(self):
contributor = ProxyTrackerUser.objects.create(name='Contributor',
status='contrib')
someone = BaseUser.objects.create(name='Someone')
Bug.objects.create(summary='fix this', version='1.1beta',
assignee=contributor, reporter=someone)
pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
status='proxy')
Improvement.objects.create(summary='improve that', version='1.1beta',
assignee=contributor, reporter=pcontributor,
associated_bug=ProxyProxyBug.objects.all()[0])
# Related field filter on proxy
resp = ProxyBug.objects.get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Select related + filter on proxy
resp = ProxyBug.objects.select_related().get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Proxy of proxy, select_related + filter
resp = ProxyProxyBug.objects.select_related().get(
version__icontains='beta'
)
self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
# Select related + filter on a related proxy field
resp = ProxyImprovement.objects.select_related().get(
reporter__name__icontains='butor'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
# Select related + filter on a related proxy of proxy field
resp = ProxyImprovement.objects.select_related().get(
associated_bug__summary__icontains='fix'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
def test_proxy_load_from_fixture(self):
management.call_command('loaddata', 'mypeople.json', verbosity=0)
p = MyPerson.objects.get(pk=100)
self.assertEqual(p.name, 'Elvis Presley')
def test_eq(self):
self.assertEqual(MyPerson(id=100), Person(id=100))
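# Illustrative sketch (hypothetical model, assumes the test app's registry): the
# shape of proxy declaration these tests exercise. It is wrapped in a function,
# like the in-test class definitions above, so importing this module does not
# register any extra models.
def _example_proxy_declaration():
    class OrderedMyPerson(Person):
        class Meta:
            proxy = True               # reuse Person's table; no new columns
            ordering = ["name"]        # only Python-level behaviour changes
            app_label = 'no_such_app'  # keep it out of the real app registry
    return OrderedMyPerson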
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='proxy_models.urls',)
class ProxyModelAdminTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = AuthUser.objects.create(
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
def test_cascade_delete_proxy_model_admin_warning(self):
"""
        The admin should warn about cascade-deleting related objects that
        reference the concrete model when a proxy object is deleted.
"""
tracker_user = TrackerUser.objects.all()[0]
base_user = BaseUser.objects.all()[0]
issue = Issue.objects.all()[0]
with self.assertNumQueries(7):
collector = admin.utils.NestedObjects('default')
collector.collect(ProxyTrackerUser.objects.all())
self.assertIn(tracker_user, collector.edges.get(None, ()))
self.assertIn(base_user, collector.edges.get(None, ()))
self.assertIn(issue, collector.edges.get(tracker_user, ()))
def test_delete_str_in_model_admin(self):
"""
Test if the admin delete page shows the correct string representation
for a proxy model.
"""
user = TrackerUser.objects.get(name='Django Pony')
proxy = ProxyTrackerUser.objects.get(name='Django Pony')
user_str = 'Tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
)
proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
)
self.client.login(username='super', password='secret')
response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, user_str)
response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, proxy_str)
self.client.logout()
|
|
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import numbers
import struct
from histogrammar.defs import Container, Factory, identity, JsonFormatException, ContainerException
from histogrammar.util import n_dim, datatype, serializable, inheritdoc, maybeAdd, floatToJson, hasKeys, numeq, \
basestring
from histogrammar.primitives.count import Count
class Fraction(Factory, Container):
"""Accumulate two aggregators, one numerator and one denominator
Accumulate two aggregators, one containing only entries that pass a given selection (numerator) and another
that contains all entries (denominator).
The aggregator may be a simple :doc:`Count <histogrammar.primitives.count.Count>` to measure the efficiency of a
cut, a :doc:`Bin <histogrammar.primitives.bin.Bin>` to plot a turn-on curve, or anything else to be tested with
and without a cut.
As a side effect of NaN values returning false for any comparison, a NaN return value from the selection is
treated as a failed cut (the denominator is filled but the numerator is not).
"""
@staticmethod
def ed(entries, numerator, denominator):
"""Create a Fraction that is only capable of being added.
Parameters:
entries (float): the number of entries.
numerator: (:doc:`Container <histogrammar.defs.Container>`): the filled numerator.
denominator (:doc:`Container <histogrammar.defs.Container>`): the filled denominator.
"""
if not isinstance(entries, numbers.Real) and entries not in ("nan", "inf", "-inf"):
raise TypeError("entries ({0}) must be a number".format(entries))
if not isinstance(numerator, Container):
raise TypeError("numerator ({0}) must be a Container".format(numerator))
if not isinstance(denominator, Container):
raise TypeError("denominator ({0}) must be a Container".format(denominator))
if entries < 0.0:
raise ValueError("entries ({0}) cannot be negative".format(entries))
out = Fraction(None, None)
out.entries = float(entries)
out.numerator = numerator
out.denominator = denominator
return out.specialize()
@staticmethod
def ing(quantity, value=Count()):
"""Synonym for ``__init__``."""
return Fraction(quantity, value)
def __init__(self, quantity=identity, value=Count()):
"""Create a Fraction that is capable of being filled and added.
Parameters:
quantity (function returning bool or float): computes the quantity of interest from the data and interprets
it as a selection (multiplicative factor on weight).
value (:doc:`Container <histogrammar.defs.Container>`): generates sub-aggregators for the numerator and
denominator.
Other parameters:
entries (float): the number of entries, initially 0.0.
numerator (:doc:`Container <histogrammar.defs.Container>`): the sub-aggregator of entries that pass
the selection.
denominator (:doc:`Container <histogrammar.defs.Container>`): the sub-aggregator of all entries.
"""
if value is not None and not isinstance(value, Container):
raise TypeError("value ({0}) must be None or a Container".format(value))
self.entries = 0.0
self.quantity = serializable(identity(quantity) if isinstance(quantity, str) else quantity)
if value is not None:
self.numerator = value.zero()
self.denominator = value.zero()
super(Fraction, self).__init__()
self.specialize()
@staticmethod
def build(numerator, denominator):
"""Create a Fraction out of pre-existing containers, which might have been aggregated on different streams.
Parameters:
numerator (:doc:`Container <histogrammar.defs.Container>`): the filled numerator.
denominator (:doc:`Container <histogrammar.defs.Container>`): the filled denominator.
This function will attempt to combine the ``numerator`` and ``denominator``, so they must have the same
binning/bounds/etc.
"""
if not isinstance(numerator, Container):
raise TypeError("numerator ({0}) must be a Container".format(numerator))
if not isinstance(denominator, Container):
raise TypeError("denominator ({0}) must be a Container".format(denominator))
# check for compatibility
numerator + denominator
# return object
return Fraction.ed(denominator.entries, numerator, denominator)
@inheritdoc(Container)
def zero(self):
out = Fraction(self.quantity, None)
out.numerator = self.numerator.zero()
out.denominator = self.denominator.zero()
return out.specialize()
@inheritdoc(Container)
def __add__(self, other):
if isinstance(other, Fraction):
out = Fraction(self.quantity, None)
out.entries = self.entries + other.entries
out.numerator = self.numerator + other.numerator
out.denominator = self.denominator + other.denominator
return out.specialize()
else:
raise ContainerException("cannot add {0} and {1}".format(self.name, other.name))
@inheritdoc(Container)
def __iadd__(self, other):
if isinstance(other, Fraction):
self.entries += other.entries
self.numerator += other.numerator
self.denominator += other.denominator
return self
else:
raise ContainerException("cannot add {0} and {1}".format(self.name, other.name))
@inheritdoc(Container)
def __mul__(self, factor):
if math.isnan(factor) or factor <= 0.0:
return self.zero()
else:
out = self.zero()
out.entries = factor * self.entries
out.numerator = self.numerator * factor
out.denominator = self.denominator * factor
return out.specialize()
@inheritdoc(Container)
def __rmul__(self, factor):
return self.__mul__(factor)
@inheritdoc(Container)
def fill(self, datum, weight=1.0):
self._checkForCrossReferences()
if weight > 0.0:
w = self.quantity(datum)
if not isinstance(w, numbers.Real):
raise TypeError("function return value ({0}) must be boolean or number".format(w))
w *= weight
self.denominator.fill(datum, weight)
if w > 0.0:
self.numerator.fill(datum, w)
# no possibility of exception from here on out (for rollback)
self.entries += weight
def _cppGenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes,
derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix,
fillIndent, weightVars, weightVarStack, tmpVarTypes):
return self._c99GenerateCode(parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes,
derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode,
fillPrefix, fillIndent, weightVars, weightVarStack, tmpVarTypes)
def _c99GenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes,
derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix,
fillIndent, weightVars, weightVarStack, tmpVarTypes):
initCode.append(" " * initIndent + self._c99ExpandPrefix(*initPrefix) + ".entries = 0.0;")
normexpr = self._c99QuantityExpr(
parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
None)
fillCode.append(" " * fillIndent + self._c99ExpandPrefix(*fillPrefix) +
".entries += " + weightVarStack[-1] + ";")
self.denominator._c99GenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"denominator"),
),
initIndent,
fillCode,
fillPrefix + (("var",
"denominator"),
),
fillIndent,
weightVars,
weightVarStack,
tmpVarTypes)
weightVars.append("weight_" + str(len(weightVars)))
weightVarStack = weightVarStack + (weightVars[-1],)
fillCode.append(" " * fillIndent + "if (!std::isnan({0}) && {0} > 0.0) {{".format(normexpr))
fillCode.append(" " * fillIndent +
" {0} = {1} * {2};".format(weightVarStack[-1], weightVarStack[-2], normexpr))
self.numerator._c99GenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"numerator"),
),
initIndent,
fillCode,
fillPrefix + (("var",
"numerator"),
),
fillIndent + 2,
weightVars,
weightVarStack,
tmpVarTypes)
fillCode.append(" " * fillIndent + "}")
storageStructs[self._c99StructName()] = """
typedef struct {{
double entries;
{1} denominator;
{1} numerator;
}} {0};
""".format(self._c99StructName(), self.denominator._c99StorageType())
def _clingUpdate(self, filler, *extractorPrefix):
obj = self._clingExpandPrefix(filler, *extractorPrefix)
self.entries += obj.entries
self.numerator._clingUpdate(obj, ("var", "numerator"))
self.denominator._clingUpdate(obj, ("var", "denominator"))
def _c99StructName(self):
return "Fr" + self.denominator._c99StructName()
def _cudaGenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes,
derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix,
fillIndent, combineCode, totalPrefix, itemPrefix, combineIndent, jsonCode, jsonPrefix,
jsonIndent, weightVars, weightVarStack, tmpVarTypes, suppressName):
normexpr = self._cudaQuantityExpr(
parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
None)
initCode.append(" " * initIndent + self._c99ExpandPrefix(*initPrefix) + ".entries = 0.0f;")
fillCode.append(" " * fillIndent + "atomicAdd(&" + self._c99ExpandPrefix(*fillPrefix) + ".entries, " +
weightVarStack[-1] + ");")
combineCode.append(
" " *
combineIndent +
"atomicAdd(&" +
self._c99ExpandPrefix(
*
totalPrefix) +
".entries, " +
self._c99ExpandPrefix(
*
itemPrefix) +
".entries);")
jsonCode.append(" " * jsonIndent + "fprintf(out, \"{\\\"entries\\\": \");")
jsonCode.append(" " * jsonIndent + "floatToJson(out, " + self._c99ExpandPrefix(*jsonPrefix) + ".entries);")
jsonCode.append(
" " *
jsonIndent +
"fprintf(out, \", \\\"sub:type\\\": \\\"" +
self.denominator.name +
"\\\"\");")
jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"denominator\\\": \");")
self.denominator._cudaGenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"denominator"),
),
initIndent,
fillCode,
fillPrefix + (("var",
"denominator"),
),
fillIndent,
combineCode,
totalPrefix + (("var",
"denominator"),
),
itemPrefix + (("var",
"denominator"),
),
combineIndent,
jsonCode,
jsonPrefix + (("var",
"denominator"),
),
jsonIndent,
weightVars,
weightVarStack,
tmpVarTypes,
False)
weightVars.append("weight_" + str(len(weightVars)))
weightVarStack = weightVarStack + (weightVars[-1],)
fillCode.append(" " * fillIndent +
"{newweight} = (isnan({q}) || {q} <= 0.0) ? 0.0 : ({oldweight} * {q});".format(
newweight=weightVarStack[-1], oldweight=weightVarStack[-2], q=normexpr))
jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"numerator\\\": \");")
self.numerator._cudaGenerateCode(parser,
generator,
inputFieldNames,
inputFieldTypes,
derivedFieldTypes,
derivedFieldExprs,
storageStructs,
initCode,
initPrefix + (("var",
"numerator"),
),
initIndent,
fillCode,
fillPrefix + (("var",
"numerator"),
),
fillIndent,
combineCode,
totalPrefix + (("var",
"numerator"),
),
itemPrefix + (("var",
"numerator"),
),
combineIndent,
jsonCode,
jsonPrefix + (("var",
"numerator"),
),
jsonIndent,
weightVars,
weightVarStack,
tmpVarTypes,
False)
if suppressName or self.quantity.name is None:
jsonCode.append(" " * jsonIndent + "fprintf(out, \"}\");")
else:
jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"name\\\": " +
json.dumps(json.dumps(self.quantity.name))[1:-1] + "}\");")
storageStructs[self._c99StructName()] = """
typedef struct {{
float entries;
{1} denominator;
{1} numerator;
}} {0};
""".format(self._c99StructName(), self.denominator._cudaStorageType())
def _cudaUnpackAndFill(self, data, bigendian, alignment):
format = "<f"
entries, = struct.unpack(format, data[:struct.calcsize(format)])
self.entries += entries
data = data[struct.calcsize(format):]
data = self.denominator._cudaUnpackAndFill(data, bigendian, alignment)
data = self.numerator._cudaUnpackAndFill(data, bigendian, alignment)
return data
def _numpy(self, data, weights, shape):
w = self.quantity(data)
self._checkNPQuantity(w, shape)
self._checkNPWeights(weights, shape)
weights = self._makeNPWeights(weights, shape)
import numpy
w = w * weights
w[numpy.isnan(w)] = 0.0
w[w < 0.0] = 0.0
self.numerator._numpy(data, w, shape)
self.denominator._numpy(data, weights, shape)
# no possibility of exception from here on out (for rollback)
self.entries += float(weights.sum())
def _sparksql(self, jvm, converter):
return converter.Fraction(self.quantity.asSparkSQL(), self.numerator._sparksql(jvm, converter))
@property
def children(self):
"""List of sub-aggregators, to make it possible to walk the tree."""
return [self.numerator, self.denominator]
@inheritdoc(Container)
def toJsonFragment(self, suppressName):
if getattr(self.numerator, "quantity", None) is not None:
binsName = self.numerator.quantity.name
elif getattr(self.numerator, "quantityName", None) is not None:
binsName = self.numerator.quantityName
else:
binsName = None
return maybeAdd({
"entries": floatToJson(self.entries),
"sub:type": self.numerator.name,
"numerator": self.numerator.toJsonFragment(True),
"denominator": self.denominator.toJsonFragment(True),
}, **{"name": None if suppressName else self.quantity.name,
"sub:name": binsName})
@staticmethod
@inheritdoc(Factory)
def fromJsonFragment(json, nameFromParent):
if isinstance(json, dict) and hasKeys(
json.keys(), ["entries", "sub:type", "numerator", "denominator"], ["name", "sub:name"]):
if json["entries"] in ("nan", "inf", "-inf") or isinstance(json["entries"], numbers.Real):
entries = float(json["entries"])
else:
raise JsonFormatException(json, "Fraction.entries")
if isinstance(json.get("name", None), basestring):
name = json["name"]
elif json.get("name", None) is None:
name = None
else:
raise JsonFormatException(json["name"], "Fraction.name")
if isinstance(json["sub:type"], basestring):
factory = Factory.registered[json["sub:type"]]
else:
raise JsonFormatException(json, "Fraction.type")
if isinstance(json.get("sub:name", None), basestring):
subName = json["sub:name"]
elif json.get("sub:name", None) is None:
subName = None
else:
raise JsonFormatException(json["sub:name"], "Fraction.sub:name")
numerator = factory.fromJsonFragment(json["numerator"], subName)
denominator = factory.fromJsonFragment(json["denominator"], subName)
out = Fraction.ed(entries, numerator, denominator)
out.quantity.name = nameFromParent if name is None else name
return out.specialize()
else:
raise JsonFormatException(json, "Fraction")
def __repr__(self):
return "<Fraction values={0}>".format(self.numerator.name)
def __eq__(self, other):
return isinstance(other, Fraction) and numeq(self.entries, other.entries) and \
self.quantity == other.quantity and self.numerator == other.numerator and \
self.denominator == other.denominator
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.entries, self.quantity, self.numerator, self.denominator))
# extra properties: number of dimensions and datatypes of sub-hists
Fraction.n_dim = n_dim
Fraction.datatype = datatype
# register extra methods
Factory.register(Fraction)
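# Illustrative usage sketch with made-up values: measuring the efficiency of a
# simple cut with Count sub-aggregators, as described in the Fraction docstring.
# It only runs when this module is executed directly.
if __name__ == "__main__":
    frac = Fraction(lambda x: x > 0.5, Count())
    for x in [0.1, 0.4, 0.6, 0.75, 0.9]:
        frac.fill(x)
    # every entry fills the denominator; only entries passing the cut fill the numerator
    print(frac.denominator.entries)   # expected 5.0
    print(frac.numerator.entries)     # expected 3.0
    # pre-aggregated pieces can also be combined without re-filling
    combined = Fraction.build(frac.numerator, frac.denominator)
    print(combined.entries)           # expected 5.0 (taken from the denominator)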
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import mock
from typing import Any, Union, Mapping, Callable
from zerver.lib.actions import (
do_create_user,
get_service_bot_events,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import (
get_realm,
UserProfile,
Recipient,
)
BOT_TYPE_TO_QUEUE_NAME = {
UserProfile.OUTGOING_WEBHOOK_BOT: 'outgoing_webhooks',
UserProfile.EMBEDDED_BOT: 'embedded_bots',
}
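# Illustrative sketch (hypothetical helper, not part of zerver.lib.actions): the
# queue-name -> events mapping that the tests below expect get_service_bot_events
# to return for a given trigger.
def _example_event_dict(service_bot_tuples, trigger):
    # type: (Any, str) -> Mapping[str, Any]
    events = {}  # type: Any
    for bot_id, bot_type in service_bot_tuples:
        queue_name = BOT_TYPE_TO_QUEUE_NAME.get(bot_type)
        if queue_name is None:
            continue  # bot types without a service queue produce no events
        events.setdefault(queue_name, []).append(
            dict(trigger=trigger, user_profile_id=bot_id))
    return events
# _example_event_dict([(42, UserProfile.OUTGOING_WEBHOOK_BOT)], 'mention')
#   == {'outgoing_webhooks': [{'trigger': 'mention', 'user_profile_id': 42}]}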
class TestServiceBotBasics(ZulipTestCase):
def _get_outgoing_bot(self):
# type: () -> UserProfile
outgoing_bot = do_create_user(
email="bar-bot@zulip.com",
password="test",
realm=get_realm("zulip"),
full_name="BarBot",
short_name='bb',
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
bot_owner=self.example_user('cordelia'),
)
return outgoing_bot
def test_service_events_for_pms(self):
# type: () -> None
sender = self.example_user('hamlet')
assert(not sender.is_bot)
outgoing_bot = self._get_outgoing_bot()
event_dict = get_service_bot_events(
sender=sender,
service_bot_tuples=[
(outgoing_bot.id, outgoing_bot.bot_type),
],
mentioned_user_ids=set(),
recipient_type=Recipient.PERSONAL,
)
expected = dict(
outgoing_webhooks=[
dict(trigger='private_message', user_profile_id=outgoing_bot.id),
],
)
self.assertEqual(event_dict, expected)
def test_service_events_for_stream_mentions(self):
# type: () -> None
sender = self.example_user('hamlet')
assert(not sender.is_bot)
outgoing_bot = self._get_outgoing_bot()
event_dict = get_service_bot_events(
sender=sender,
service_bot_tuples=[
(outgoing_bot.id, outgoing_bot.bot_type),
],
mentioned_user_ids={outgoing_bot.id},
recipient_type=Recipient.STREAM,
)
expected = dict(
outgoing_webhooks=[
dict(trigger='mention', user_profile_id=outgoing_bot.id),
],
)
self.assertEqual(event_dict, expected)
class TestServiceBotEventTriggers(ZulipTestCase):
def setUp(self):
# type: () -> None
self.user_profile = self.example_user("othello")
self.bot_profile = do_create_user(email="foo-bot@zulip.com",
password="test",
realm=get_realm("zulip"),
full_name="FooBot",
short_name="foo-bot",
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
bot_owner=self.user_profile)
self.second_bot_profile = do_create_user(email="bar-bot@zulip.com",
password="test",
realm=get_realm("zulip"),
full_name="BarBot",
short_name="bar-bot",
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
bot_owner=self.user_profile)
# TODO: In future versions this won't be required
self.subscribe(self.bot_profile, 'Denmark')
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_trigger_on_stream_mention_from_user(self, mock_queue_json_publish):
# type: (mock.Mock) -> None
for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
content = u'@**FooBot** foo bar!!!'
recipient = 'Denmark'
trigger = 'mention'
message_type = Recipient._type_names[Recipient.STREAM]
def check_values_passed(queue_name, trigger_event, x):
# type: (Any, Union[Mapping[Any, Any], Any], Callable[[Any], None]) -> None
self.assertEqual(queue_name, expected_queue_name)
self.assertEqual(trigger_event["failed_tries"], 0)
self.assertEqual(trigger_event["message"]["content"], content)
self.assertEqual(trigger_event["message"]["display_recipient"], recipient)
self.assertEqual(trigger_event["message"]["sender_email"], self.user_profile.email)
self.assertEqual(trigger_event["message"]["type"], message_type)
self.assertEqual(trigger_event['trigger'], trigger)
self.assertEqual(trigger_event['user_profile_id'], self.bot_profile.id)
mock_queue_json_publish.side_effect = check_values_passed
self.send_message(
self.user_profile.email,
'Denmark',
Recipient.STREAM,
content)
self.assertTrue(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_no_trigger_on_stream_message_without_mention(self, mock_queue_json_publish):
# type: (mock.Mock) -> None
sender_email = self.user_profile.email
recipients = "Denmark"
message_type = Recipient.STREAM
self.send_message(sender_email, recipients, message_type)
self.assertFalse(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_no_trigger_on_stream_mention_from_bot(self, mock_queue_json_publish):
# type: (mock.Mock) -> None
for bot_type in BOT_TYPE_TO_QUEUE_NAME:
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
self.send_message(
self.second_bot_profile.email,
'Denmark',
Recipient.STREAM,
u'@**FooBot** foo bar!!!')
self.assertFalse(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_trigger_on_personal_message_from_user(self, mock_queue_json_publish):
# type: (mock.Mock) -> None
for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
sender_email = self.user_profile.email
recipient_email = self.bot_profile.email
message_type = Recipient.PERSONAL
def check_values_passed(queue_name, trigger_event, x):
# type: (Any, Union[Mapping[Any, Any], Any], Callable[[Any], None]) -> None
self.assertEqual(queue_name, expected_queue_name)
self.assertEqual(trigger_event["user_profile_id"], self.bot_profile.id)
self.assertEqual(trigger_event["trigger"], "private_message")
self.assertEqual(trigger_event["failed_tries"], 0)
self.assertEqual(trigger_event["message"]["sender_email"], sender_email)
display_recipients = [
trigger_event["message"]["display_recipient"][0]["email"],
trigger_event["message"]["display_recipient"][1]["email"],
]
self.assertTrue(sender_email in display_recipients)
self.assertTrue(recipient_email in display_recipients)
mock_queue_json_publish.side_effect = check_values_passed
self.send_message(sender_email, recipient_email, message_type, subject='', content='test')
self.assertTrue(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_no_trigger_on_personal_message_from_bot(self, mock_queue_json_publish):
# type: (mock.Mock) -> None
for bot_type in BOT_TYPE_TO_QUEUE_NAME:
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
sender_email = self.second_bot_profile.email
recipient_email = self.bot_profile.email
message_type = Recipient.PERSONAL
self.send_message(sender_email, recipient_email, message_type)
self.assertFalse(mock_queue_json_publish.called)
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_trigger_on_huddle_message_from_user(self, mock_queue_json_publish):
# type: (mock.Mock) -> None
for bot_type, expected_queue_name in BOT_TYPE_TO_QUEUE_NAME.items():
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
self.second_bot_profile.bot_type = bot_type
self.second_bot_profile.save()
sender_email = self.user_profile.email
recipient_emails = [self.bot_profile.email, self.second_bot_profile.email]
message_type = Recipient.HUDDLE
profile_ids = [self.bot_profile.id, self.second_bot_profile.id]
def check_values_passed(queue_name, trigger_event, x):
# type: (Any, Union[Mapping[Any, Any], Any], Callable[[Any], None]) -> None
self.assertEqual(queue_name, expected_queue_name)
self.assertIn(trigger_event["user_profile_id"], profile_ids)
profile_ids.remove(trigger_event["user_profile_id"])
self.assertEqual(trigger_event["trigger"], "private_message")
self.assertEqual(trigger_event["failed_tries"], 0)
self.assertEqual(trigger_event["message"]["sender_email"], sender_email)
self.assertEqual(trigger_event["message"]["type"], u'private')
mock_queue_json_publish.side_effect = check_values_passed
self.send_message(sender_email, recipient_emails, message_type, subject='', content='test')
self.assertEqual(mock_queue_json_publish.call_count, 2)
mock_queue_json_publish.reset_mock()
@mock.patch('zerver.lib.actions.queue_json_publish')
def test_no_trigger_on_huddle_message_from_bot(self, mock_queue_json_publish):
# type: (mock.Mock) -> None
for bot_type in BOT_TYPE_TO_QUEUE_NAME:
self.bot_profile.bot_type = bot_type
self.bot_profile.save()
sender_email = self.second_bot_profile.email
recipient_emails = [self.user_profile.email, self.bot_profile.email]
message_type = Recipient.HUDDLE
self.send_message(sender_email, recipient_emails, message_type)
self.assertFalse(mock_queue_json_publish.called)
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import pickle
from abc import ABC, abstractmethod
from typing import List
import cloudpickle
import pyarrow as pa
from pyflink.common import Row, RowKind
from pyflink.common.time import Instant
from pyflink.datastream.window import TimeWindow, CountWindow
from pyflink.fn_execution.ResettableIO import ResettableIO
from pyflink.fn_execution.stream_slow import InputStream, OutputStream
from pyflink.table.utils import pandas_to_arrow, arrow_to_pandas
ROW_KIND_BIT_SIZE = 2
class LengthPrefixBaseCoderImpl(ABC):
"""
    LengthPrefixBaseCoder is used in Operations; the other coders act as the field coder of
    LengthPrefixBaseCoder.
"""
def __init__(self, field_coder: 'FieldCoderImpl'):
self._field_coder = field_coder
self._data_out_stream = OutputStream()
def _write_data_to_output_stream(self, out_stream: OutputStream):
out_stream.write_var_int64(self._data_out_stream.size())
out_stream.write(self._data_out_stream.get())
self._data_out_stream.clear()
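# A minimal sketch (added for illustration, not part of the original module and never invoked)
# of the framing performed by _write_data_to_output_stream above: whatever the field coder has
# buffered is written out as a var-int length prefix followed by the raw payload bytes.
def _length_prefix_framing_sketch(payload: bytes) -> bytes:
    buffered = OutputStream()
    buffered.write(payload)                   # stands in for the bytes a field coder would emit
    framed = OutputStream()
    framed.write_var_int64(buffered.size())   # length prefix
    framed.write(buffered.get())              # payload
    buffered.clear()
    return framed.get()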
class FieldCoderImpl(ABC):
@abstractmethod
def encode_to_stream(self, value, out_stream: OutputStream):
"""
Encodes `value` to the output stream.
:param value: The output data
:param out_stream: Output Stream
"""
pass
@abstractmethod
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
"""
Decodes data from the input stream.
:param in_stream: Input Stream
        :param length: The number of bytes of the input stream to decode. The default value is
            0, which means the coder won't make use of the length when decoding the data from
            the input stream.
        :return: The decoded data.
"""
pass
def encode(self, value):
out = OutputStream()
self.encode_to_stream(value, out)
return out.get()
def decode(self, encoded):
return self.decode_from_stream(InputStream(encoded), len(encoded))
class IterableCoderImpl(LengthPrefixBaseCoderImpl):
"""
    Encodes iterable data to the output stream. The output mode decides whether to write a
    special end message 0x00 to the output stream after encoding the data.
"""
def __init__(self, field_coder: 'FieldCoderImpl', separated_with_end_message: bool):
super(IterableCoderImpl, self).__init__(field_coder)
self._separated_with_end_message = separated_with_end_message
def encode_to_stream(self, value: List, out_stream: OutputStream):
if value:
for item in value:
self._field_coder.encode_to_stream(item, self._data_out_stream)
self._write_data_to_output_stream(out_stream)
# write end message
if self._separated_with_end_message:
out_stream.write_var_int64(1)
out_stream.write_byte(0x00)
def decode_from_stream(self, in_stream: InputStream):
while in_stream.size() > 0:
yield self._field_coder.decode_from_stream(in_stream, in_stream.read_var_int64())
class ValueCoderImpl(LengthPrefixBaseCoderImpl):
"""
    Encodes a single record to the output stream.
"""
def __init__(self, field_coder: 'FieldCoderImpl'):
super(ValueCoderImpl, self).__init__(field_coder)
def encode_to_stream(self, value, out_stream: OutputStream):
self._field_coder.encode_to_stream(value, self._data_out_stream)
self._write_data_to_output_stream(out_stream)
def decode_from_stream(self, in_stream: InputStream):
return self._field_coder.decode_from_stream(in_stream, in_stream.read_var_int64())
class MaskUtils:
"""
    A utility class used to encode and decode the row kind and null mask bits.
"""
def __init__(self, field_count):
self._field_count = field_count
# the row kind uses the first 2 bits of the bitmap, the remaining bits are used for null
# mask, for more details refer to:
# https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/RowSerializer.java
self._leading_complete_bytes_num = (self._field_count + ROW_KIND_BIT_SIZE) // 8
self._remaining_bits_num = (self._field_count + ROW_KIND_BIT_SIZE) % 8
self.null_mask_search_table = self.generate_null_mask_search_table()
self.null_byte_search_table = (0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01)
self.row_kind_search_table = [0x00, 0x80, 0x40, 0xC0]
@staticmethod
def generate_null_mask_search_table():
"""
        Each bit of one byte represents whether the column at the corresponding position is None,
        e.g. 0x84 means that the first column and the sixth column are None.
"""
null_mask = []
for b in range(256):
every_num_null_mask = [(b & 0x80) > 0, (b & 0x40) > 0, (b & 0x20) > 0, (b & 0x10) > 0,
(b & 0x08) > 0, (b & 0x04) > 0, (b & 0x02) > 0, (b & 0x01) > 0]
null_mask.append(tuple(every_num_null_mask))
return tuple(null_mask)
def write_mask(self, value, row_kind_value, out_stream: OutputStream):
field_pos = 0
null_byte_search_table = self.null_byte_search_table
remaining_bits_num = self._remaining_bits_num
# first byte contains the row kind bits
b = self.row_kind_search_table[row_kind_value]
for i in range(0, 8 - ROW_KIND_BIT_SIZE):
if field_pos + i < len(value) and value[field_pos + i] is None:
b |= null_byte_search_table[i + ROW_KIND_BIT_SIZE]
field_pos += 8 - ROW_KIND_BIT_SIZE
out_stream.write_byte(b)
for _ in range(1, self._leading_complete_bytes_num):
b = 0x00
for i in range(0, 8):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
field_pos += 8
out_stream.write_byte(b)
if self._leading_complete_bytes_num >= 1 and remaining_bits_num:
b = 0x00
for i in range(remaining_bits_num):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
out_stream.write_byte(b)
def read_mask(self, in_stream: InputStream):
mask = []
mask_search_table = self.null_mask_search_table
remaining_bits_num = self._remaining_bits_num
for _ in range(self._leading_complete_bytes_num):
b = in_stream.read_byte()
mask.extend(mask_search_table[b])
if remaining_bits_num:
b = in_stream.read_byte()
mask.extend(mask_search_table[b][0:remaining_bits_num])
return mask
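# A small sketch (illustrative only; not referenced anywhere else in this module) of the mask
# round trip for a 3-field row whose middle field is None. With 3 fields the two row kind bits
# and the three null flags all fit into the single leading byte written by write_mask.
def _mask_utils_sketch():
    utils = MaskUtils(3)
    out = OutputStream()
    utils.write_mask([1, None, "x"], 0, out)               # row kind value 0 (INSERT)
    mask = utils.read_mask(InputStream(out.get()))
    # mask[0:2] holds the row kind bits, mask[2:5] the per-field null flags
    return mask[ROW_KIND_BIT_SIZE:ROW_KIND_BIT_SIZE + 3]   # -> [False, True, False]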
class FlattenRowCoderImpl(FieldCoderImpl):
"""
    A coder for a flattened row (a List) object, without field names and with a row kind value of 0.
"""
def __init__(self, field_coders: List[FieldCoderImpl]):
self._field_coders = field_coders
self._field_count = len(field_coders)
self._mask_utils = MaskUtils(self._field_count)
def encode_to_stream(self, value, out_stream: OutputStream):
# encode mask value
self._mask_utils.write_mask(value, 0, out_stream)
# encode every field value
for i in range(self._field_count):
item = value[i]
if item is not None:
self._field_coders[i].encode_to_stream(item, out_stream)
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
row_kind_and_null_mask = self._mask_utils.read_mask(in_stream)
return [None if row_kind_and_null_mask[idx + ROW_KIND_BIT_SIZE] else
self._field_coders[idx].decode_from_stream(in_stream)
for idx in range(0, self._field_count)]
def __repr__(self):
return 'FlattenRowCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
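# A minimal sketch (illustrative only; the runtime never calls this) of a flatten-row round trip
# using the encode/decode helpers inherited from FieldCoderImpl. A None field is recorded in the
# null mask and skipped in the payload, as described in encode_to_stream above.
def _flatten_row_coder_sketch():
    coder = FlattenRowCoderImpl([BigIntCoderImpl(), CharCoderImpl()])
    encoded = coder.encode([1, None])
    return coder.decode(encoded)   # -> [1, None]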
class RowCoderImpl(FieldCoderImpl):
"""
A coder for `Row` object.
"""
def __init__(self, field_coders, field_names):
self._field_coders = field_coders
self._field_count = len(field_coders)
self._field_names = field_names
self._mask_utils = MaskUtils(self._field_count)
def encode_to_stream(self, value: Row, out_stream: OutputStream):
# encode mask value
self._mask_utils.write_mask(value._values, value.get_row_kind().value, out_stream)
# encode every field value
for i in range(self._field_count):
item = value[i]
if item is not None:
self._field_coders[i].encode_to_stream(item, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0) -> Row:
row_kind_and_null_mask = self._mask_utils.read_mask(in_stream)
fields = [None if row_kind_and_null_mask[idx + ROW_KIND_BIT_SIZE] else
self._field_coders[idx].decode_from_stream(in_stream)
for idx in range(0, self._field_count)]
# compute the row_kind value
row_kind_value = 0
for i in range(ROW_KIND_BIT_SIZE):
row_kind_value += int(row_kind_and_null_mask[i]) * 2 ** i
row = Row(*fields)
row.set_field_names(self._field_names)
row.set_row_kind(RowKind(row_kind_value))
return row
def __repr__(self):
return 'RowCoderImpl[%s, %s]' % \
(', '.join(str(c) for c in self._field_coders), self._field_names)
class ArrowCoderImpl(FieldCoderImpl):
"""
A coder for arrow format data.
"""
def __init__(self, schema, row_type, timezone):
self._schema = schema
self._field_types = row_type.field_types()
self._timezone = timezone
self._resettable_io = ResettableIO()
self._batch_reader = ArrowCoderImpl._load_from_stream(self._resettable_io)
def encode_to_stream(self, cols, out_stream: OutputStream):
self._resettable_io.set_output_stream(out_stream)
batch_writer = pa.RecordBatchStreamWriter(self._resettable_io, self._schema)
batch_writer.write_batch(
pandas_to_arrow(self._schema, self._timezone, self._field_types, cols))
def decode_from_stream(self, in_stream: InputStream, length=0):
return self.decode_one_batch_from_stream(in_stream, length)
def decode_one_batch_from_stream(self, in_stream: InputStream, size: int) -> List:
self._resettable_io.set_input_bytes(in_stream.read(size))
# there is only one arrow batch in the underlying input stream
return arrow_to_pandas(self._timezone, self._field_types, [next(self._batch_reader)])
@staticmethod
def _load_from_stream(stream):
while stream.readable():
reader = pa.ipc.open_stream(stream)
yield reader.read_next_batch()
def __repr__(self):
return 'ArrowCoderImpl[%s]' % self._schema
class OverWindowArrowCoderImpl(FieldCoderImpl):
"""
A coder for over window with arrow format data.
The data structure: [window data][arrow format data].
"""
def __init__(self, arrow_coder_impl: ArrowCoderImpl):
self._arrow_coder = arrow_coder_impl
self._int_coder = IntCoderImpl()
def encode_to_stream(self, cols, out_stream: OutputStream):
self._arrow_coder.encode_to_stream(cols, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
window_num = self._int_coder.decode_from_stream(in_stream)
length -= 4
window_boundaries_and_arrow_data = []
for _ in range(window_num):
window_size = self._int_coder.decode_from_stream(in_stream)
length -= 4
window_boundaries_and_arrow_data.append(
[self._int_coder.decode_from_stream(in_stream)
for _ in range(window_size)])
length -= 4 * window_size
window_boundaries_and_arrow_data.append(
self._arrow_coder.decode_one_batch_from_stream(in_stream, length))
return window_boundaries_and_arrow_data
def __repr__(self):
return 'OverWindowArrowCoderImpl[%s]' % self._arrow_coder
class TinyIntCoderImpl(FieldCoderImpl):
"""
A coder for tiny int value (from -128 to 127).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int8(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_int8()
class SmallIntCoderImpl(FieldCoderImpl):
"""
A coder for small int value (from -32,768 to 32,767).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int16(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_int16()
class IntCoderImpl(FieldCoderImpl):
"""
A coder for int value (from -2,147,483,648 to 2,147,483,647).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_int32()
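# A tiny sketch (illustration only) of the encode/decode convenience methods defined on
# FieldCoderImpl: a value is serialized to bytes and read back with the same coder.
def _int_coder_sketch():
    coder = IntCoderImpl()
    return coder.decode(coder.encode(42))   # -> 42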
class BigIntCoderImpl(FieldCoderImpl):
"""
A coder for big int value (from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int64(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_int64()
class BooleanCoderImpl(FieldCoderImpl):
"""
A coder for a boolean value.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_byte(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
        return bool(in_stream.read_byte())
class FloatCoderImpl(FieldCoderImpl):
"""
A coder for a float value (4-byte single precision floating point number).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_float(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_float()
class DoubleCoderImpl(FieldCoderImpl):
"""
A coder for a double value (8-byte double precision floating point number).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_double(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_double()
class BinaryCoderImpl(FieldCoderImpl):
"""
A coder for a bytes value.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_bytes(value, len(value))
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_bytes()
class CharCoderImpl(FieldCoderImpl):
"""
A coder for a str value.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
bytes_value = value.encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_bytes().decode("utf-8")
class DecimalCoderImpl(FieldCoderImpl):
"""
A coder for a decimal value (with fixed precision and scale).
"""
def __init__(self, precision, scale):
self.context = decimal.Context(prec=precision)
self.scale_format = decimal.Decimal(10) ** -scale
def encode_to_stream(self, value, out_stream: OutputStream):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
value = value.quantize(self.scale_format)
bytes_value = str(value).encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
decimal.setcontext(user_context)
def decode_from_stream(self, in_stream: InputStream, length=0):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
value = decimal.Decimal(in_stream.read_bytes().decode("utf-8")).quantize(self.scale_format)
decimal.setcontext(user_context)
return value
class BigDecimalCoderImpl(FieldCoderImpl):
"""
A coder for a big decimal value (without fixed precision and scale).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
bytes_value = str(value).encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
def decode_from_stream(self, in_stream: InputStream, length=0):
return decimal.Decimal(in_stream.read_bytes().decode("utf-8"))
class DateCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.date value.
"""
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(self.date_to_internal(value))
def decode_from_stream(self, in_stream: InputStream, length=0):
value = in_stream.read_int32()
return self.internal_to_date(value)
def date_to_internal(self, d):
return d.toordinal() - self.EPOCH_ORDINAL
def internal_to_date(self, v):
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
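# A brief sketch (illustration only) of the epoch-ordinal representation used by DateCoderImpl:
# a date is stored as the number of days since 1970-01-01.
def _date_coder_sketch():
    coder = DateCoderImpl()
    internal = coder.date_to_internal(datetime.date(1970, 1, 2))   # -> 1
    return coder.internal_to_date(internal)                        # -> datetime.date(1970, 1, 2)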
class TimeCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.time value.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(self.time_to_internal(value))
def decode_from_stream(self, in_stream: InputStream, length=0):
value = in_stream.read_int32()
return self.internal_to_time(value)
@staticmethod
def time_to_internal(t):
milliseconds = (t.hour * 3600000
+ t.minute * 60000
+ t.second * 1000
+ t.microsecond // 1000)
return milliseconds
@staticmethod
def internal_to_time(v):
seconds, milliseconds = divmod(v, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, milliseconds * 1000)
class TimestampCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.datetime value.
"""
def __init__(self, precision):
self.precision = precision
def is_compact(self):
return self.precision <= 3
def encode_to_stream(self, value, out_stream: OutputStream):
milliseconds, nanoseconds = self.timestamp_to_internal(value)
if self.is_compact():
assert nanoseconds == 0
out_stream.write_int64(milliseconds)
else:
out_stream.write_int64(milliseconds)
out_stream.write_int32(nanoseconds)
def decode_from_stream(self, in_stream: InputStream, length=0):
if self.is_compact():
milliseconds = in_stream.read_int64()
nanoseconds = 0
else:
milliseconds = in_stream.read_int64()
nanoseconds = in_stream.read_int32()
return self.internal_to_timestamp(milliseconds, nanoseconds)
@staticmethod
def timestamp_to_internal(timestamp):
seconds = int(timestamp.replace(tzinfo=datetime.timezone.utc).timestamp())
microseconds_of_second = timestamp.microsecond
milliseconds = seconds * 1000 + microseconds_of_second // 1000
nanoseconds = microseconds_of_second % 1000 * 1000
return milliseconds, nanoseconds
def internal_to_timestamp(self, milliseconds, nanoseconds):
second, microsecond = (milliseconds // 1000,
milliseconds % 1000 * 1000 + nanoseconds // 1000)
return datetime.datetime.utcfromtimestamp(second).replace(microsecond=microsecond)
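# A short sketch (illustration only; the helper name below is not part of the coder API) of the
# internal representation used above: a timestamp is split into epoch milliseconds plus the
# leftover nanoseconds, and the compact form (precision <= 3) stores only the milliseconds.
def _timestamp_internal_sketch():
    ts = datetime.datetime(1970, 1, 1, 0, 0, 1, 123456)
    millis, nanos = TimestampCoderImpl.timestamp_to_internal(ts)
    # millis == 1123 (1 second and 123 ms), nanos == 456000 (the remaining 456 microseconds)
    return TimestampCoderImpl(6).internal_to_timestamp(millis, nanos) == ts   # -> True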
class LocalZonedTimestampCoderImpl(TimestampCoderImpl):
"""
A coder for a datetime.datetime with time zone value.
"""
def __init__(self, precision, timezone):
super(LocalZonedTimestampCoderImpl, self).__init__(precision)
self.timezone = timezone
def internal_to_timestamp(self, milliseconds, nanoseconds):
return self.timezone.localize(
super(LocalZonedTimestampCoderImpl, self).internal_to_timestamp(
milliseconds, nanoseconds))
class InstantCoderImpl(FieldCoderImpl):
"""
A coder for Instant.
"""
def __init__(self):
self._null_seconds = -9223372036854775808
self._null_nanos = -2147483648
def encode_to_stream(self, value: Instant, out_stream: OutputStream):
if value is None:
out_stream.write_int64(self._null_seconds)
out_stream.write_int32(self._null_nanos)
else:
out_stream.write_int64(value.seconds)
out_stream.write_int32(value.nanos)
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
seconds = in_stream.read_int64()
nanos = in_stream.read_int32()
if seconds == self._null_seconds and nanos == self._null_nanos:
return None
else:
return Instant(seconds, nanos)
class CloudPickleCoderImpl(FieldCoderImpl):
"""
    A coder that uses cloudpickle to serialize and deserialize arbitrary Python objects.
"""
def __init__(self):
self.field_coder = BinaryCoderImpl()
def encode_to_stream(self, value, out_stream: OutputStream):
coded_data = cloudpickle.dumps(value)
self.field_coder.encode_to_stream(coded_data, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
return self._decode_one_value_from_stream(in_stream)
def _decode_one_value_from_stream(self, in_stream: InputStream):
real_data = self.field_coder.decode_from_stream(in_stream)
value = cloudpickle.loads(real_data)
return value
def __repr__(self) -> str:
return 'CloudPickleCoderImpl[%s]' % str(self.field_coder)
class PickleCoderImpl(FieldCoderImpl):
"""
    A coder that uses pickle to serialize and deserialize arbitrary Python objects.
"""
def __init__(self):
self.field_coder = BinaryCoderImpl()
def encode_to_stream(self, value, out_stream: OutputStream):
coded_data = pickle.dumps(value)
self.field_coder.encode_to_stream(coded_data, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
real_data = self.field_coder.decode_from_stream(in_stream)
value = pickle.loads(real_data)
return value
def __repr__(self) -> str:
return 'PickleCoderImpl[%s]' % str(self.field_coder)
class TupleCoderImpl(FieldCoderImpl):
"""
A coder for a tuple value.
"""
def __init__(self, field_coders):
self._field_coders = field_coders
self._field_count = len(field_coders)
def encode_to_stream(self, value, out_stream: OutputStream):
field_coders = self._field_coders
for i in range(self._field_count):
field_coders[i].encode_to_stream(value[i], out_stream)
def decode_from_stream(self, stream: InputStream, length=0):
decoded_list = [field_coder.decode_from_stream(stream)
for field_coder in self._field_coders]
return (*decoded_list,)
def __repr__(self) -> str:
return 'TupleCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class GenericArrayCoderImpl(FieldCoderImpl):
"""
    A coder for an object array value (the elements of the array can be any kind of Python object).
"""
def __init__(self, elem_coder: FieldCoderImpl):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(len(value))
for elem in value:
if elem is None:
out_stream.write_byte(False)
else:
out_stream.write_byte(True)
self._elem_coder.encode_to_stream(elem, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
size = in_stream.read_int32()
elements = [self._elem_coder.decode_from_stream(in_stream)
if in_stream.read_byte() else None for _ in range(size)]
return elements
def __repr__(self):
return 'GenericArrayCoderImpl[%s]' % repr(self._elem_coder)
class PrimitiveArrayCoderImpl(FieldCoderImpl):
"""
    A coder for a primitive array value (the elements of the array won't be null).
"""
def __init__(self, elem_coder: FieldCoderImpl):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(len(value))
for elem in value:
self._elem_coder.encode_to_stream(elem, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
size = in_stream.read_int32()
elements = [self._elem_coder.decode_from_stream(in_stream) for _ in range(size)]
return elements
def __repr__(self):
return 'PrimitiveArrayCoderImpl[%s]' % repr(self._elem_coder)
class MapCoderImpl(FieldCoderImpl):
"""
    A coder for a map value (a dict in which all keys have the same type and all values have the same type).
"""
def __init__(self, key_coder: FieldCoderImpl, value_coder: FieldCoderImpl):
self._key_coder = key_coder
self._value_coder = value_coder
def encode_to_stream(self, map_value, out_stream: OutputStream):
out_stream.write_int32(len(map_value))
for key in map_value:
self._key_coder.encode_to_stream(key, out_stream)
value = map_value[key]
if value is None:
out_stream.write_byte(True)
else:
out_stream.write_byte(False)
self._value_coder.encode_to_stream(map_value[key], out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
size = in_stream.read_int32()
map_value = {}
for _ in range(size):
key = self._key_coder.decode_from_stream(in_stream)
is_null = in_stream.read_byte()
if is_null:
map_value[key] = None
else:
value = self._value_coder.decode_from_stream(in_stream)
map_value[key] = value
return map_value
def __repr__(self):
return 'MapCoderImpl[%s]' % ' : '.join([repr(self._key_coder), repr(self._value_coder)])
class TimeWindowCoderImpl(FieldCoderImpl):
"""
A coder for TimeWindow.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int64(value.start)
out_stream.write_int64(value.end)
def decode_from_stream(self, in_stream: InputStream, length=0):
start = in_stream.read_int64()
end = in_stream.read_int64()
return TimeWindow(start, end)
class CountWindowCoderImpl(FieldCoderImpl):
"""
A coder for CountWindow.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int64(value.id)
def decode_from_stream(self, in_stream: InputStream, length=0):
return CountWindow(in_stream.read_int64())
class DataViewFilterCoderImpl(FieldCoderImpl):
"""
A coder for data view filter.
"""
def __init__(self, udf_data_view_specs):
self._udf_data_view_specs = udf_data_view_specs
self._pickle_coder = PickleCoderImpl()
def encode_to_stream(self, value, out_stream: OutputStream):
self._pickle_coder.encode_to_stream(self._filter_data_views(value), out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
return self._pickle_coder.decode_from_stream(in_stream)
def _filter_data_views(self, row):
i = 0
for specs in self._udf_data_view_specs:
for spec in specs:
row[i][spec.field_index] = None
i += 1
return row
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Cisco Systems, Inc.
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import logging
import netaddr
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
import six
from horizon import messages
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'}
OFF_STATE = 'OFF'
ON_STATE = 'ON'
ROUTER_INTERFACE_OWNERS = (
'network:router_interface',
'network:router_interface_distributed'
)
class NeutronAPIDictWrapper(base.APIDictWrapper):
def __init__(self, apidict):
if 'admin_state_up' in apidict:
if apidict['admin_state_up']:
apidict['admin_state'] = 'UP'
else:
apidict['admin_state'] = 'DOWN'
# Django cannot handle a key name with ':', so use '__'.
apidict.update({
key.replace(':', '__'): value
for key, value in apidict.items()
if ':' in key
})
super(NeutronAPIDictWrapper, self).__init__(apidict)
def set_id_as_name_if_empty(self, length=8):
try:
if not self._apidict['name']:
id = self._apidict['id']
if length:
id = id[:length]
self._apidict['name'] = '(%s)' % id
except KeyError:
pass
def items(self):
return self._apidict.items()
@property
def name_or_id(self):
return (self._apidict.get('name') or
'(%s)' % self._apidict['id'][:13])
class Agent(NeutronAPIDictWrapper):
"""Wrapper for neutron agents."""
class Network(NeutronAPIDictWrapper):
"""Wrapper for neutron Networks."""
def to_dict(self):
d = dict(super(NeutronAPIDictWrapper, self).to_dict())
d['subnets'] = [s.to_dict() for s in d['subnets']]
return d
class Subnet(NeutronAPIDictWrapper):
"""Wrapper for neutron subnets."""
def __init__(self, apidict):
apidict['ipver_str'] = get_ipver_str(apidict['ip_version'])
super(Subnet, self).__init__(apidict)
class SubnetPool(NeutronAPIDictWrapper):
"""Wrapper for neutron subnetpools."""
class Port(NeutronAPIDictWrapper):
"""Wrapper for neutron ports."""
def __init__(self, apidict):
if 'mac_learning_enabled' in apidict:
apidict['mac_state'] = \
ON_STATE if apidict['mac_learning_enabled'] else OFF_STATE
super(Port, self).__init__(apidict)
class Profile(NeutronAPIDictWrapper):
"""Wrapper for neutron profiles."""
_attrs = ['profile_id', 'name', 'segment_type', 'segment_range',
'sub_type', 'multicast_ip_index', 'multicast_ip_range']
class Router(NeutronAPIDictWrapper):
"""Wrapper for neutron routers."""
class RouterStaticRoute(NeutronAPIDictWrapper):
"""Wrapper for neutron routes extra route."""
def __init__(self, route):
super(RouterStaticRoute, self).__init__(route)
# Horizon references id property for table operations
self.id = route['nexthop'] + ":" + route['destination']
class SecurityGroup(NeutronAPIDictWrapper):
# Required attributes: id, name, description, tenant_id, rules
def __init__(self, sg, sg_dict=None):
if sg_dict is None:
sg_dict = {sg['id']: sg['name']}
sg['rules'] = [SecurityGroupRule(rule, sg_dict)
for rule in sg['security_group_rules']]
super(SecurityGroup, self).__init__(sg)
def to_dict(self):
return {k: self._apidict[k] for k in self._apidict if k != 'rules'}
@six.python_2_unicode_compatible
class SecurityGroupRule(NeutronAPIDictWrapper):
# Required attributes:
# id, parent_group_id
# ip_protocol, from_port, to_port, ip_range, group
# ethertype, direction (Neutron specific)
def _get_secgroup_name(self, sg_id, sg_dict):
if sg_id:
if sg_dict is None:
sg_dict = {}
            # If the sg name is not found in sg_dict,
            # the first two parts of the UUID are used as the sg name.
return sg_dict.get(sg_id, sg_id[:13])
else:
return u''
def __init__(self, sgr, sg_dict=None):
# In Neutron, if both remote_ip_prefix and remote_group_id are None,
# it means all remote IP range is allowed, i.e., 0.0.0.0/0 or ::/0.
if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']:
if sgr['ethertype'] == 'IPv6':
sgr['remote_ip_prefix'] = '::/0'
else:
sgr['remote_ip_prefix'] = '0.0.0.0/0'
rule = {
'id': sgr['id'],
'parent_group_id': sgr['security_group_id'],
'direction': sgr['direction'],
'ethertype': sgr['ethertype'],
'ip_protocol': sgr['protocol'],
'from_port': sgr['port_range_min'],
'to_port': sgr['port_range_max'],
}
cidr = sgr['remote_ip_prefix']
rule['ip_range'] = {'cidr': cidr} if cidr else {}
group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict)
rule['group'] = {'name': group} if group else {}
super(SecurityGroupRule, self).__init__(rule)
def __str__(self):
if 'name' in self.group:
remote = self.group['name']
elif 'cidr' in self.ip_range:
remote = self.ip_range['cidr']
else:
remote = 'ANY'
direction = 'to' if self.direction == 'egress' else 'from'
if self.from_port:
if self.from_port == self.to_port:
proto_port = ("%s/%s" %
(self.from_port, self.ip_protocol.lower()))
else:
proto_port = ("%s-%s/%s" %
(self.from_port, self.to_port,
self.ip_protocol.lower()))
elif self.ip_protocol:
try:
ip_proto = int(self.ip_protocol)
proto_port = "ip_proto=%d" % ip_proto
except Exception:
# well-defined IP protocol name like TCP, UDP, ICMP.
proto_port = self.ip_protocol
else:
proto_port = ''
return (_('ALLOW %(ethertype)s %(proto_port)s '
'%(direction)s %(remote)s') %
{'ethertype': self.ethertype,
'proto_port': proto_port,
'remote': remote,
'direction': direction})
class SecurityGroupManager(network_base.SecurityGroupManager):
backend = 'neutron'
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def _list(self, **filters):
secgroups = self.client.list_security_groups(**filters)
return [SecurityGroup(sg) for sg in secgroups.get('security_groups')]
def list(self):
tenant_id = self.request.user.tenant_id
return self._list(tenant_id=tenant_id)
def _sg_name_dict(self, sg_id, rules):
"""Create a mapping dict from secgroup id to its name."""
related_ids = set([sg_id])
related_ids |= set(filter(None, [r['remote_group_id'] for r in rules]))
related_sgs = self.client.list_security_groups(id=related_ids,
fields=['id', 'name'])
related_sgs = related_sgs.get('security_groups')
return dict((sg['id'], sg['name']) for sg in related_sgs)
def get(self, sg_id):
secgroup = self.client.show_security_group(sg_id).get('security_group')
sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])
return SecurityGroup(secgroup, sg_dict)
def create(self, name, desc):
body = {'security_group': {'name': name,
'description': desc,
'tenant_id': self.request.user.project_id}}
secgroup = self.client.create_security_group(body)
return SecurityGroup(secgroup.get('security_group'))
def update(self, sg_id, name, desc):
body = {'security_group': {'name': name,
'description': desc}}
secgroup = self.client.update_security_group(sg_id, body)
return SecurityGroup(secgroup.get('security_group'))
def delete(self, sg_id):
self.client.delete_security_group(sg_id)
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
cidr=None, group_id=None):
if not cidr:
cidr = None
if from_port < 0:
from_port = None
if to_port < 0:
to_port = None
if isinstance(ip_protocol, int) and ip_protocol < 0:
ip_protocol = None
body = {'security_group_rule':
{'security_group_id': parent_group_id,
'direction': direction,
'ethertype': ethertype,
'protocol': ip_protocol,
'port_range_min': from_port,
'port_range_max': to_port,
'remote_ip_prefix': cidr,
'remote_group_id': group_id}}
rule = self.client.create_security_group_rule(body)
rule = rule.get('security_group_rule')
sg_dict = self._sg_name_dict(parent_group_id, [rule])
return SecurityGroupRule(rule, sg_dict)
def rule_delete(self, sgr_id):
self.client.delete_security_group_rule(sgr_id)
def list_by_instance(self, instance_id):
"""Gets security groups of an instance."""
ports = port_list(self.request, device_id=instance_id)
sg_ids = []
for p in ports:
sg_ids += p.security_groups
return self._list(id=set(sg_ids)) if sg_ids else []
def update_instance_security_group(self, instance_id,
new_security_group_ids):
ports = port_list(self.request, device_id=instance_id)
for p in ports:
params = {'security_groups': new_security_group_ids}
port_update(self.request, p.id, **params)
class FloatingIp(base.APIDictWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
'instance_type', 'pool']
def __init__(self, fip):
fip['ip'] = fip['floating_ip_address']
fip['fixed_ip'] = fip['fixed_ip_address']
fip['pool'] = fip['floating_network_id']
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
pass
class FloatingIpTarget(base.APIDictWrapper):
pass
class FloatingIpManager(network_base.FloatingIpManager):
device_owner_map = {
'compute:': 'compute',
'neutron:LOADBALANCER': 'loadbalancer',
}
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def list_pools(self):
search_opts = {'router:external': True}
return [FloatingIpPool(pool) for pool
in self.client.list_networks(**search_opts).get('networks')]
def _get_instance_type_from_device_owner(self, device_owner):
for key, value in self.device_owner_map.items():
if device_owner.startswith(key):
return value
return device_owner
def _set_instance_info(self, fip, port=None):
if fip['port_id']:
if not port:
port = port_get(self.request, fip['port_id'])
fip['instance_id'] = port.device_id
fip['instance_type'] = self._get_instance_type_from_device_owner(
port.device_owner)
else:
fip['instance_id'] = None
fip['instance_type'] = None
def list(self, all_tenants=False, **search_opts):
if not all_tenants:
tenant_id = self.request.user.tenant_id
# In Neutron, list_floatingips returns Floating IPs from
# all tenants when the API is called with admin role, so
# we need to filter them with tenant_id.
search_opts['tenant_id'] = tenant_id
port_search_opts = {'tenant_id': tenant_id}
else:
port_search_opts = {}
fips = self.client.list_floatingips(**search_opts)
fips = fips.get('floatingips')
# Get port list to add instance_id to floating IP list
# instance_id is stored in device_id attribute
ports = port_list(self.request, **port_search_opts)
port_dict = collections.OrderedDict([(p['id'], p) for p in ports])
for fip in fips:
self._set_instance_info(fip, port_dict.get(fip['port_id']))
return [FloatingIp(fip) for fip in fips]
def get(self, floating_ip_id):
fip = self.client.show_floatingip(floating_ip_id).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
def allocate(self, pool):
body = {'floatingip': {'floating_network_id': pool,
'tenant_id': self.request.user.project_id}}
fip = self.client.create_floatingip(body).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
def release(self, floating_ip_id):
self.client.delete_floatingip(floating_ip_id)
def associate(self, floating_ip_id, port_id):
        # NOTE: In the Neutron Horizon floating IP support, port_id is in
        # "<port_id>_<ip_address>" format to identify multiple ports.
pid, ip_address = port_id.split('_', 1)
update_dict = {'port_id': pid,
'fixed_ip_address': ip_address}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def disassociate(self, floating_ip_id):
update_dict = {'port_id': None}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def _get_reachable_subnets(self, ports):
if not is_enabled_by_config('enable_fip_topology_check', True):
# All subnets are reachable from external network
return set(
p.fixed_ips[0]['subnet_id'] for p in ports if p.fixed_ips
)
# Retrieve subnet list reachable from external network
ext_net_ids = [ext_net.id for ext_net in self.list_pools()]
gw_routers = [r.id for r in router_list(self.request)
if (r.external_gateway_info and
r.external_gateway_info.get('network_id')
in ext_net_ids)]
reachable_subnets = set([p.fixed_ips[0]['subnet_id'] for p in ports
if ((p.device_owner in
ROUTER_INTERFACE_OWNERS)
and (p.device_id in gw_routers))])
# we have to include any shared subnets as well because we may not
# have permission to see the router interface to infer connectivity
shared = set([s.id for n in network_list(self.request, shared=True)
for s in n.subnets])
return reachable_subnets | shared
def list_targets(self):
tenant_id = self.request.user.tenant_id
ports = port_list(self.request, tenant_id=tenant_id)
servers, has_more = nova.server_list(self.request)
server_dict = collections.OrderedDict(
[(s.id, s.name) for s in servers])
reachable_subnets = self._get_reachable_subnets(ports)
if is_service_enabled(self.request,
config_name='enable_lb',
ext_name='lbaas'):
# Also get the loadbalancer VIPs
vip_dict = {v['port_id']: v['name']
for v in self.client.list_vips().get('vips', [])}
else:
vip_dict = {}
targets = []
for p in ports:
# Remove network ports from Floating IP targets
if p.device_owner.startswith('network:'):
continue
port_id = p.id
server_name = server_dict.get(p.device_id) or vip_dict.get(port_id)
for ip in p.fixed_ips:
if ip['subnet_id'] not in reachable_subnets:
continue
target = {'name': '%s: %s' % (server_name, ip['ip_address']),
'id': '%s_%s' % (port_id, ip['ip_address']),
'port_id': port_id,
'instance_id': p.device_id}
targets.append(FloatingIpTarget(target))
return targets
def _target_ports_by_instance(self, instance_id):
if not instance_id:
return None
search_opts = {'device_id': instance_id}
return port_list(self.request, **search_opts)
def get_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
targets = [target for target in target_list
if target['instance_id'] == instance_id]
if not targets:
return None
return targets[0]['id']
else:
            # In Neutron one port can have multiple IP addresses, so this
            # method picks up the first one and generates the target id.
ports = self._target_ports_by_instance(instance_id)
if not ports:
return None
return '{0}_{1}'.format(ports[0].id,
ports[0].fixed_ips[0]['ip_address'])
def list_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
return [target['id'] for target in target_list
if target['instance_id'] == instance_id]
else:
ports = self._target_ports_by_instance(instance_id)
return ['{0}_{1}'.format(p.id, p.fixed_ips[0]['ip_address'])
for p in ports]
def is_simple_associate_supported(self):
        # NOTE: There are two reasons why simple association support
        # needs more consideration. (1) Neutron does not support the
# default floating IP pool at the moment. It can be avoided
# in case where only one floating IP pool exists.
# (2) Neutron floating IP is associated with each VIF and
# we need to check whether such VIF is only one for an instance
# to enable simple association support.
return False
def is_supported(self):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get('enable_router', True)
def get_ipver_str(ip_version):
"""Convert an ip version number to a human-friendly string."""
return IP_VERSION_DICT.get(ip_version, '')
@memoized
def neutronclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
c = neutron_client.Client(token=request.user.token.id,
auth_url=base.url_for(request, 'identity'),
endpoint_url=base.url_for(request, 'network'),
insecure=insecure, ca_cert=cacert)
return c
def list_resources_with_long_filters(list_method,
filter_attr, filter_values, **params):
"""List neutron resources with handling RequestURITooLong exception.
    If the filter parameters are long, the list resources API request leads to
    a 414 error (URL is too long). For such a case, this method splits the
    filter values given via filter_attr/filter_values into chunks and calls
    the specified list_method repeatedly.
:param list_method: Method used to retrieve resource list.
:param filter_attr: attribute name to be filtered. The value corresponding
to this attribute is specified by "filter_values".
If you want to specify more attributes for a filter condition,
pass them as keyword arguments like "attr2=values2".
:param filter_values: values of "filter_attr" to be filtered.
        If filter_values is too long and the total URI length exceeds the
        maximum length supported by the neutron server, filter_values will
        be split into sub-lists if filter_values is a list.
:param params: parameters to pass a specified listing API call
without any changes. You can specify more filter conditions
in addition to a pair of filter_attr and filter_values.
"""
try:
params[filter_attr] = filter_values
return list_method(**params)
except neutron_exc.RequestURITooLong as uri_len_exc:
# The URI is too long because of too many filter values.
# Use the excess attribute of the exception to know how many
# filter values can be inserted into a single request.
# We consider only the filter condition from (filter_attr,
# filter_values) and do not consider other filter conditions
# which may be specified in **params.
if type(filter_values) != list:
filter_values = [filter_values]
# Length of each query filter is:
# <key>=<value>& (e.g., id=<uuid>)
# The length will be key_len + value_maxlen + 2
all_filter_len = sum(len(filter_attr) + len(val) + 2
for val in filter_values)
allowed_filter_len = all_filter_len - uri_len_exc.excess
val_maxlen = max(len(val) for val in filter_values)
filter_maxlen = len(filter_attr) + val_maxlen + 2
        # Use integer division so that chunk_size is an int (required by range()).
        chunk_size = allowed_filter_len // filter_maxlen
resources = []
for i in range(0, len(filter_values), chunk_size):
params[filter_attr] = filter_values[i:i + chunk_size]
resources.extend(list_method(**params))
return resources
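# A minimal sketch (hypothetical helper, not part of the Horizon API) of how the fallback above
# behaves once chunk_size has been computed: the long filter value list is cut into slices, and
# the loop then calls list_method once per slice and concatenates the results.
def _chunked_filter_values_example(filter_values, chunk_size):
    return [filter_values[i:i + chunk_size]
            for i in range(0, len(filter_values), chunk_size)]
# e.g. _chunked_filter_values_example(['a', 'b', 'c', 'd', 'e'], 2)
#      returns [['a', 'b'], ['c', 'd'], ['e']]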
def network_list(request, **params):
LOG.debug("network_list(): params=%s", params)
networks = neutronclient(request).list_networks(**params).get('networks')
# Get subnet list to expand subnet info in network list.
subnets = subnet_list(request)
subnet_dict = dict([(s['id'], s) for s in subnets])
# Expand subnet list from subnet_id to values.
for n in networks:
# Due to potential timing issues, we can't assume the subnet_dict data
# is in sync with the network data.
n['subnets'] = [subnet_dict[s] for s in n.get('subnets', []) if
s in subnet_dict]
return [Network(n) for n in networks]
def network_list_for_tenant(request, tenant_id, **params):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
    If requested_networks is specified, it searches requested_networks only.
"""
LOG.debug("network_list_for_tenant(): tenant_id=%s, params=%s"
% (tenant_id, params))
# If a user has admin role, network list returned by Neutron API
# contains networks that do not belong to that tenant.
# So we need to specify tenant_id when calling network_list().
networks = network_list(request, tenant_id=tenant_id,
shared=False, **params)
# In the current Neutron API, there is no way to retrieve
# both owner networks and public networks in a single API call.
networks += network_list(request, shared=True, **params)
return networks
def network_get(request, network_id, expand_subnet=True, **params):
LOG.debug("network_get(): netid=%s, params=%s" % (network_id, params))
network = neutronclient(request).show_network(network_id,
**params).get('network')
if expand_subnet:
if request.user.tenant_id == network['tenant_id'] or network['shared']:
# Since the number of subnets per network must be small,
# call subnet_get() for each subnet instead of calling
# subnet_list() once.
network['subnets'] = [subnet_get(request, sid)
for sid in network['subnets']]
return Network(network)
def network_create(request, **kwargs):
"""Create a network object.
:param request: request context
:param tenant_id: (optional) tenant id of the network created
:param name: (optional) name of the network created
:returns: Network object
"""
LOG.debug("network_create(): kwargs = %s" % kwargs)
# In the case network profiles are being used, profile id is needed.
if 'net_profile_id' in kwargs:
kwargs['n1kv:profile'] = kwargs.pop('net_profile_id')
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body = {'network': kwargs}
network = neutronclient(request).create_network(body=body).get('network')
return Network(network)
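# Hypothetical usage sketch (illustration only; `request` comes from the Django view handling
# the call, and the attribute names follow the Neutron network API):
#   net = network_create(request, name='mynet', admin_state_up=True)
#   net.name_or_id   # -> 'mynet'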
def network_update(request, network_id, **kwargs):
LOG.debug("network_update(): netid=%s, params=%s" % (network_id, kwargs))
body = {'network': kwargs}
network = neutronclient(request).update_network(network_id,
body=body).get('network')
return Network(network)
def network_delete(request, network_id):
LOG.debug("network_delete(): netid=%s" % network_id)
neutronclient(request).delete_network(network_id)
def subnet_list(request, **params):
LOG.debug("subnet_list(): params=%s" % (params))
subnets = neutronclient(request).list_subnets(**params).get('subnets')
return [Subnet(s) for s in subnets]
def subnet_get(request, subnet_id, **params):
LOG.debug("subnet_get(): subnetid=%s, params=%s" % (subnet_id, params))
subnet = neutronclient(request).show_subnet(subnet_id,
**params).get('subnet')
return Subnet(subnet)
def subnet_create(request, network_id, **kwargs):
"""Create a subnet on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param cidr: (optional) subnet IP address range
:param ip_version: (optional) IP version (4 or 6)
:param gateway_ip: (optional) IP address of gateway
:param tenant_id: (optional) tenant id of the subnet created
:param name: (optional) name of the subnet created
:param subnetpool_id: (optional) subnetpool to allocate prefix from
:param prefixlen: (optional) length of prefix to allocate
:returns: Subnet object
    Although both cidr+ip_version and subnetpool_id+prefixlen are listed as
    optional, you MUST pass along one of the combinations to get a successful
result.
"""
LOG.debug("subnet_create(): netid=%s, kwargs=%s"
% (network_id, kwargs))
body = {'subnet': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnet'].update(kwargs)
subnet = neutronclient(request).create_subnet(body=body).get('subnet')
return Subnet(subnet)
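# Hypothetical usage sketches (illustration only) matching the note in the docstring above;
# exactly one of the two parameter combinations must be supplied:
#   subnet_create(request, network_id, cidr='192.168.10.0/24', ip_version=4, name='subnet1')
#   subnet_create(request, network_id, subnetpool_id=pool_id, prefixlen=26, name='subnet2')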
def subnet_update(request, subnet_id, **kwargs):
LOG.debug("subnet_update(): subnetid=%s, kwargs=%s" % (subnet_id, kwargs))
body = {'subnet': kwargs}
subnet = neutronclient(request).update_subnet(subnet_id,
body=body).get('subnet')
return Subnet(subnet)
def subnet_delete(request, subnet_id):
LOG.debug("subnet_delete(): subnetid=%s" % subnet_id)
neutronclient(request).delete_subnet(subnet_id)
def subnetpool_list(request, **params):
LOG.debug("subnetpool_list(): params=%s" % (params))
subnetpools = \
neutronclient(request).list_subnetpools(**params).get('subnetpools')
return [SubnetPool(s) for s in subnetpools]
def subnetpool_get(request, subnetpool_id, **params):
LOG.debug("subnetpool_get(): subnetpoolid=%s, params=%s" %
(subnetpool_id, params))
subnetpool = \
neutronclient(request).show_subnetpool(subnetpool_id,
**params).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_create(request, name, prefixes, **kwargs):
"""Create a subnetpool.
ip_version is auto-detected in back-end.
Parameters:
request -- Request context
name -- Name for subnetpool
prefixes -- List of prefixes for pool
Keyword Arguments (optional):
min_prefixlen -- Minimum prefix length for allocations from pool
max_prefixlen -- Maximum prefix length for allocations from pool
default_prefixlen -- Default prefix length for allocations from pool
default_quota -- Default quota for allocations from pool
shared -- Subnetpool should be shared (Admin-only)
tenant_id -- Owner of subnetpool
Returns:
SubnetPool object
"""
LOG.debug("subnetpool_create(): name=%s, prefixes=%s, kwargs=%s"
% (name, prefixes, kwargs))
body = {'subnetpool':
{'name': name,
'prefixes': prefixes,
}
}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnetpool'].update(kwargs)
subnetpool = \
neutronclient(request).create_subnetpool(body=body).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_update(request, subnetpool_id, **kwargs):
LOG.debug("subnetpool_update(): subnetpoolid=%s, kwargs=%s" %
(subnetpool_id, kwargs))
body = {'subnetpool': kwargs}
subnetpool = \
neutronclient(request).update_subnetpool(subnetpool_id,
body=body).get('subnetpool')
return SubnetPool(subnetpool)
def subnetpool_delete(request, subnetpool_id):
LOG.debug("subnetpool_delete(): subnetpoolid=%s" % subnetpool_id)
return neutronclient(request).delete_subnetpool(subnetpool_id)
def port_list(request, **params):
LOG.debug("port_list(): params=%s" % (params))
ports = neutronclient(request).list_ports(**params).get('ports')
return [Port(p) for p in ports]
def port_get(request, port_id, **params):
LOG.debug("port_get(): portid=%s, params=%s" % (port_id, params))
port = neutronclient(request).show_port(port_id, **params).get('port')
return Port(port)
def unescape_port_kwargs(**kwargs):
for key in kwargs:
if '__' in key:
kwargs[':'.join(key.split('__'))] = kwargs.pop(key)
return kwargs
def port_create(request, network_id, **kwargs):
"""Create a port on a specified network.
:param request: request context
    :param network_id: network id the port is created on
:param device_id: (optional) device id attached to the port
:param tenant_id: (optional) tenant id of the port created
:param name: (optional) name of the port created
:returns: Port object
"""
LOG.debug("port_create(): netid=%s, kwargs=%s" % (network_id, kwargs))
# In the case policy profiles are being used, profile id is needed.
if 'policy_profile_id' in kwargs:
kwargs['n1kv:profile'] = kwargs.pop('policy_profile_id')
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['port'].update(kwargs)
port = neutronclient(request).create_port(body=body).get('port')
return Port(port)
def port_delete(request, port_id):
LOG.debug("port_delete(): portid=%s" % port_id)
neutronclient(request).delete_port(port_id)
def port_update(request, port_id, **kwargs):
LOG.debug("port_update(): portid=%s, kwargs=%s" % (port_id, kwargs))
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': kwargs}
port = neutronclient(request).update_port(port_id, body=body).get('port')
return Port(port)
def profile_list(request, type_p, **params):
LOG.debug("profile_list(): "
"profile_type=%(profile_type)s, params=%(params)s",
{'profile_type': type_p, 'params': params})
if type_p == 'network':
profiles = neutronclient(request).list_network_profiles(
**params).get('network_profiles')
elif type_p == 'policy':
profiles = neutronclient(request).list_policy_profiles(
**params).get('policy_profiles')
return [Profile(n) for n in profiles]
def profile_get(request, profile_id, **params):
LOG.debug("profile_get(): "
"profileid=%(profileid)s, params=%(params)s",
{'profileid': profile_id, 'params': params})
profile = neutronclient(request).show_network_profile(
profile_id, **params).get('network_profile')
return Profile(profile)
def profile_create(request, **kwargs):
LOG.debug("profile_create(): kwargs=%s", kwargs)
body = {'network_profile': {}}
body['network_profile'].update(kwargs)
profile = neutronclient(request).create_network_profile(
body=body).get('network_profile')
return Profile(profile)
def profile_delete(request, profile_id):
LOG.debug("profile_delete(): profile_id=%s", profile_id)
neutronclient(request).delete_network_profile(profile_id)
def profile_update(request, profile_id, **kwargs):
LOG.debug("profile_update(): "
"profileid=%(profileid)s, kwargs=%(kwargs)s",
{'profileid': profile_id, 'kwargs': kwargs})
body = {'network_profile': kwargs}
profile = neutronclient(request).update_network_profile(
profile_id, body=body).get('network_profile')
return Profile(profile)
def profile_bindings_list(request, type_p, **params):
LOG.debug("profile_bindings_list(): "
"profile_type=%(profile_type)s params=%(params)s",
{'profile_type': type_p, 'params': params})
if type_p == 'network':
bindings = neutronclient(request).list_network_profile_bindings(
**params).get('network_profile_bindings')
elif type_p == 'policy':
bindings = neutronclient(request).list_policy_profile_bindings(
**params).get('policy_profile_bindings')
return [Profile(n) for n in bindings]
def router_create(request, **kwargs):
LOG.debug("router_create():, kwargs=%s" % kwargs)
body = {'router': {}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['router'].update(kwargs)
router = neutronclient(request).create_router(body=body).get('router')
return Router(router)
def router_update(request, r_id, **kwargs):
LOG.debug("router_update(): router_id=%s, kwargs=%s" % (r_id, kwargs))
body = {'router': {}}
body['router'].update(kwargs)
router = neutronclient(request).update_router(r_id, body=body)
return Router(router['router'])
def router_get(request, router_id, **params):
router = neutronclient(request).show_router(router_id,
**params).get('router')
return Router(router)
def router_list(request, **params):
routers = neutronclient(request).list_routers(**params).get('routers')
return [Router(r) for r in routers]
def router_delete(request, router_id):
neutronclient(request).delete_router(router_id)
def router_add_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
client = neutronclient(request)
return client.add_interface_router(router_id, body)
def router_remove_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
neutronclient(request).remove_interface_router(router_id, body)
def router_add_gateway(request, router_id, network_id):
body = {'network_id': network_id}
neutronclient(request).add_gateway_router(router_id, body)
def router_remove_gateway(request, router_id):
neutronclient(request).remove_gateway_router(router_id)
def router_static_route_list(request, router_id=None):
router = router_get(request, router_id)
try:
routes = [RouterStaticRoute(r) for r in router.routes]
except AttributeError:
LOG.debug("router_static_route_list(): router_id=%s, "
"router=%s", (router_id, router))
return []
return routes
def router_static_route_remove(request, router_id, route_ids):
currentroutes = router_static_route_list(request, router_id=router_id)
newroutes = []
for oldroute in currentroutes:
if oldroute.id not in route_ids:
newroutes.append({'nexthop': oldroute.nexthop,
'destination': oldroute.destination})
body = {'routes': newroutes}
new = router_update(request, router_id, **body)
return new
def router_static_route_add(request, router_id, newroute):
body = {}
currentroutes = router_static_route_list(request, router_id=router_id)
body['routes'] = [newroute] + [{'nexthop': r.nexthop,
'destination': r.destination}
for r in currentroutes]
new = router_update(request, router_id, **body)
return new
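# Hedged illustration (not part of the original module): the static-route
# dicts handled by router_static_route_add()/remove() use Neutron's
# 'destination' and 'nexthop' keys. The CIDR and next hop are illustrative.
def _example_add_static_route(request, router_id):
    return router_static_route_add(request, router_id,
                                   {'destination': '10.20.0.0/24',
                                    'nexthop': '10.0.0.254'})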
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(neutronclient(request).show_quota(tenant_id)['quota'])
def tenant_quota_update(request, tenant_id, **kwargs):
quotas = {'quota': kwargs}
return neutronclient(request).update_quota(tenant_id, quotas)
def agent_list(request, **params):
agents = neutronclient(request).list_agents(**params)
return [Agent(a) for a in agents['agents']]
def list_dhcp_agent_hosting_networks(request, network, **params):
agents = neutronclient(request).list_dhcp_agent_hosting_networks(network,
**params)
return [Agent(a) for a in agents['agents']]
def add_network_to_dhcp_agent(request, dhcp_agent, network_id):
body = {'network_id': network_id}
return neutronclient(request).add_network_to_dhcp_agent(dhcp_agent, body)
def remove_network_from_dhcp_agent(request, dhcp_agent, network_id):
return neutronclient(request).remove_network_from_dhcp_agent(dhcp_agent,
network_id)
def provider_list(request):
providers = neutronclient(request).list_service_providers()
return providers['service_providers']
def servers_update_addresses(request, servers, all_tenants=False):
"""Retrieve servers networking information from Neutron if enabled.
Should be used when up to date networking information is required,
and Nova's networking info caching mechanism is not fast enough.
"""
# Get all (filtered for relevant servers) information from Neutron
try:
ports = list_resources_with_long_filters(
port_list, 'device_id', [instance.id for instance in servers],
request=request)
fips = FloatingIpManager(request)
if fips.is_supported():
floating_ips = list_resources_with_long_filters(
fips.list, 'port_id', [port.id for port in ports],
all_tenants=all_tenants)
else:
floating_ips = []
networks = list_resources_with_long_filters(
network_list, 'id', set([port.network_id for port in ports]),
request=request)
except Exception:
error_message = _('Unable to connect to Neutron.')
LOG.error(error_message)
messages.error(request, error_message)
return
# Map instance to its ports
instances_ports = collections.defaultdict(list)
for port in ports:
instances_ports[port.device_id].append(port)
# Map port to its floating ips
ports_floating_ips = collections.defaultdict(list)
for fip in floating_ips:
ports_floating_ips[fip.port_id].append(fip)
# Map network id to its name
network_names = dict(((network.id, network.name) for network in networks))
for server in servers:
try:
addresses = _server_get_addresses(
request,
server,
instances_ports,
ports_floating_ips,
network_names)
except Exception as e:
LOG.error(e)
else:
server.addresses = addresses
def _server_get_addresses(request, server, ports, floating_ips, network_names):
def _format_address(mac, ip, type):
try:
version = netaddr.IPAddress(ip).version
except Exception as e:
error_message = _('Unable to parse IP address %s.') % ip
LOG.error(error_message)
messages.error(request, error_message)
raise e
return {u'OS-EXT-IPS-MAC:mac_addr': mac,
u'version': version,
u'addr': ip,
u'OS-EXT-IPS:type': type}
addresses = collections.defaultdict(list)
instance_ports = ports.get(server.id, [])
for port in instance_ports:
network_name = network_names.get(port.network_id)
if network_name is not None:
for fixed_ip in port.fixed_ips:
addresses[network_name].append(
_format_address(port.mac_address,
fixed_ip['ip_address'],
u'fixed'))
port_fips = floating_ips.get(port.id, [])
for fip in port_fips:
addresses[network_name].append(
_format_address(port.mac_address,
fip.floating_ip_address,
u'floating'))
return dict(addresses)
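# Hedged illustration (not part of the original module): after
# servers_update_addresses() runs, each server.addresses maps a network name
# to entries shaped like this dict (all values illustrative).
EXAMPLE_ADDRESS_ENTRY = {
    u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:00:00:01',
    u'version': 4,
    u'addr': u'10.0.0.5',
    u'OS-EXT-IPS:type': u'fixed',
}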
@memoized
def list_extensions(request):
extensions_list = neutronclient(request).list_extensions()
if 'extensions' in extensions_list:
return extensions_list['extensions']
else:
return {}
@memoized
def is_extension_supported(request, extension_alias):
extensions = list_extensions(request)
    for extension in extensions:
        if extension['alias'] == extension_alias:
            return True
    return False
def is_enabled_by_config(name, default=True):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get(name, default)
@memoized
def is_service_enabled(request, config_name, ext_name):
return (is_enabled_by_config(config_name) and
is_extension_supported(request, ext_name))
@memoized
def is_quotas_extension_supported(request):
return (is_enabled_by_config('enable_quotas', False) and
is_extension_supported(request, 'quotas'))
# Using this mechanism till a better plugin/sub-plugin detection
# mechanism is available.
# When using specific plugins the profile_support can be
# turned on if needed to configure and/or use profiles.
# Since this is a temporary mechanism used to detect profile_support
# @memoized is not being used.
# TODO(absubram): Change this config variable check with
# subplugin/plugin detection API when it becomes available.
def is_port_profiles_supported():
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
# Can be used to check for vendor specific plugin
profile_support = network_config.get('profile_support', None)
if str(profile_support).lower() == 'cisco':
return True
# FEATURE_MAP is used to define:
# - related neutron extension name (key: "extension")
# - corresponding dashboard config (key: "config")
# - RBAC policies (key: "poclies")
# If a key is not contained, the corresponding permission check is skipped.
FEATURE_MAP = {
'dvr': {
'extension': 'dvr',
'config': {
'name': 'enable_distributed_router',
'default': False,
},
'policies': {
'get': 'get_router:distributed',
'create': 'create_router:distributed',
'update': 'update_router:distributed',
}
},
'l3-ha': {
'extension': 'l3-ha',
'config': {'name': 'enable_ha_router',
'default': False},
'policies': {
'get': 'get_router:ha',
'create': 'create_router:ha',
'update': 'update_router:ha',
}
},
}
def get_feature_permission(request, feature, operation=None):
"""Check if a feature-specific field can be displayed.
    This method checks the permission for a feature-specific field.
    Such a field is usually provided through a Neutron extension.
:param request: Request Object
:param feature: feature name defined in FEATURE_MAP
:param operation (optional): Operation type. The valid value should be
defined in FEATURE_MAP[feature]['policies']
It must be specified if FEATURE_MAP[feature] has 'policies'.
"""
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
feature_info = FEATURE_MAP.get(feature)
if not feature_info:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The requested feature '%(feature)s' is unknown. "
"Please make sure to specify a feature defined "
"in FEATURE_MAP."))
# Check dashboard settings
feature_config = feature_info.get('config')
if feature_config:
if not network_config.get(feature_config['name'],
feature_config['default']):
return False
# Check policy
feature_policies = feature_info.get('policies')
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if feature_policies and policy_check:
policy_name = feature_policies.get(operation)
if not policy_name:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The 'operation' parameter for "
"get_feature_permission '%(feature)s' "
"is invalid. It should be one of %(allowed)s")
% {'feature': feature,
'allowed': ' '.join(feature_policies.keys())})
role = (('network', policy_name),)
if not policy.check(role, request):
return False
# Check if a required extension is enabled
feature_extension = feature_info.get('extension')
if feature_extension:
try:
return is_extension_supported(request, feature_extension)
except Exception:
msg = (_("Failed to check Neutron '%s' extension is not supported")
% feature_extension)
LOG.info(msg)
return False
    # If all checks pass, the feature is allowed.
return True
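# Hedged usage sketch (not part of the original module): checking whether the
# "distributed" field may be shown on a router create form. 'dvr' and 'create'
# come from FEATURE_MAP above; request is whatever the Horizon view passes in.
def _example_dvr_create_permission(request):
    return get_feature_permission(request, 'dvr', operation='create')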
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Fujicoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from decimal import Decimal
from PyQt5.QtGui import QFontMetrics
from electrum import bitcoin
from electrum.util import bfh
from electrum.transaction import TxOutput, push_script
from electrum.bitcoin import opcodes
from electrum.logging import Logger
from .qrtextedit import ScanQRTextEdit
from .completion_text_edit import CompletionTextEdit
from . import util
RE_ALIAS = r'(.*?)\s*\<([0-9A-Za-z]{1,})\>'
frozen_style = "QWidget { background-color:none; border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToEdit(CompletionTextEdit, ScanQRTextEdit, Logger):
def __init__(self, win):
CompletionTextEdit.__init__(self)
ScanQRTextEdit.__init__(self)
Logger.__init__(self)
self.win = win
self.amount_edit = win.amount_e
self.document().contentsChanged.connect(self.update_size)
self.heightMin = 0
self.heightMax = 150
self.c = None
self.textChanged.connect(self.check_text)
self.outputs = []
self.errors = []
self.is_pr = False
self.is_alias = False
self.scan_f = win.pay_to_URI
self.update_size()
self.payto_address = None
self.previous_payto = ''
def setFrozen(self, b):
self.setReadOnly(b)
self.setStyleSheet(frozen_style if b else normal_style)
for button in self.buttons:
button.setHidden(b)
def setGreen(self):
self.setStyleSheet(util.ColorScheme.GREEN.as_stylesheet(True))
def setExpired(self):
self.setStyleSheet(util.ColorScheme.RED.as_stylesheet(True))
def parse_address_and_amount(self, line):
x, y = line.split(',')
out_type, out = self.parse_output(x)
amount = self.parse_amount(y)
return TxOutput(out_type, out, amount)
def parse_output(self, x):
try:
address = self.parse_address(x)
return bitcoin.TYPE_ADDRESS, address
except:
script = self.parse_script(x)
return bitcoin.TYPE_SCRIPT, script
def parse_script(self, x):
script = ''
for word in x.split():
if word[0:3] == 'OP_':
opcode_int = opcodes[word]
assert opcode_int < 256 # opcode is single-byte
script += bitcoin.int_to_hex(opcode_int)
else:
bfh(word) # to test it is hex data
script += push_script(word)
return script
def parse_amount(self, x):
if x.strip() == '!':
return '!'
p = pow(10, self.amount_edit.decimal_point())
return int(p * Decimal(x.strip()))
def parse_address(self, line):
r = line.strip()
m = re.match('^'+RE_ALIAS+'$', r)
address = str(m.group(2) if m else r)
assert bitcoin.is_address(address)
return address
def check_text(self):
self.errors = []
if self.is_pr:
return
# filter out empty lines
lines = [i for i in self.lines() if i]
outputs = []
total = 0
self.payto_address = None
if len(lines) == 1:
data = lines[0]
if data.startswith("fujicoin:"):
self.scan_f(data)
return
try:
self.payto_address = self.parse_output(data)
except:
pass
if self.payto_address:
self.win.lock_amount(False)
return
is_max = False
for i, line in enumerate(lines):
try:
output = self.parse_address_and_amount(line)
except:
self.errors.append((i, line.strip()))
continue
outputs.append(output)
if output.value == '!':
is_max = True
else:
total += output.value
self.win.max_button.setChecked(is_max)
self.outputs = outputs
self.payto_address = None
if self.win.max_button.isChecked():
self.win.do_update_fee()
else:
self.amount_edit.setAmount(total if outputs else None)
self.win.lock_amount(total or len(lines)>1)
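    # Hedged illustration (not part of the original file): the multi-line
    # "pay to many" format parsed by check_text() above -- one
    # "<address or script>, <amount>" pair per line, where an amount of '!'
    # means "send the maximum available". The addresses are placeholders.
    EXAMPLE_PAYTO_LINES = ("fujicoin_address_1, 0.5\n"
                           "fujicoin_address_2, !\n")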
def get_errors(self):
return self.errors
def get_recipient(self):
return self.payto_address
def get_outputs(self, is_max):
if self.payto_address:
if is_max:
amount = '!'
else:
amount = self.amount_edit.get_amount()
_type, addr = self.payto_address
self.outputs = [TxOutput(_type, addr, amount)]
return self.outputs[:]
def lines(self):
return self.toPlainText().split('\n')
def is_multiline(self):
return len(self.lines()) > 1
def paytomany(self):
self.setText("\n\n\n")
self.update_size()
def update_size(self):
lineHeight = QFontMetrics(self.document().defaultFont()).height()
docHeight = self.document().size().height()
h = docHeight * lineHeight + 11
h = min(max(h, self.heightMin), self.heightMax)
self.setMinimumHeight(h)
self.setMaximumHeight(h)
self.verticalScrollBar().hide()
def qr_input(self):
data = super(PayToEdit,self).qr_input()
if data.startswith("fujicoin:"):
self.scan_f(data)
# TODO: update fee
def resolve(self):
self.is_alias = False
if self.hasFocus():
return
if self.is_multiline(): # only supports single line entries atm
return
if self.is_pr:
return
key = str(self.toPlainText())
key = key.strip() # strip whitespaces
if key == self.previous_payto:
return
self.previous_payto = key
if not (('.' in key) and (not '<' in key) and (not ' ' in key)):
return
parts = key.split(sep=',') # assuming single line
if parts and len(parts) > 0 and bitcoin.is_address(parts[0]):
return
try:
data = self.win.contacts.resolve(key)
except Exception as e:
self.logger.info(f'error resolving address/alias: {repr(e)}')
return
if not data:
return
self.is_alias = True
address = data.get('address')
name = data.get('name')
new_url = key + ' <' + address + '>'
self.setText(new_url)
self.previous_payto = new_url
#if self.win.config.get('openalias_autoadd') == 'checked':
self.win.contacts[key] = ('openalias', name)
self.win.contact_list.update()
self.setFrozen(True)
if data.get('type') == 'openalias':
self.validated = data.get('validated')
if self.validated:
self.setGreen()
else:
self.setExpired()
else:
self.validated = None
|
|
from datetime import (
datetime,
timedelta,
)
from io import StringIO
import warnings
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
MultiIndex,
NaT,
PeriodIndex,
Series,
Timestamp,
date_range,
option_context,
period_range,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
class TestDataFrameReprInfoEtc:
def test_repr_bytes_61_lines(self):
# GH#12857
lets = list("ACDEFGHIJKLMNOP")
slen = 50
nseqs = 1000
words = [[np.random.choice(lets) for x in range(slen)] for _ in range(nseqs)]
df = DataFrame(words).astype("U1")
assert (df.dtypes == object).all()
# smoke tests; at one point this raised with 61 but not 60
repr(df)
repr(df.iloc[:60, :])
repr(df.iloc[:61, :])
def test_repr_unicode_level_names(self, frame_or_series):
index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"])
obj = DataFrame(np.random.randn(2, 4), index=index)
if frame_or_series is Series:
obj = obj[0]
repr(obj)
def test_assign_index_sequences(self):
# GH#2200
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}).set_index(
["a", "b"]
)
index = list(df.index)
index[0] = ("faz", "boo")
df.index = index
repr(df)
# this travels an improper code path
index[0] = ["faz", "boo"]
df.index = index
repr(df)
def test_repr_with_mi_nat(self, float_string_frame):
df = DataFrame({"X": [1, 2]}, index=[[NaT, Timestamp("20130101")], ["a", "b"]])
result = repr(df)
expected = " X\nNaT a 1\n2013-01-01 b 2"
assert result == expected
def test_multiindex_na_repr(self):
# only an issue with long columns
df3 = DataFrame(
{
"A" * 30: {("A", "A0006000", "nuit"): "A0006000"},
"B" * 30: {("A", "A0006000", "nuit"): np.nan},
"C" * 30: {("A", "A0006000", "nuit"): np.nan},
"D" * 30: {("A", "A0006000", "nuit"): np.nan},
"E" * 30: {("A", "A0006000", "nuit"): "A"},
"F" * 30: {("A", "A0006000", "nuit"): np.nan},
}
)
idf = df3.set_index(["A" * 30, "C" * 30])
repr(idf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples(
[("a", 0, "foo"), ("b", 1, "bar")], names=["a", "b", "c"]
)
df = DataFrame({"value": [0, 1]}, index=index)
lines = repr(df).split("\n")
assert lines[2].startswith("a 0 foo")
def test_repr_to_string(
self,
multiindex_year_month_day_dataframe_random_data,
multiindex_dataframe_random_data,
):
ymd = multiindex_year_month_day_dataframe_random_data
frame = multiindex_dataframe_random_data
repr(frame)
repr(ymd)
repr(frame.T)
repr(ymd.T)
buf = StringIO()
frame.to_string(buf=buf)
ymd.to_string(buf=buf)
frame.T.to_string(buf=buf)
ymd.T.to_string(buf=buf)
def test_repr_empty(self):
# empty
repr(DataFrame())
# empty with index
frame = DataFrame(index=np.arange(1000))
repr(frame)
def test_repr_mixed(self, float_string_frame):
buf = StringIO()
# mixed
repr(float_string_frame)
float_string_frame.info(verbose=False, buf=buf)
@pytest.mark.slow
def test_repr_mixed_big(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)}, index=range(200)
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
repr(biggie)
def test_repr(self, float_frame):
buf = StringIO()
# small one
repr(float_frame)
float_frame.info(verbose=False, buf=buf)
# even smaller
float_frame.reindex(columns=["A"]).info(verbose=False, buf=buf)
float_frame.reindex(columns=["A", "B"]).info(verbose=False, buf=buf)
# exhausting cases in DataFrame.info
# columns but no index
no_index = DataFrame(columns=[0, 1, 3])
repr(no_index)
# no columns or index
DataFrame().info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
assert "\t" not in repr(df)
assert "\r" not in repr(df)
assert "a\n" not in repr(df)
def test_repr_dimensions(self):
df = DataFrame([[1, 2], [3, 4]])
with option_context("display.show_dimensions", True):
assert "2 rows x 2 columns" in repr(df)
with option_context("display.show_dimensions", False):
assert "2 rows x 2 columns" not in repr(df)
with option_context("display.show_dimensions", "truncate"):
assert "2 rows x 2 columns" not in repr(df)
@pytest.mark.slow
def test_repr_big(self):
# big one
biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200))
repr(biggie)
def test_repr_unsortable(self, float_frame):
# columns are not sortable
warn_filters = warnings.filters
warnings.filterwarnings("ignore", category=FutureWarning, module=".*format")
unsortable = DataFrame(
{
"foo": [1] * 50,
datetime.today(): [1] * 50,
"bar": ["bar"] * 50,
datetime.today() + timedelta(1): ["bar"] * 50,
},
index=np.arange(50),
)
repr(unsortable)
fmt.set_option("display.precision", 3, "display.column_space", 10)
repr(float_frame)
fmt.set_option("display.max_rows", 10, "display.max_columns", 2)
repr(float_frame)
fmt.set_option("display.max_rows", 1000, "display.max_columns", 1000)
repr(float_frame)
tm.reset_display_options()
warnings.filters = warn_filters
def test_repr_unicode(self):
uval = "\u03c3\u03c3\u03c3\u03c3"
df = DataFrame({"A": [uval, uval]})
result = repr(df)
ex_top = " A"
assert result.split("\n")[0].rstrip() == ex_top
df = DataFrame({"A": [uval, uval]})
result = repr(df)
assert result.split("\n")[0].rstrip() == ex_top
def test_unicode_string_with_unicode(self):
df = DataFrame({"A": ["\u05d0"]})
str(df)
def test_repr_unicode_columns(self):
df = DataFrame({"\u05d0": [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_str_to_bytes_raises(self):
# GH 26447
df = DataFrame({"A": ["abc"]})
msg = "^'str' object cannot be interpreted as an integer$"
with pytest.raises(TypeError, match=msg):
bytes(df)
def test_very_wide_info_repr(self):
df = DataFrame(np.random.randn(10, 20), columns=tm.rands_array(10, 20))
repr(df)
def test_repr_column_name_unicode_truncation_bug(self):
# #1906
df = DataFrame(
{
"Id": [7117434],
"StringCol": (
"Is it possible to modify drop plot code"
"so that the output graph is displayed "
"in iphone simulator, Is it possible to "
"modify drop plot code so that the "
"output graph is \xe2\x80\xa8displayed "
"in iphone simulator.Now we are adding "
"the CSV file externally. I want to Call "
"the File through the code.."
),
}
)
with option_context("display.max_columns", 20):
assert "StringCol" in repr(df)
def test_latex_repr(self):
result = r"""\begin{tabular}{llll}
\toprule
{} & 0 & 1 & 2 \\
\midrule
0 & $\alpha$ & b & c \\
1 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
with option_context("display.latex.escape", False, "display.latex.repr", True):
df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]])
assert result == df._repr_latex_()
# GH 12182
assert df._repr_latex_() is None
def test_repr_categorical_dates_periods(self):
# normal DataFrame
dt = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
p = period_range("2011-01", freq="M", periods=5)
df = DataFrame({"dt": dt, "p": p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
assert repr(df) == exp
df2 = DataFrame({"dt": Categorical(dt), "p": Categorical(p)})
assert repr(df2) == exp
@pytest.mark.parametrize("arg", [np.datetime64, np.timedelta64])
@pytest.mark.parametrize(
"box, expected",
[[Series, "0 NaT\ndtype: object"], [DataFrame, " 0\n0 NaT"]],
)
def test_repr_np_nat_with_object(self, arg, box, expected):
# GH 25445
result = repr(box([arg("NaT")], dtype=object))
assert result == expected
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
def test_frame_to_string_with_periodindex(self):
index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M")
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_datetime64tz_slice_non_truncate(self):
# GH 30263
df = DataFrame({"x": date_range("2019", periods=10, tz="UTC")})
expected = repr(df)
df = df.iloc[:, :5]
result = repr(df)
assert result == expected
|
|
# Built-in modules #
import sqlite3
from itertools import islice
# Internal modules #
from color import Color
from autopaths import FilePath
from cache import property_cached
################################################################################
def convert_to_sql(source, dest, keys, values, sql_field_types=None):
if sql_field_types is None: sql_field_types = {}
with sqlite3.connect(dest) as connection:
# Prepare #
cursor = connection.cursor()
fields = ','.join(['"' + f + '"' + ' ' + sql_field_types.get(f, 'text') for f in keys])
cursor.execute("CREATE table 'data' (%s)" % fields)
question_marks = '(' + ','.join(['?' for x in keys]) + ')'
sql_command = "INSERT into 'data' values " + question_marks
# Main loop #
try:
cursor.executemany(sql_command, values)
except (ValueError, sqlite3.OperationalError, sqlite3.ProgrammingError, sqlite3.InterfaceError) as err:
            first_elem = list(islice(values, 0, 1))
message1 = "The command <%s%s%s> on the database '%s' failed with error:\n %s%s%s"
message1 = message1 % (Color.cyn, sql_command, Color.end, dest, Color.u_red, err, Color.end)
message2 = "\n * %sThe bindings (%i) %s: %s \n * %sYou gave%s: %s"
message2 = message2 % (Color.b_ylw, len(keys), Color.end, keys, Color.b_ylw, Color.end, values)
message3 = "\n * %sFirst element (%i)%s: %s \n"
message3 = message3 % (Color.b_ylw, len(first_elem) if first_elem else 0, Color.end, first_elem)
raise Exception(message1 + message2 + message3)
except KeyboardInterrupt as err:
print "You interrupted the creation of the database. Committing everything done up to this point."
connection.commit()
cursor.close()
raise err
# Index #
try:
cursor.execute("CREATE INDEX if not exists 'data_index' on 'data' (id)")
except KeyboardInterrupt as err:
print "You interrupted the creation of the index. Committing everything done up to this point."
connection.commit()
cursor.close()
raise err
# Close #
connection.commit()
cursor.close()
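# Hedged usage sketch (not part of the original module): build a small 'data'
# table with convert_to_sql(). The destination path and rows are illustrative;
# note that the *source* argument is not used inside the function itself.
def _example_convert_to_sql(dest='/tmp/example_convert.db'):
    keys = ['id', 'name']
    values = [(1, 'alpha'), (2, 'beta')]
    convert_to_sql(None, dest, keys, values, sql_field_types={'id': 'integer'})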
################################################################################
class Database(FilePath):
def __init__(self, path, factory=None, isolation=None):
self.path = path
self.factory = factory
self.isolation = isolation
def __repr__(self):
"""Called when evaluating ``print seqs``."""
return '<%s object on "%s">' % (self.__class__.__name__, self.path)
def __enter__(self):
"""Called when entering the 'with' statement."""
return self
def __exit__(self, errtype, value, traceback):
"""Called when exiting the 'with' statement.
Enables us to close the database properly, even when
exceptions are raised."""
self.close()
def __iter__(self):
"""Called when evaluating ``for x in seqs: pass``."""
new_cursor = self.own_connection.cursor()
new_cursor.execute("SELECT * from '%s'" % self.main_table)
return new_cursor
def __contains__(self, key):
"""Called when evaluating ``"P81239A" in seqs``."""
self.own_cursor.execute("SELECT EXISTS(SELECT 1 FROM '%s' WHERE id=='%s' LIMIT 1);" % (self.main_table, key))
        return bool(self.own_cursor.fetchone()[0])
def __len__(self):
"""Called when evaluating ``len(seqs)``."""
self.own_cursor.execute("SELECT COUNT(1) FROM '%s';" % self.main_table)
        return int(self.own_cursor.fetchone()[0])
def __nonzero__(self):
"""Called when evaluating ``if seqs: pass``."""
        return len(self) != 0
def __getitem__(self, key):
"""Called when evaluating ``seqs[0] or seqs['P81239A']``."""
if isinstance(key, int):
self.own_cursor.execute("SELECT * from '%s' LIMIT 1 OFFSET %i;" % (self.main_table, key))
else:
key = key.replace("'","''")
self.own_cursor.execute("SELECT * from '%s' where id=='%s' LIMIT 1;" % (self.main_table, key))
return self.own_cursor.fetchone()
#------------------------------- Properties ------------------------------#
@property_cached
def connection(self):
"""To be used externally by the user."""
self.check_format()
con = sqlite3.connect(self.path, isolation_level=self.isolation)
con.row_factory = self.factory
return con
@property_cached
def cursor(self):
"""To be used externally by the user."""
return self.connection.cursor()
@property_cached
def own_connection(self):
"""To be used internally in this object."""
self.check_format()
return sqlite3.connect(self.path, isolation_level=self.isolation)
@property_cached
def own_cursor(self):
"""To be used internally in this object."""
return self.own_connection.cursor()
@property
def tables(self):
"""The complete list of SQL tables."""
self.own_connection.row_factory = sqlite3.Row
self.own_cursor.execute("select name from sqlite_master where type='table'")
result = [x[0].encode('ascii') for x in self.own_cursor.fetchall()]
self.own_connection.row_factory = self.factory
return result
@property_cached
def main_table(self):
if self.tables and not 'data' in self.tables:
raise Exception("The file '" + self.path + "' does not contain any 'data' table.")
return 'data'
@property
def fields(self):
"""The list of fields available for every entry."""
return self.get_fields_of_table(self.main_table)
@property
def first(self):
"""Just the first entry"""
return self[0]
@property
def last(self):
"""Just the last entry"""
return self.own_cursor.execute("SELECT * FROM data ORDER BY ROWID DESC LIMIT 1;").fetchone()
#-------------------------------- Methods --------------------------------#
def check_format(self):
if self.count_bytes == 0: return
with open(self.path, 'r') as f: header = f.read(15)
if header != 'SQLite format 3':
raise Exception("The file '" + self.path + "' is not an SQLite database.")
def get_fields_of_table(self, table):
"""Return the list of fields for a particular table
by querying the SQL for the complete list of column names."""
# Check the table exists #
if not table in self.tables: return []
# A PRAGMA statement will implicitly issue a commit, don't use #
self.own_cursor.execute("SELECT * from '%s' LIMIT 1" % table)
fields = [x[0] for x in self.own_cursor.description]
        self.own_cursor.fetchall()
return fields
def close(self):
self.cursor.close()
self.connection.close()
self.own_cursor.close()
self.own_connection.close()
def create(self, fields, types=None, overwrite=False):
"""Create a new database with a certain schema. For instance you could do this:
self.create({'id':'integer', 'source':'text', 'pubmed':'integer'})"""
# Check already exists #
if self.count_bytes > 0:
if overwrite: self.remove()
else: raise Exception("File exists already at '%s'" % self)
# Check types #
if types is None and isinstance(fields, dict): types=fields
if types is None: types = {}
# Do it #
fields = ','.join(['"' + f + '"' + ' ' + types.get(f, 'text') for f in fields])
self.own_cursor.execute("CREATE table '%s' (%s)" % (self.main_table, fields))
def index(self, column='id'):
try:
command = "CREATE INDEX if not exists 'main_index' on '%s' (%s)"
self.own_cursor.execute(command % (self.main_table, column))
except KeyboardInterrupt as err:
print "You interrupted the creation of the index. Not committing."
raise err
def add(self, entries):
"""Add entries to the main table.
The *entries* variable should be an iterable."""
question_marks = '(' + ','.join(['?' for x in self.fields]) + ')'
sql_command = "INSERT into 'data' values " + question_marks
try:
self.own_cursor.executemany(sql_command, entries)
except (ValueError, sqlite3.OperationalError, sqlite3.ProgrammingError, sqlite3.InterfaceError) as err:
            first_elem = list(islice(entries, 0, 1))
message1 = "The command <%s%s%s> on the database '%s' failed with error:\n %s%s%s"
message1 = message1 % (Color.cyn, sql_command, Color.end, self, Color.u_red, err, Color.end)
message2 = "\n * %sThe bindings (%i) %s: %s \n * %sYou gave%s: %s"
message2 = message2 % (Color.b_ylw, len(self.fields), Color.end, self.fields, Color.b_ylw, Color.end, entries)
message3 = "\n * %sFirst element (%i)%s: %s \n"
message3 = message3 % (Color.b_ylw, len(first_elem) if first_elem else 0, Color.end, first_elem)
message4 = "\n The original error was: '%s'" % err
raise Exception(message1 + message2 + message3 + message4)
except KeyboardInterrupt as err:
print "You interrupted the data insertion. Committing everything done up to this point."
self.own_connection.commit()
raise err
def add_by_steps(self, entries_by_step):
"""Add entries to the main table.
The *entries* variable should be an iterable yielding iterables."""
for entries in entries_by_step: self.add(entries)
|
|
#!/usr/bin/python
# ck_setup.py - checks the veyepar setup - reports what features are ready.
from process import process
from main.models import Show, Location, Client
from django.conf import settings
from django.template.defaultfilters import slugify
import pw
import rax_uploader
import archive_uploader
import steve.richardapi
import os
import xml.etree.ElementTree
import requests
# from the blender build scripts
# https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def p_print(text):
print(text)
return
def p_okg(text):
print((bcolors.OKGREEN + text +bcolors.ENDC))
return
def p_warn(text):
print((bcolors.WARNING + text +bcolors.ENDC))
return
def p_fail(text):
print((bcolors.FAIL + text +bcolors.ENDC))
return
class ck_setup(process):
client=None
show=None
def ck_pw(self,
service,
client_id_field=None,
cred_keys=[]):
try:
creds = getattr(pw, service)
except AttributeError as e:
# 'module' object has no attribute 'foo'
p_fail('pw.py does not have: "{}"'.format(service))
return False
keys = list(creds.keys())
print("keys for service {}: {}".format( service, keys ))
key = getattr(self.client, client_id_field, None)
# import code; code.interact(local=locals())
print('checking client.{} & pw.py for "{}" in: "{}={{..."'.format(
client_id_field,key,service))
if not key:
p_warn('client.{} is blank'.format(client_id_field))
return False
elif key in keys:
p_okg('key "{}" found in "{}" keys.'.format(key,service))
else:
p_warn('key "{}" not found in "{}" keys.'.format(key,service))
raise AttributeError
secrets = creds[key]
# try not to display secret values
print(('names of secrets in pw.py {}:{}'.format(
key, list(secrets.keys()) )))
        print(('checking for existence of {}'.format(cred_keys)))
for cred_key in cred_keys:
if cred_key not in secrets:
p_warn('"{}" NOT found.'.format(cred_key))
return secrets
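    # Hedged illustration (not part of the original script): the pw.py layout
    # that ck_pw() walks -- one dict per service, keyed by the value of the
    # client's id field, each mapping to that client's secrets. All names and
    # values here are made up.
    EXAMPLE_PW_LAYOUT = {
        'rax': {'example_client': {'api_key': 'xxx', 'user': 'xxx'}},
        'richard': {'example_client': {'host': 'videos.example.org',
                                       'api_key': 'xxx'}},
    }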
def ck_client(self):
try:
client_slug = self.options.client
except AttributeError as e:
p_fail("No client set in config file or command line.")
raise e
p_okg("client_slug: {}".format(client_slug))
try:
self.client = Client.objects.get(slug=client_slug)
except Client.DoesNotExist as e:
p_fail("client slug not found in db.")
raise e
return
def ck_show(self):
try:
show_slug = self.options.show
except AttributeError as e:
p_fail("No show set in config file or command line.")
raise e
p_okg("show_slug: {}".format(show_slug))
try:
self.show = Show.objects.get(slug=show_slug)
except Show.DoesNotExist as e:
p_fail( "show slug not found in db." )
raise e
return
def ck_dir(self):
if os.path.exists(self.show_dir):
print(("~/Videos/showdir exits: {}".format(self.show_dir)))
else:
# print(bcolors.FAIL + "~/Videos/showdir not created yet. run mk_dirs.py"+bcolors.ENDC)
p_fail("~/Videos/showdir not created yet. run mk_dirs.py")
def ck_title(self):
title_svg = self.client.title_svg
print(('client.title_svg: {}'.format(title_svg)))
title_svg = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
"bling",
title_svg)
p_okg(title_svg)
if not os.path.exists(title_svg):
p_fail("title_svg not found.")
raw_svg=open(title_svg).read()
tree=xml.etree.ElementTree.XMLID(raw_svg)
keys = [ 'client',
'show',
'title',
'title2',
'tag1',
'presenternames', # authors
'presentertitle',
'twitter_id',
'date',
'time',
'room',
'license', ]
print(("checking title_svg for object IDs: {}".format(keys) ))
print("found:")
found=[]
for key in keys:
if key in tree[1]:
found.append(key)
print((key,tree[1][key].text))
if not found:
p_warn("no keys found in {}".format(title_svg))
def ck_mlt(self):
mlt = self.client.template_mlt
print(('client.template_mlt: {}'.format(mlt)))
if not mlt:
p_fail("client.template_mlt not set.")
mlt = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
mlt)
p_okg(mlt)
if not os.path.exists(mlt):
p_fail("mlt not found.")
def ck_foot(self):
credits_img = self.client.credits
if not credits_img:
p_fail("client.credits not set.")
credits_img = os.path.join(
self.show_dir,
"assets",
credits_img)
if not os.path.exists(credits_img):
p_fail("credits_img not found: {}".format(credits_img))
p_okg("credits: {}".format(self.client.credits))
def ck_email(self):
if self.client.contacts:
p_okg("client.contacts: {}".format(self.client.contacts))
else:
p_warn("client.contacts: blank")
try:
p_okg("sender: {}".format(settings.EMAIL_SENDER))
# some of these are needed:
"""
EMAIL_USE_TLS
EMAIL_HOST
EMAIL_PORT
EMAIL_HOST_USER
EMAIL_HOST_PASSWORD
"""
except AttributeError as e:
p_warn("settings.EMAIL_SENDER not set.")
def ck_richard(self, secrets):
category_key = self.client.category_key
if category_key:
p_print("client.category_key: {}".format(category_key))
else:
p_warn("client.category_key not set.")
return False
print("checking for category...")
endpoint = "http://{}/api/v2/".format( secrets['host'] )
"""
print(endpoint)
try:
category = steve.richardapi.get_category(endpoint, category_key)
print("category: {}".format(category) )
except steve.richardapi.DoesNotExist:
print("category: {} not found".format(category_key) )
category_slug = slugify(category_key)
try:
category = steve.richardapi.get_category(
endpoint, category_slug)
print("category: {}".format(category) )
except steve.richardapi.DoesNotExist:
print("category slug: {} not found".format(category_slug) )
"""
categories = steve.richardapi.get_all_categories(endpoint)
cat_titles = [cat['title'] for cat in categories]
print(("found {} categories. last 5: {}".format(
len(categories), cat_titles[-5:] )))
if category_key in cat_titles:
p_okg('client.category_key:"{}" found.'.format(category_key))
else:
p_fail('client.category_key:"{}" NOT found.'.format(category_key))
return
def ck_cdn(self, secrets):
if self.client.rax_id:
rax_id = self.client.rax_id
p_okg("client.rax_id: {}".format(rax_id))
else:
p_warn("client.rax_id not set.")
return
if self.client.bucket_id:
bucket_id = self.client.bucket_id
p_okg("client.bucket_id: {}".format(bucket_id))
else:
p_fail("client.bucket_id not set.")
print("checking for valid bucket...")
cf = rax_uploader.auth(rax_id)
containers = cf.get_all_containers()
container_names = [container.name for container in containers]
print("container_names", container_names)
if bucket_id in container_names:
p_okg('"{}" found.'.format(bucket_id))
else:
p_fail('"{}" not found.'.format(bucket_id))
# not sure what to do with this...
# container = cf.get_container(bucket_id)
return
def ck_archive(self, secrets):
if self.client.archive_id:
archive_id = self.client.archive_id
p_okg("client.archive_id: {}".format(archive_id))
else:
p_warn("client.archive_id not set.")
return
if self.client.bucket_id:
bucket_id = self.client.bucket_id
p_okg("client.bucket_id: {}".format(bucket_id))
else:
p_fail("client.bucket_id not set.")
print("auth...")
service = archive_uploader.auth(archive_id)
print("checking for valid bucket...")
buckets = service.get_all_buckets()
bucket_names = [bucket.name for bucket in buckets]
print("bucket_names", bucket_names)
if bucket_id in bucket_names:
p_okg('"{}" found.'.format(bucket_id))
else:
p_fail('"{}" not found.'.format(bucket_id))
p_fail('Either create it or set client.bucket_id to one of the above.')
bucket = service.get_bucket(bucket_id,headers={})
# not sure what to do with this...
# container = cf.get_container(bucket_id)
return
def ck_youtube(self, secrets):
ret = True
print("looking for client_secrets.json...")
if not os.path.exists('client_secrets.json'):
p_fail("client_secrets.json NOT found.")
ret = False
print(("looking for {}".format(secrets['filename'])))
if not os.path.exists(secrets['filename']):
p_fail("{} NOT found.".format(secrets['filename']))
ret = False
return ret
def ck_schedule_api(self):
schedule_url = self.show.schedule_url
if schedule_url:
p_okg("show.schedule_url: {}".format(schedule_url))
else:
p_warn("no show.schedule_url")
return
if schedule_url.startswith('file'):
url = schedule_url[7:]
if not os.path.exists(url):
print(("{} NOT found.".format(url)))
else:
print("getting...")
session = requests.session()
response = session.get(schedule_url, verify=False)
text = response.text
print(text[:75])
auth = pw.addeps.get(self.show.slug, None)
if auth is not None:
print(("found in pw.addeps:{}".format(list(auth.keys()))))
def work(self):
"""
what has happened so far:
files=config.read(['veyepar.cfg','~/veyepar.cfg'
self.options, self.args = parser.parse_args()
"""
try:
self.ck_client()
self.ck_show()
self.set_dirs(self.show)
self.ck_dir()
self.ck_title()
self.ck_foot()
self.ck_mlt()
self.ck_schedule_api()
# email uses local_settings.py
# self.ck_pw("smtp","email_id")
self.ck_email()
secrets = self.ck_pw( "richard","richard_id",
['host', 'api_key', ])
if secrets:
self.ck_richard(secrets)
secrets = self.ck_pw("rax","rax_id",['api_key', 'user'])
if secrets:
self.ck_cdn(secrets)
secrets = self.ck_pw( "yt","youtube_id",['filename', ])
if secrets:
self.ck_youtube(secrets)
secrets = self.ck_pw( "archive","archive_id",['access','secret'])
if secrets:
self.ck_archive(secrets)
except Exception as e:
print("tests stopped at")
            print(getattr(e, 'message', e))
print(e.__class__, e)
# import code; code.interact(local=locals())
# raise e
return
if __name__ == '__main__':
p=ck_setup()
p.main()
|
|
from __future__ import with_statement
import random
from collections import deque
import pytest
from whoosh import fields, query
from whoosh.compat import u, izip, xrange, permutations
from whoosh.util.numeric import length_to_byte, byte_to_length
from whoosh.util.testing import TempIndex
def check_multi():
try:
import multiprocessing
import multiprocessing.synchronize # @UnusedImport
except ImportError:
pytest.skip()
else:
try:
from multiprocessing import Queue
Queue()
except OSError:
pytest.skip()
else:
return False
def _byten(n):
return byte_to_length(length_to_byte(n))
def _do_basic(writerclass):
# Create the domain data
# List of individual words added to the index
words = []
# List of string values added to the index
docs = []
# A ring buffer for creating string values
buf = deque()
for ls in permutations(u("abcd")):
word = "".join(ls)
# Remember this word is in the index (to check lexicon)
words.append(word)
# Add this word on to the end, pop the first word off to create N word
# documents where N <= 10
buf.append(word)
if len(buf) > 10:
buf.popleft()
# Create a copy of the buffer and shuffle it to create a document value
# and add it to the list of document values
doc = list(buf)
random.shuffle(doc)
docs.append(" ".join(doc))
# Shuffle the list of document values
random.shuffle(docs)
schema = fields.Schema(text=fields.TEXT(stored=True, spelling=True,
vector=True),
row=fields.NUMERIC(stored=True))
with TempIndex(schema, storage_debug=True) as ix:
# Add the domain data to the index
with writerclass(ix, procs=3) as w:
for i, value in enumerate(docs):
w.add_document(text=value, row=i)
with ix.searcher() as s:
r = s.reader()
# Check the lexicon
for word, term in izip(words, r.field_terms("text")):
assert word == term
# Check the doc count
assert r.doc_count_all() == len(docs)
# Check the word graph
assert r.has_word_graph("text")
flat = [w.decode("latin1") for w in r.word_graph("text").flatten()]
assert flat == words
# Check there are lengths
total = sum(r.doc_field_length(docnum, "text", 0)
for docnum in xrange(r.doc_count_all()))
assert total > 0
# Check per-doc info
for i, value in enumerate(docs):
pieces = value.split()
docnum = s.document_number(row=i)
# Check stored value
sv = r.stored_fields(docnum)
assert sv["text"] == value
# Check vectors
vr = r.vector(docnum, "text")
# Get the terms and positions from the vector matcher
iv = list(vr.items_as("positions"))
# What the vector should look like
ov = sorted((text, [i]) for i, text in enumerate(pieces))
assert iv == ov
# Check field length
assert r.doc_field_length(docnum, "text") == len(pieces)
def test_basic_serial():
check_multi()
from whoosh.multiproc import SerialMpWriter
_do_basic(SerialMpWriter)
def test_basic_multi():
check_multi()
from whoosh.multiproc import MpWriter
_do_basic(MpWriter)
def test_no_add():
check_multi()
from whoosh.multiproc import MpWriter
schema = fields.Schema(text=fields.TEXT(stored=True, spelling=True,
vector=True))
with TempIndex(schema) as ix:
with ix.writer(procs=3) as w:
assert type(w) == MpWriter
def _do_merge(writerclass):
schema = fields.Schema(key=fields.ID(stored=True, unique=True),
value=fields.TEXT(stored=True, spelling=True,
vector=True))
domain = {"a": "aa", "b": "bb cc", "c": "cc dd ee", "d": "dd ee ff gg",
"e": "ee ff gg hh ii", "f": "ff gg hh ii jj kk",
"g": "gg hh ii jj kk ll mm", "h": "hh ii jj kk ll mm nn oo",
"i": "ii jj kk ll mm nn oo pp qq ww ww ww ww ww ww",
"j": "jj kk ll mm nn oo pp qq rr ss",
"k": "kk ll mm nn oo pp qq rr ss tt uu"}
with TempIndex(schema) as ix:
w = ix.writer()
for key in "abc":
w.add_document(key=u(key), value=u(domain[key]))
w.commit()
w = ix.writer()
for key in "def":
w.add_document(key=u(key), value=u(domain[key]))
w.commit(merge=False)
w = writerclass(ix, procs=3)
del domain["b"]
w.delete_by_term("key", u("b"))
domain["e"] = "xx yy zz"
w.update_document(key=u("e"), value=u(domain["e"]))
for key in "ghijk":
w.add_document(key=u(key), value=u(domain[key]))
w.commit(optimize=True)
assert len(ix._segments()) == 1
with ix.searcher() as s:
r = s.reader()
assert s.doc_count() == len(domain)
assert "".join(r.field_terms("key")) == "acdefghijk"
assert " ".join(r.field_terms("value")) == "aa cc dd ee ff gg hh ii jj kk ll mm nn oo pp qq rr ss tt uu ww xx yy zz"
for key in domain:
docnum = s.document_number(key=key)
assert docnum is not None
length = r.doc_field_length(docnum, "value")
assert length
assert _byten(len(domain[key].split())) == length
sf = r.stored_fields(docnum)
assert domain[key] == sf["value"]
words = sorted(set((" ".join(domain.values())).split()))
assert words == list(r.field_terms("value"))
for word in words:
hits = s.search(query.Term("value", word))
for hit in hits:
assert word in hit["value"].split()
def test_merge_serial():
check_multi()
from whoosh.multiproc import SerialMpWriter
_do_merge(SerialMpWriter)
def test_merge_multi():
check_multi()
from whoosh.multiproc import MpWriter
_do_merge(MpWriter)
def test_no_score_no_store():
check_multi()
from whoosh.multiproc import MpWriter
schema = fields.Schema(a=fields.ID, b=fields.KEYWORD)
domain = {}
keys = list(u("abcdefghijklmnopqrstuvwx"))
random.shuffle(keys)
words = u("alfa bravo charlie delta").split()
for i, key in enumerate(keys):
domain[key] = words[i % len(words)]
with TempIndex(schema) as ix:
with MpWriter(ix, procs=3) as w:
for key, value in domain.items():
w.add_document(a=key, b=value)
with ix.searcher() as s:
for word in words:
r = s.search(query.Term("b", word))
assert len(r) == 6
def test_multisegment():
check_multi()
from whoosh.multiproc import MpWriter
schema = fields.Schema(a=fields.TEXT(stored=True, spelling=True,
vector=True))
words = u("alfa bravo charlie delta echo").split()
with TempIndex(schema) as ix:
with ix.writer(procs=3, multisegment=True, batchsize=10) as w:
assert w.__class__ == MpWriter
assert w.multisegment
for ls in permutations(words, 3):
w.add_document(a=u(" ").join(ls))
assert len(ix._segments()) == 3
with ix.searcher() as s:
for word in words:
r = s.search(query.Term("a", word))
for hit in r:
assert word in hit["a"].split()
def test_batchsize_eq_doccount():
check_multi()
schema = fields.Schema(a=fields.KEYWORD(stored=True))
with TempIndex(schema) as ix:
with ix.writer(procs=4, batchsize=10) as w:
for i in xrange(10):
w.add_document(a=u(str(i)))
def test_finish_segment():
check_multi()
from whoosh.multiproc import MpWriter
schema = fields.Schema(a=fields.KEYWORD(stored=True))
with TempIndex(schema) as ix:
w = MpWriter(ix, procs=2, batchsize=1, multisegment=False,
limitmb=0.00001)
for i in range(9):
w.add_document(a=u(chr(65 + i) * 50))
w.commit()
|
|
"""
MRS.analysis
------------
Analysis functions for analysis of MRS data. These include a variety of
functions that can be called independently, or through the interface provided
in :mod:`MRS.api`.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import nitime as nt
import nitime.timeseries as nts
import nitime.analysis as nta
import scipy.fftpack as fft
import scipy.integrate as spi
from scipy.integrate import trapz, simps
import scipy.stats as stats
import MRS.leastsqbound as lsq
import MRS.utils as ut
import MRS.optimize as mopt
def bootstrap_stat(arr, stat=np.mean, n_iters=1000, alpha=0.05):
"""
Produce a boot-strap distribution of the mean of an array on axis 0
Parameters
---------
arr : ndarray
The array with data to be bootstrapped
stat : callable
        The statistical function to call. Will be called as `stat(arr, 0)`, so
        it needs to accept that call signature.
n_iters : int
The number of bootstrap iterations to sample
alpha : float
The confidence interval size will be 1-alpha
"""
stat_orig = stat(arr, 0)
boot_arr = np.empty((arr.shape[-1] , n_iters))
for ii in xrange(n_iters):
this_arr=arr[np.random.random_integers(0, arr.shape[0]-1, arr.shape[0])]
boot_arr[:, ii] = stat(this_arr, 0)
eb = np.array([stats.scoreatpercentile(boot_arr[xx], 1-(alpha/2)) -
stats.scoreatpercentile(boot_arr[xx], alpha/2)
for xx in range(boot_arr.shape[0])])
return stat_orig, eb
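# Hedged usage sketch (not part of the original module): bootstrap the mean of
# a toy data set with observations on axis 0, matching the convention above.
def _example_bootstrap_stat():
    arr = np.random.randn(100, 4)
    mean_est, err_bars = bootstrap_stat(arr, stat=np.mean, n_iters=200)
    return mean_est, err_bars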
def separate_signals(data, w_idx=[1, 2, 3]):
"""
Separate the water and non-water data from each other
Parameters
----------
data : nd array
FID signal with shape (transients, echos, coils, time-points)
w_idx : list (optional)
Indices into the 'transients' (0th) dimension of the data for the signal
that is not water-suppressed
Returns
-------
water_data, w_supp_data : tuple
The first element is an array with the transients in the data in which
no water suppression was applied. The second element is an array with
the transients in which water suppression was applied
"""
# The transients are the first dimension in the data
idxes_w = np.zeros(data.shape[0], dtype=bool)
idxes_w[w_idx] = True
# Data with water unsuppressed (first four transients - we throw away the
# first one which is probably crap):
w_data = data[np.where(idxes_w)]
# Data with water suppressed (the rest of the transients):
idxes_nonw = np.zeros(data.shape[0], dtype=bool)
idxes_nonw[np.where(~idxes_w)] = True
idxes_nonw[0] = False
w_supp_data = data[np.where(idxes_nonw)]
return w_data, w_supp_data
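# Hedged usage sketch (not part of the original module): split a toy FID array
# of 8 transients into water (transients 1-3) and water-suppressed parts
# (transients 4-7; transient 0 is dropped, as described above).
def _example_separate_signals():
    data = np.random.randn(8, 2, 4, 16)   # (transients, echos, coils, points)
    w_data, w_supp_data = separate_signals(data, w_idx=[1, 2, 3])
    return w_data.shape, w_supp_data.shape  # (3, 2, 4, 16) and (4, 2, 4, 16)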
def coil_combine(data, w_idx=[1,2,3], coil_dim=2, sampling_rate=5000.):
"""
Combine data across coils based on the amplitude of the water peak,
according to:
.. math::
X = \sum_{i}{w_i S_i}
Where X is the resulting combined signal, $S_i$ are the individual coil
signals and $w_i$ are calculated as:
.. math::
w_i = mean(S_i) / var (S_i)
following [Hall2013]_. In addition, we apply a phase-correction, so that
all the phases of the signals from each coil are 0
Parameters
----------
data : float array
The data as it comes from the scanner, with shape (transients, echos,
coils, time points)
w_idx : list
The indices to the non-water-suppressed transients. Per default we take
the 2nd-4th transients. We dump the first one, because it seems to be
quite different than the rest of them...
coil_dim : int
The dimension on which the coils are represented. Default: 2
    sampling_rate : float
The sampling rate in Hz. Default : 5000.
References
----------
.. [Hall2013] Emma L. Hall, Mary C. Stephenson, Darren Price, Peter
G. Morris (2013). Methodology for improved detection of low
concentration metabolites in MRS: Optimised combination of signals from
multi-element coil arrays. Neuroimage 86: 35-42.
.. [Wald1997] Wald, L. and Wright, S. (1997). Theory and application of
array coils in MR spectroscopy. NMR in Biomedicine, 10: 394-410.
.. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second
edition. Wiley (West Sussex, UK).
"""
w_data, w_supp_data = separate_signals(data, w_idx)
fft_w = np.fft.fftshift(fft.fft(w_data))
fft_w_supp = np.fft.fftshift(fft.fft(w_supp_data))
freqs_w = np.linspace(-sampling_rate/2.0,
sampling_rate/2.0,
w_data.shape[-1])
# To determine phase and amplitude, fit a Lorentzian line-shape to each
# coils data in each trial:
# No bounds except for on the phase:
bounds = [(None,None),
(0,None),
(0,None),
(-np.pi, np.pi),
(None,None),
(None, None)]
n_params = len(bounds)
params = np.zeros(fft_w.shape[:-1] + (n_params,))
# Let's fit a Lorentzian line-shape to each one of these:
for repeat in range(w_data.shape[0]):
for echo in range(w_data.shape[1]):
for coil in range(w_data.shape[2]):
sig = fft_w[repeat, echo, coil]
# Use the private function to do this:
params[repeat, echo, coil] = _do_lorentzian_fit(freqs_w,
sig, bounds)
# The area parameter stands for the magnitude:
area_w = params[..., 1]
# In each coil, we derive S/(N^2):
s = np.mean(area_w.reshape(-1, area_w.shape[-1]), 0)
n = np.var(area_w.reshape(-1, area_w.shape[-1]), 0)
amp_weight = s/n
# Normalize to sum to 1:
amp_weight = amp_weight / np.sum(amp_weight)
# Next, we make sure that all the coils have the same phase. We will use
# the phase of the Lorentzian to align the phases:
phase_param = params[..., 3]
zero_phi_w = np.mean(phase_param.reshape(-1, phase_param.shape[-1]),0)
# This recalculates the weight with the phase alignment (see page 397 in
# Wald paper):
weight = amp_weight * np.exp(-1j * zero_phi_w)
# Multiply each one of the signals by its coil-weights and average across
# coils:
na = np.newaxis # Short-hand
# Collapse across coils for the combination in both the water
weighted_w_data = np.mean(np.fft.ifft(np.fft.fftshift(
weight[na, na, :, na] * fft_w)), coil_dim)
weighted_w_supp_data = np.mean(np.fft.ifft(np.fft.fftshift(
weight[na, na, : ,na] * fft_w_supp)) , coil_dim)
    # Normalize each series by its mean absolute value:
def normalize_this(x):
return x * (x.shape[-1] / (np.sum(np.abs(x))))
weighted_w_data = normalize_this(weighted_w_data)
weighted_w_supp_data = normalize_this(weighted_w_supp_data)
# Squeeze in case that some extraneous dimensions were introduced (can
# happen for SV data, for example)
return weighted_w_data.squeeze(), weighted_w_supp_data.squeeze()
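# Editor's illustrative sketch (not part of the original pipeline): given
# hypothetical arrays of fitted water-peak areas and phases with shape
# (transients, echos, coils) -- i.e. params[..., 1] and params[..., 3] in
# coil_combine above -- this reproduces the S/N^2 amplitude weighting and the
# phase alignment used there.
def _example_coil_weights(area_w, phase_w):
    s = np.mean(area_w.reshape(-1, area_w.shape[-1]), 0)
    n = np.var(area_w.reshape(-1, area_w.shape[-1]), 0)
    amp_weight = s / n
    # Normalize to sum to 1:
    amp_weight = amp_weight / np.sum(amp_weight)
    # Align the phases of all coils to 0:
    zero_phi_w = np.mean(phase_w.reshape(-1, phase_w.shape[-1]), 0)
    return amp_weight * np.exp(-1j * zero_phi_w)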
def get_spectra(data, filt_method=dict(lb=0.1, filt_order=256),
spect_method=dict(NFFT=1024, n_overlap=1023, BW=2),
phase_zero=None, line_broadening=None, zerofill=None):
"""
Derive the spectra from MRS data
Parameters
----------
data : nitime TimeSeries class instance or array
Time-series object with data of shape (echos, transients, time-points),
containing the FID data. If an array is provided, we will assume that a
sampling rate of 5000.0 Hz was used
filt_method : dict
Details for the filtering method. A FIR zero phase-delay method is used
with parameters set according to these parameters
    spect_method : dict
        Details for the spectral analysis. Per default, we use
        dict(NFFT=1024, n_overlap=1023, BW=2)
line_broadening : float
Linewidth for apodization (in Hz).
zerofill : int
Number of bins to zero fill with.
Returns
-------
    f :
        The center frequencies of the frequency bins represented in the
        spectra
spectrum_water, spectrum_water_suppressed:
The first spectrum is for the data with water not suppressed and
the second spectrum is for the water-suppressed data.
Notes
-----
This function performs the following operations:
1. Filtering.
2. Apodizing/windowing. Optionally, this is done with line-broadening (see
       page 92 of [Keeler2005]_).
3. Spectral analysis.
.. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second
edition. Wiley (West Sussex, UK).
"""
if not isinstance(data, nt.TimeSeries):
data = nt.TimeSeries(data, sampling_rate=5000.0)
if filt_method is not None:
filtered = nta.FilterAnalyzer(data, **filt_method).fir
else:
filtered = data
if line_broadening is not None:
lbr_time = line_broadening * np.pi # Conversion from Hz to
# time-constant, see Keeler page 94
else:
lbr_time = 0
apodized = ut.line_broadening(filtered, lbr_time)
if zerofill is not None:
new_apodized = np.concatenate([apodized.data,
np.zeros(apodized.shape[:-1] + (zerofill,))], -1)
apodized = nt.TimeSeries(new_apodized,
sampling_rate=apodized.sampling_rate)
S = nta.SpectralAnalyzer(apodized,
method=dict(NFFT=spect_method['NFFT'],
n_overlap=spect_method['n_overlap']),
BW=spect_method['BW'])
f, c = S.spectrum_fourier
return f, c
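# Editor's usage sketch: with water-unsuppressed data shaped (transients,
# echos, time points) and the module's nitime import, this derives spectra
# with a 5 Hz line-broadening (converted internally to a time constant of
# 5 * pi, see Keeler p. 94). The data array itself is assumed, not provided.
def _example_get_spectra(w_data):
    ts = nt.TimeSeries(w_data, sampling_rate=5000.0)
    return get_spectra(ts, line_broadening=5.0)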
def subtract_water(w_sig, w_supp_sig):
"""
    Subtract the residual water signal from the water-suppressed data.
    The water-suppressed signal is corrected using the signal that is not
    water-suppressed, in order to get rid of the residual water peak.
Parameters
----------
    w_sig : array with shape (n_reps, n_echos, n_points)
        A signal with water unsuppressed
    w_supp_sig : array with shape (n_reps, n_echos, n_points)
        A signal with water suppressed.
Returns
-------
The water suppressed signal with the additional subtraction of a scaled
version of the signal that is presumably just due to water.
"""
mean_nw = np.mean(w_supp_sig,0)
water_only = np.mean(w_sig - mean_nw, 0)
mean_water = np.mean(w_sig, 0)
scale_factor = water_only/mean_nw
corrected = w_supp_sig - water_only/scale_factor[...,0,np.newaxis]
return corrected
def fit_lorentzian(spectra, f_ppm, lb=2.6, ub=3.6):
"""
Fit a lorentzian function to spectra
This is used in estimation of the water peak and for estimation of the NAA
peak.
Parameters
----------
spectra : array of shape (n_transients, n_points)
Typically the sum of the on/off spectra in each transient.
    f_ppm : array
        The frequency axis, in ppm
    lb, ub : floats
        In ppm, the range over which optimization is bounded
"""
# We are only going to look at the interval between lb and ub
idx = ut.make_idx(f_ppm, lb, ub)
n_points = np.abs(idx.stop - idx.start)
n_params = 6
# Set the bounds for the optimization
bounds = [(lb,ub), #peak
(0,None), #area
(0,None), #hwhm
(-np.pi/2, np.pi/2), #phase
(None,None), #offset
(None, None)] #drift
model = np.empty((spectra.shape[0], n_points))
signal = np.empty((spectra.shape[0], n_points))
params = np.empty((spectra.shape[0], n_params))
for ii, xx in enumerate(spectra):
# We fit to the real spectrum:
signal[ii] = np.real(xx[idx])
params[ii] = _do_lorentzian_fit(f_ppm[idx], np.real(signal[ii]),
bounds=bounds)
model[ii] = ut.lorentzian(f_ppm[idx], *params[ii])
return model, signal, params
def _do_lorentzian_fit(freqs, signal, bounds=None):
"""
Helper function, so that Lorentzian fit can be generalized to different
frequency scales (Hz and ppm).
"""
# Use the signal for a rough estimate of the parameters for initialization:
max_idx = np.argmax(np.real(signal))
max_sig = np.max(np.real(signal))
initial_f0 = freqs[max_idx]
half_max_idx = np.argmin(np.abs(np.real(signal) - max_sig/2))
initial_hwhm = np.abs(initial_f0 - freqs[half_max_idx])
# Everything should be treated as real, except for the phase!
    initial_ph = np.angle(signal[signal.shape[-1] // 2])
initial_off = np.min(np.real(signal))
initial_drift = 0
initial_a = (np.sum(np.real(signal)[max_idx:max_idx +
np.abs(half_max_idx)*2]) ) * 2
initial = (initial_f0,
initial_a,
initial_hwhm,
initial_ph,
initial_off,
initial_drift)
params, _ = lsq.leastsqbound(mopt.err_func, initial,
args=(freqs, np.real(signal), ut.lorentzian),
bounds=bounds)
return params
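# Editor's note: the actual line-shape lives in ut.lorentzian; the sketch below
# is an assumed reference implementation, included only to make the six fitted
# parameters explicit. Their order (peak location, area, hwhm, phase, offset,
# drift) is taken from the bounds defined in fit_lorentzian above.
def _lorentzian_sketch(freqs, f0, area, hwhm, phase, offset, drift):
    df = freqs - f0
    # Absorptive and dispersive components, mixed according to the phase:
    absorptive = (area / np.pi) * hwhm / (df ** 2 + hwhm ** 2)
    dispersive = (area / np.pi) * df / (df ** 2 + hwhm ** 2)
    return (np.cos(phase) * absorptive + np.sin(phase) * dispersive +
            offset + drift * freqs)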
def _two_func_initializer(freqs, signal):
"""
This is a helper function for heuristic estimation of the initial parameters
used in fitting dual peak functions
_do_two_lorentzian_fit
_do_two_gaussian_fit
"""
# Use the signal for a rough estimate of the parameters for initialization:
r_signal = np.real(signal)
# The local maxima have a zero-crossing in their derivative, so we start by
# calculating the derivative:
diff_sig = np.diff(r_signal)
# We look for indices that have zero-crossings (in the right direction - we
# are looking for local maxima, not minima!)
local_max_idx = []
for ii in range(len(diff_sig)-1):
if diff_sig[ii]>0 and diff_sig[ii+1]<0:
local_max_idx.append(ii)
# Array-ify it before moving on:
local_max_idx = np.array(local_max_idx)
# Our guesses for the location of the interesting local maxima is the two
# with the largest signals in them:
max_idx = local_max_idx[np.argsort(r_signal[local_max_idx])[::-1][:2]]
# We sort again, so that we can try to get the first one to be the left peak:
max_idx = np.sort(max_idx)
if len(max_idx)==1:
max_idx = [max_idx[0], max_idx[0]]
# And thusly:
max_idx_1 = max_idx[0]
max_idx_2 = max_idx[1]
# A few of the rest just follow:
max_sig_1 = r_signal[max_idx_1]
max_sig_2 = r_signal[max_idx_2]
initial_amp_1 = max_sig_1
initial_amp_2 = max_sig_2
initial_f0_1 = freqs[max_idx_1]
initial_f0_2 = freqs[max_idx_2]
half_max_idx_1 = np.argmin(np.abs(np.real(signal) - max_sig_1/2))
initial_hwhm_1 = np.abs(initial_f0_1 - freqs[half_max_idx_1])
half_max_idx_2 = np.argmin(np.abs(np.real(signal) - max_sig_2/2))
initial_hwhm_2 = np.abs(initial_f0_2 - freqs[half_max_idx_2])
# Everything should be treated as real, except for the phase!
initial_ph_1 = np.angle(signal[max_idx_1])
initial_ph_2 = np.angle(signal[max_idx_2])
# We only fit one offset and one drift, for both functions together!
initial_off = np.min(np.real(signal))
initial_drift = 0
initial_a_1 = (np.sum(np.real(signal)[max_idx_1:max_idx_1 +
np.abs(half_max_idx_1)*2]) ) * 2
initial_a_2 = (np.sum(np.real(signal)[max_idx_2:max_idx_2 +
np.abs(half_max_idx_2)*2]) ) * 2
return (initial_f0_1,
initial_f0_2,
initial_amp_1,
initial_amp_2,
initial_a_1,
initial_a_2,
initial_hwhm_1,
initial_hwhm_2,
initial_ph_1,
initial_ph_2,
initial_off,
initial_drift)
def _do_two_lorentzian_fit(freqs, signal, bounds=None):
"""
Helper function for the Two-Lorentzian fit
"""
initial = _two_func_initializer(freqs, signal)
# Edit out the ones we want:
initial = (initial[0], initial[1],
initial[4], initial[5],
initial[6], initial[7],
initial[8], initial[9],
initial[10], initial[11])
    # We want to preferentially weight the error on estimating the height of the
# individual peaks, so we formulate an error-weighting function based on
# these peaks, which is simply a two-gaussian bumpety-bump:
w = (ut.gaussian(freqs, initial[0], 0.075, 1, 0, 0) +
ut.gaussian(freqs, initial[1], 0.075, 1, 0, 0))
# Further, we want to also optimize on the individual lorentzians error, to
# restrict the fit space a bit more. For this purpose, we will pass a list
# of lorentzians with indices into the parameter list, so that we can do
# that (see mopt.err_func for the mechanics).
func_list = [[ut.lorentzian, [0,2,4,6,8,9],
ut.gaussian(freqs, initial[0], 0.075, 1, 0, 0)],
[ut.lorentzian, [1,3,5,7,8,9],
ut.gaussian(freqs, initial[1], 0.075, 1, 0, 0)]]
params, _ = lsq.leastsqbound(mopt.err_func, initial,
args=(freqs, np.real(signal),
ut.two_lorentzian, w, func_list),
bounds=bounds)
return params
def _do_two_gaussian_fit(freqs, signal, bounds=None):
"""
Helper function for the two gaussian fit
"""
initial = _two_func_initializer(freqs, signal)
# Edit out the ones we want in the order we want them:
initial = (initial[0], initial[1],
initial[6], initial[7],
initial[2], initial[3],
initial[10], initial[11])
    # We want to preferentially weight the error on estimating the height of the
# individual peaks, so we formulate an error-weighting function based on
# these peaks, which is simply a two-gaussian bumpety-bump:
w = (ut.gaussian(freqs, initial[0], 0.075, 1, 0, 0) +
ut.gaussian(freqs, initial[1], 0.075, 1, 0, 0))
# Further, we want to also optimize on the individual gaussians error, to
# restrict the fit space a bit more. For this purpose, we will pass a list
# of gaussians with indices into the parameter list, so that we can do
# that (see mopt.err_func for the mechanics).
func_list = [[ut.gaussian, [0,2,4,6,7],
ut.gaussian(freqs, initial[0], 0.075, 1, 0, 0)],
[ut.gaussian, [1,3,5,6,7],
ut.gaussian(freqs, initial[1], 0.075, 1, 0, 0)]]
params, _ = lsq.leastsqbound(mopt.err_func, initial,
args=(freqs, np.real(signal),
ut.two_gaussian, w, func_list),
bounds=bounds)
return params
def fit_two_lorentzian(spectra, f_ppm, lb=2.6, ub=3.6):
"""
Fit a lorentzian function to the sum spectra to be used for estimation of
the creatine and choline peaks.
Parameters
----------
spectra : array of shape (n_transients, n_points)
Typically the sum of the on/off spectra in each transient.
    f_ppm : array
        The frequency axis, in ppm
lb, ub : floats
In ppm, the range over which optimization is bounded
"""
# We are only going to look at the interval between lb and ub
idx = ut.make_idx(f_ppm, lb, ub)
n_points = np.abs(idx.stop - idx.start)
n_params = 10 # Lotsa params!
# Set the bounds for the optimization
bounds = [(lb,ub), #peak1
(lb,ub), #peak2
(0,None), #area1
(0,None), #area2
(0,ub-lb), #hwhm1
(0,ub-lb), #hwhm2
(-np.pi/2, np.pi/2), #phase
(-np.pi/2, np.pi/2), #phase
(None,None), #offset
(None, None)] #drift
model = np.empty((spectra.shape[0], n_points))
signal = np.empty((spectra.shape[0], n_points))
params = np.empty((spectra.shape[0], n_params))
for ii, xx in enumerate(spectra):
# We fit to the real spectrum:
signal[ii] = np.real(xx[idx])
params[ii] = _do_two_lorentzian_fit(f_ppm[idx], np.real(signal[ii]),
bounds=bounds)
model[ii] = ut.two_lorentzian(f_ppm[idx], *params[ii])
return model, signal, params
def fit_two_gaussian(spectra, f_ppm, lb=3.6, ub=3.9):
"""
Fit a gaussian function to the difference spectra
This is useful for estimation of the Glx peak, which tends to have two
peaks.
Parameters
----------
spectra : array of shape (n_transients, n_points)
Typically the difference of the on/off spectra in each transient.
    f_ppm : array
        The frequency axis, in ppm
lb, ub : floats
In ppm, the range over which optimization is bounded
"""
idx = ut.make_idx(f_ppm, lb, ub)
# We are only going to look at the interval between lb and ub
n_points = idx.stop - idx.start
n_params = 8
fit_func = ut.two_gaussian
# Set the bounds for the optimization
bounds = [(lb,ub), # peak 1 location
(lb,ub), # peak 2 location
(0,None), # sigma 1
(0,None), # sigma 2
(0,None), # amp 1
(0,None), # amp 2
(None, None), # offset
(None, None), # drift
]
model = np.empty((spectra.shape[0], n_points))
signal = np.empty((spectra.shape[0], n_points))
params = np.empty((spectra.shape[0], n_params))
for ii, xx in enumerate(spectra):
# We fit to the real spectrum:
signal[ii] = np.real(xx[idx])
params[ii] = _do_two_gaussian_fit(f_ppm[idx], np.real(signal[ii]),
bounds=bounds)
model[ii] = fit_func(f_ppm[idx], *params[ii])
return model, signal, params
def _do_scale_fit(freqs, signal, model, w=None):
"""
Perform a round of fitting to deal with over or under-estimation.
Scales curve on y-axis but preserves shape.
Parameters
----------
freqs : array
signal : array
The signal that the model is being fit to
model : array
The model being scaled
w : array
weighting function
Returns
-------
scalefac : array of len(signal)
the scaling factor for each transient
scalemodel : array of model.shape
the scaled model
"""
scalefac = np.empty(model.shape[0])
scalemodel = np.empty((model.shape[0], np.real(model).shape[1]))
scalesignal = np.empty((signal.shape[0], np.real(signal).shape[1]))
for ii, xx in enumerate(signal): # per transient
scalesignal[ii] = np.real(xx)
# ratio = np.empty(scalesignal[ii].shape[0])
# for ppm, trans in enumerate(scalesignal[ii]):
# ratio[ppm] = trans/model[ii][ppm]
# scalefac[ii] = np.mean(ratio,0)
scalefac[ii] = np.nanmean(scalesignal[ii],0)/np.nanmean(model[ii],0)
scalemodel[ii] = scalefac[ii] * model[ii]
return scalefac, scalemodel
def scalemodel(model, scalefac):
"""
Given a scale factor, multiply by model to get scaled model
Parameters
----------
model : array
original model
scalefac : array of model.shape[0]
array of scalefactors
Returns
-------
scaledmodel : array
model scaled by scale factor
"""
    scaledmodel = np.empty_like(model)
    for ii, mm in enumerate(model):
        scaledmodel[ii] = mm * scalefac[ii]
return scaledmodel
def fit_gaussian(spectra, f_ppm, lb=2.6, ub=3.6):
"""
Fit a gaussian function to the difference spectra to be used for estimation
of the GABA peak.
Parameters
----------
spectra : array of shape (n_transients, n_points)
Typically the difference of the on/off spectra in each transient.
    f_ppm : array
        The frequency axis, in ppm
lb, ub : floats
In ppm, the range over which optimization is bounded
"""
idx = ut.make_idx(f_ppm, lb, ub)
# We are only going to look at the interval between lb and ub
n_points = idx.stop - idx.start
n_params = 5
fit_func = ut.gaussian
# Set the bounds for the optimization
bounds = [(lb,ub), # peak location
(0,None), # sigma
(0,None), # amp
(None, None), # offset
(None, None) # drift
]
model = np.empty((spectra.shape[0], n_points))
signal = np.empty((spectra.shape[0], n_points))
params = np.empty((spectra.shape[0], n_params))
for ii, xx in enumerate(spectra):
# We fit to the real spectrum:
signal[ii] = np.real(xx[idx])
# Use the signal for a rough estimate of the parameters for
# initialization :
max_idx = np.argmax(signal[ii])
max_sig = np.max(signal[ii])
initial_f0 = f_ppm[idx][max_idx]
half_max_idx = np.argmin(np.abs(signal[ii] - max_sig/2))
# We estimate sigma as the hwhm:
initial_sigma = np.abs(initial_f0 - f_ppm[idx][half_max_idx])
initial_off = np.min(signal[ii])
initial_drift = 0
initial_amp = max_sig
initial = (initial_f0,
initial_sigma,
initial_amp,
initial_off,
initial_drift)
params[ii], _ = lsq.leastsqbound(mopt.err_func,
initial,
args=(f_ppm[idx],
np.real(signal[ii]),
fit_func), bounds=bounds)
model[ii] = fit_func(f_ppm[idx], *params[ii])
return model, signal, params
def integrate(func, x, args=(), offset=0, drift=0):
"""
Integrate a function over the domain x
Parameters
----------
func : callable
A function from the domain x to floats. The first input to this function
has to be x, an array with values to evaluate for, running in monotonic
order
x : float array
The domain over which to integrate, as sampled. This can be monotonically
decreasing or monotonically increasing.
args : tuple
The parameters of func after x.
    offset : float
        A constant baseline subtracted from the evaluated function before
        integration. Default: 0
    drift : float
        A linear drift (relative to x[0]) subtracted from the evaluated
        function before integration. Default: 0
Notes
-----
We apply the trapezoid rule for integration here, using
scipy.integrate.trapz.
See: http://en.wikipedia.org/wiki/Trapezoidal_rule
"""
# If it's monotonically decreasing (as is often the case here), we invert
# it, so that our results are strictly positive
if x[1]<x[0]:
x = x[::-1]
y = func(x, *args)
# Correct for offset and drift, if those are present and specified
# (otherwise default to 0 on both):
y = y - offset
y = y - drift * (x-x[0])
# Use trapezoidal integration on the corrected function:
return spi.trapz(y, x)
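# Editor's illustrative check of the offset correction: integrating
# f(x) = 2 * x + 1 over [0, 1] with offset=1 removes the constant baseline,
# leaving the area under 2 * x, which is 1.0.
def _example_integrate():
    x = np.linspace(0, 1, 101)
    return integrate(lambda xx: 2 * xx + 1, x, offset=1)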
def simple_auc(spectrum, f_ppm, center=3.00, bandwidth=0.30):
"""
Calculates area under the curve (no fitting)
Parameters
----------
    spectrum : array of shape (n_transients, n_points)
        Typically the difference of the on/off spectra in each transient.
    f_ppm : array
        The frequency axis, in ppm.
center, bandwidth : float
Determine the limits for the part of the spectrum for which we want
to calculate the AUC.
e.g. if center = 3.0, bandwidth = 0.3, lower and upper bounds will be
2.85 and 3.15 respectively (center +/- bandwidth/2).
Notes
-----
Default center and bandwidth are 3.0 and 0.3ppm respectively
because of Sanacora 1999 pg 1045:
"The GABA signal was integrated over a 0.30-ppm bandwidth at 3.00ppm"
Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder, F.,
Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical
{gamma}-aminobutyric acid levels in depressed patients determined by proton
magnetic resonance spectroscopy. Archives of general psychiatry, 56(11),
1043.
"""
    f_range = np.max(f_ppm) - np.min(f_ppm)
    dx = float(f_range) / float(len(f_ppm))
    lb = int(np.floor((np.max(f_ppm) - float(center) + float(bandwidth) / 2) / dx))
    ub = int(np.ceil((np.max(f_ppm) - float(center) - float(bandwidth) / 2) / dx))
auc = trapz(spectrum[ub:lb].real, dx=dx)
return auc, ub, lb
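# Worked example (editor's note): for an f_ppm axis running from 5 ppm down to
# 0 ppm over 500 points, dx = 0.01 ppm, so with the defaults (center=3.00,
# bandwidth=0.30) lb and ub come out to roughly 215 and 185 in index space, and
# spectrum[185:215] covers approximately 2.85-3.15 ppm, i.e. center +/-
# bandwidth/2.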
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Multivariate Normal distribution class.
@@MultivariateNormal
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
def _assert_compatible_shapes(mu, sigma):
r_mu = array_ops.rank(mu)
r_sigma = array_ops.rank(sigma)
sigma_shape = array_ops.shape(sigma)
sigma_rank = array_ops.rank(sigma)
mu_shape = array_ops.shape(mu)
return control_flow_ops.group(
logging_ops.Assert(
math_ops.equal(r_mu + 1, r_sigma),
["Rank of mu should be one less than rank of sigma, but saw: ",
r_mu, " vs. ", r_sigma]),
logging_ops.Assert(
math_ops.equal(
array_ops.gather(sigma_shape, sigma_rank - 2),
array_ops.gather(sigma_shape, sigma_rank - 1)),
["Last two dimensions of sigma (%s) must be equal: " % sigma.name,
sigma_shape]),
logging_ops.Assert(
math_ops.reduce_all(math_ops.equal(
mu_shape,
array_ops.slice(
sigma_shape, [0], array_ops.pack([sigma_rank - 1])))),
["mu.shape and sigma.shape[:-1] must match, but saw: ",
mu_shape, " vs. ", sigma_shape]))
def _assert_batch_positive_definite(sigma_chol):
"""Add assertions checking that the sigmas are all Positive Definite.
Given `sigma_chol == cholesky(sigma)`, it is sufficient to check that
`all(diag(sigma_chol) > 0)`. This is because to check that a matrix is PD,
it is sufficient that its cholesky factorization is PD, and to check that a
triangular matrix is PD, it is sufficient to check that its diagonal
entries are positive.
Args:
sigma_chol: N-D. The lower triangular cholesky decomposition of `sigma`.
Returns:
An assertion op to use with `control_dependencies`, verifying that
`sigma_chol` is positive definite.
"""
sigma_batch_diag = array_ops.batch_matrix_diag_part(sigma_chol)
return logging_ops.Assert(
math_ops.reduce_all(sigma_batch_diag > 0),
["sigma_chol is not positive definite. batched diagonals: ",
sigma_batch_diag, " shaped: ", array_ops.shape(sigma_batch_diag)])
def _determinant_from_sigma_chol(sigma_chol):
det_last_dim = array_ops.rank(sigma_chol) - 2
sigma_batch_diag = array_ops.batch_matrix_diag_part(sigma_chol)
det = math_ops.square(math_ops.reduce_prod(
sigma_batch_diag, reduction_indices=det_last_dim))
det.set_shape(sigma_chol.get_shape()[:-2])
return det
class MultivariateNormal(object):
"""The Multivariate Normal distribution on `R^k`.
The distribution has mean and covariance parameters mu (1-D), sigma (2-D),
or alternatively mean `mu` and factored covariance (cholesky decomposed
`sigma`) called `sigma_chol`.
The PDF of this distribution is:
```
f(x) = (2*pi)^(-k/2) |det(sigma)|^(-1/2) exp(-1/2*(x-mu)^*.sigma^{-1}.(x-mu))
```
where `.` denotes the inner product on `R^k` and `^*` denotes transpose.
Alternatively, if `sigma` is positive definite, it can be represented in terms
of its lower triangular cholesky factorization
```sigma = sigma_chol . sigma_chol^*```
and the pdf above allows simpler computation:
```
|det(sigma)| = reduce_prod(diag(sigma_chol))^2
x_whitened = sigma^{-1/2} . (x - mu) = tri_solve(sigma_chol, x - mu)
(x-mu)^* .sigma^{-1} . (x-mu) = x_whitened^* . x_whitened
```
where `tri_solve()` solves a triangular system of equations.
"""
def __init__(self, mu, sigma=None, sigma_chol=None, name=None):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, which are tensors of rank `N+1` (`N >= 0`)
with the last dimension having length `k`.
User must provide exactly one of `sigma` (the covariance matrices) or
`sigma_chol` (the cholesky decompositions of the covariance matrices).
`sigma` or `sigma_chol` must be of rank `N+2`. The last two dimensions
must both have length `k`. The first `N` dimensions correspond to batch
indices.
If `sigma_chol` is not provided, the batch cholesky factorization of `sigma`
is calculated for you.
The shapes of `mu` and `sigma` must match for the first `N` dimensions.
Regardless of which parameter is provided, the covariance matrices must all
be **positive definite** (an error is raised if one of them is not).
Args:
mu: (N+1)-D. `float` or `double` tensor, the means of the distributions.
sigma: (N+2)-D. (optional) `float` or `double` tensor, the covariances
of the distribution(s). The first `N+1` dimensions must match
those of `mu`. Must be batch-positive-definite.
sigma_chol: (N+2)-D. (optional) `float` or `double` tensor, a
lower-triangular factorization of `sigma`
(`sigma = sigma_chol . sigma_chol^*`). The first `N+1` dimensions
must match those of `mu`. The tensor itself need not be batch
lower triangular: we ignore the upper triangular part. However,
the batch diagonals must be positive (i.e., sigma_chol must be
batch-positive-definite).
name: The name to give Ops created by the initializer.
Raises:
ValueError: if neither sigma nor sigma_chol is provided.
TypeError: if mu and sigma (resp. sigma_chol) are different dtypes.
"""
if (sigma is None) == (sigma_chol is None):
raise ValueError("Exactly one of sigma and sigma_chol must be provided")
with ops.op_scope([mu, sigma, sigma_chol], name, "MultivariateNormal"):
sigma_or_half = sigma_chol if sigma is None else sigma
mu = ops.convert_to_tensor(mu)
sigma_or_half = ops.convert_to_tensor(sigma_or_half)
contrib_tensor_util.assert_same_float_dtype((mu, sigma_or_half))
with ops.control_dependencies([
_assert_compatible_shapes(mu, sigma_or_half)]):
mu = array_ops.identity(mu, name="mu")
# Store the dimensionality of the MVNs
self._k = array_ops.gather(array_ops.shape(mu), array_ops.rank(mu) - 1)
if sigma_chol is not None:
# Ensure we only keep the lower triangular part.
sigma_chol = array_ops.batch_matrix_band_part(
sigma_chol, num_lower=-1, num_upper=0)
sigma_det = _determinant_from_sigma_chol(sigma_chol)
with ops.control_dependencies([
_assert_batch_positive_definite(sigma_chol)]):
self._sigma = math_ops.batch_matmul(
sigma_chol, sigma_chol, adj_y=True, name="sigma")
self._sigma_chol = array_ops.identity(sigma_chol, "sigma_chol")
self._sigma_det = array_ops.identity(sigma_det, "sigma_det")
self._mu = array_ops.identity(mu, "mu")
else: # sigma is not None
sigma_chol = linalg_ops.batch_cholesky(sigma)
sigma_det = _determinant_from_sigma_chol(sigma_chol)
# batch_cholesky checks for PSD; so we can just use it here.
with ops.control_dependencies([sigma_chol]):
self._sigma = array_ops.identity(sigma, "sigma")
self._sigma_chol = array_ops.identity(sigma_chol, "sigma_chol")
self._sigma_det = array_ops.identity(sigma_det, "sigma_det")
self._mu = array_ops.identity(mu, "mu")
@property
def dtype(self):
return self._mu.dtype
@property
def mu(self):
return self._mu
@property
def sigma(self):
return self._sigma
@property
def mean(self):
return self._mu
@property
def sigma_det(self):
return self._sigma_det
def log_pdf(self, x, name=None):
"""Log pdf of observations `x` given these Multivariate Normals.
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu`.
name: The name to give this op.
Returns:
log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.
"""
with ops.op_scope(
[self._mu, self._sigma_chol, x], name, "MultivariateNormalLogPdf"):
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
x_centered = x - self.mu
x_rank = array_ops.rank(x_centered)
sigma_rank = array_ops.rank(self._sigma_chol)
x_rank_vec = array_ops.pack([x_rank])
sigma_rank_vec = array_ops.pack([sigma_rank])
x_shape = array_ops.shape(x_centered)
# sigma_chol is shaped [D, E, F, ..., k, k]
# x_centered shape is one of:
# [D, E, F, ..., k], or [F, ..., k], or
# [A, B, C, D, E, F, ..., k]
# and we need to convert x_centered to shape:
# [D, E, F, ..., k, A*B*C] (or 1 if A, B, C don't exist)
# then transpose and reshape x_whitened back to one of the shapes:
# [D, E, F, ..., k], or [1, 1, F, ..., k], or
# [A, B, C, D, E, F, ..., k]
# This helper handles the case where rank(x_centered) < rank(sigma)
def _broadcast_x_not_higher_rank_than_sigma():
return array_ops.reshape(
x_centered,
array_ops.concat(
# Reshape to ones(deficient x rank) + x_shape + [1]
0, (array_ops.ones(array_ops.pack([sigma_rank - x_rank - 1]),
dtype=x_rank.dtype),
x_shape,
[1])))
# These helpers handle the case where rank(x_centered) >= rank(sigma)
def _broadcast_x_higher_rank_than_sigma():
x_shape_left = array_ops.slice(
x_shape, [0], sigma_rank_vec - 1)
x_shape_right = array_ops.slice(
x_shape, sigma_rank_vec - 1, x_rank_vec - 1)
x_shape_perm = array_ops.concat(
0, (math_ops.range(sigma_rank - 1, x_rank),
math_ops.range(0, sigma_rank - 1)))
return array_ops.reshape(
# Convert to [D, E, F, ..., k, B, C]
array_ops.transpose(
x_centered, perm=x_shape_perm),
# Reshape to [D, E, F, ..., k, B*C]
array_ops.concat(
0, (x_shape_right,
array_ops.pack([
math_ops.reduce_prod(x_shape_left, 0)]))))
def _unbroadcast_x_higher_rank_than_sigma():
x_shape_left = array_ops.slice(
x_shape, [0], sigma_rank_vec - 1)
x_shape_right = array_ops.slice(
x_shape, sigma_rank_vec - 1, x_rank_vec - 1)
x_shape_perm = array_ops.concat(
0, (math_ops.range(sigma_rank - 1, x_rank),
math_ops.range(0, sigma_rank - 1)))
return array_ops.transpose(
# [D, E, F, ..., k, B, C] => [B, C, D, E, F, ..., k]
array_ops.reshape(
# convert to [D, E, F, ..., k, B, C]
x_whitened_broadcast,
array_ops.concat(0, (x_shape_right, x_shape_left))),
perm=x_shape_perm)
# Step 1: reshape x_centered
x_centered_broadcast = control_flow_ops.cond(
# x_centered == [D, E, F, ..., k] => [D, E, F, ..., k, 1]
# or == [F, ..., k] => [1, 1, F, ..., k, 1]
x_rank <= sigma_rank - 1,
_broadcast_x_not_higher_rank_than_sigma,
# x_centered == [B, C, D, E, F, ..., k] => [D, E, F, ..., k, B*C]
_broadcast_x_higher_rank_than_sigma)
x_whitened_broadcast = linalg_ops.batch_matrix_triangular_solve(
self._sigma_chol, x_centered_broadcast)
# Reshape x_whitened_broadcast back to x_whitened
x_whitened = control_flow_ops.cond(
x_rank <= sigma_rank - 1,
lambda: array_ops.reshape(x_whitened_broadcast, x_shape),
_unbroadcast_x_higher_rank_than_sigma)
x_whitened = array_ops.expand_dims(x_whitened, -1)
# Reshape x_whitened to contain row vectors
# Returns a batchwise scalar
x_whitened_norm = math_ops.batch_matmul(
x_whitened, x_whitened, adj_x=True)
x_whitened_norm = control_flow_ops.cond(
x_rank <= sigma_rank - 1,
lambda: array_ops.squeeze(x_whitened_norm, [-2, -1]),
lambda: array_ops.squeeze(x_whitened_norm, [-1]))
log_two_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
k = math_ops.cast(self._k, self.dtype)
log_pdf_value = (
-math_ops.log(self._sigma_det) -k * log_two_pi - x_whitened_norm) / 2
final_shaped_value = control_flow_ops.cond(
x_rank <= sigma_rank - 1,
lambda: log_pdf_value,
lambda: array_ops.squeeze(log_pdf_value, [-1]))
output_static_shape = x_centered.get_shape()[:-1]
final_shaped_value.set_shape(output_static_shape)
return final_shaped_value
def pdf(self, x, name=None):
"""The PDF of observations `x` under these Multivariate Normals.
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
pdf: tensor of dtype `dtype`, the pdf values of `x`.
"""
with ops.op_scope(
[self._mu, self._sigma_chol, x], name, "MultivariateNormalPdf"):
return math_ops.exp(self.log_pdf(x))
def entropy(self, name=None):
"""The entropies of these Multivariate Normals.
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropies.
"""
with ops.op_scope(
[self._mu, self._sigma_chol], name, "MultivariateNormalEntropy"):
one_plus_log_two_pi = constant_op.constant(
1 + math.log(2 * math.pi), dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._k, dtype=self.dtype)
entropy_value = (
k * one_plus_log_two_pi + math_ops.log(self._sigma_det)) / 2
entropy_value.set_shape(self._sigma_det.get_shape())
return entropy_value
def sample(self, n, seed=None, name=None):
"""Sample `n` observations from the Multivariate Normal Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: `[n, ...]`, a `Tensor` of `n` samples for each
of the distributions determined by broadcasting the hyperparameters.
"""
with ops.op_scope(
[self._mu, self._sigma_chol, n], name, "MultivariateNormalSample"):
# TODO(ebrevdo): Is there a better way to get broadcast_shape?
broadcast_shape = self.mu.get_shape()
n = ops.convert_to_tensor(n)
sigma_shape_left = array_ops.slice(
array_ops.shape(self._sigma_chol),
[0], array_ops.pack([array_ops.rank(self._sigma_chol) - 2]))
k_n = array_ops.pack([self._k, n])
shape = array_ops.concat(0, [sigma_shape_left, k_n])
white_samples = random_ops.random_normal(
shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)
correlated_samples = math_ops.batch_matmul(
self._sigma_chol, white_samples)
# Move the last dimension to the front
perm = array_ops.concat(
0,
(array_ops.pack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)))
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
# Provide some hints to shape inference
n_val = tensor_util.constant_value(n)
final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
samples.set_shape(final_shape)
return samples
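# Editor's illustrative sketch (plain numpy, not part of the TensorFlow class
# above): the cholesky identities quoted in the class docstring and used by
# _determinant_from_sigma_chol can be checked directly for a single k-dim MVN.
def _numpy_mvn_sketch(mu, sigma, x):
  import numpy as np  # local import; this module otherwise only uses TensorFlow
  chol = np.linalg.cholesky(sigma)               # sigma = chol . chol^T
  log_det = 2.0 * np.sum(np.log(np.diag(chol)))  # det(sigma) = prod(diag(chol))^2
  x_whitened = np.linalg.solve(chol, x - mu)     # tri_solve(chol, x - mu)
  # (x - mu)^T . sigma^{-1} . (x - mu) == x_whitened . x_whitened
  return log_det, np.dot(x_whitened, x_whitened)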
|
|
from __future__ import absolute_import, division, print_function
import csv
import itertools as it
import os
import datashape
from dynd import nd
from .. import py2help
from .data_descriptor import DDesc, Capabilities
from .dynd_data_descriptor import DyND_DDesc
def open_file(path, mode, has_header):
"""Return a file handler positionated at the first valid line."""
csvfile = open(path, mode=mode)
if has_header:
csvfile.readline()
return csvfile
def csv_descriptor_iter(filename, mode, has_header, schema, dialect={}):
with open_file(filename, mode, has_header) as csvfile:
for row in csv.reader(csvfile, **dialect):
yield DyND_DDesc(nd.array(row, dtype=schema))
def csv_descriptor_iterchunks(filename, mode, has_header, schema,
blen, dialect={}, start=None, stop=None):
rows = []
with open_file(filename, mode, has_header) as csvfile:
for nrow, row in enumerate(csv.reader(csvfile, **dialect)):
if start is not None and nrow < start:
continue
if stop is not None and nrow >= stop:
if rows != []:
# Build the descriptor for the data we have and return
yield DyND_DDesc(nd.array(rows, dtype=schema))
return
rows.append(row)
            if len(rows) == blen:
yield DyND_DDesc(nd.array(rows, dtype=schema))
rows = []
class CSV_DDesc(DDesc):
"""
A Blaze data descriptor which exposes a CSV file.
Parameters
----------
path : string
A path string for the CSV file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the CSV file.
dialect : string or csv.Dialect instance
The dialect as understood by the `csv` module in Python standard
library. If not specified, a value is guessed.
has_header : boolean
Whether the CSV file has a header or not. If not specified a value
is guessed.
"""
def __init__(self, path, mode='r', schema=None, dialect=None,
has_header=None, **kwargs):
        if not os.path.isfile(path):
raise ValueError('CSV file "%s" does not exist' % path)
self.path = path
self.mode = mode
csvfile = open(path, mode=self.mode)
# Handle Schema
if isinstance(schema, py2help._strtypes):
schema = datashape.dshape(schema)
if isinstance(schema, datashape.DataShape) and len(schema) == 1:
schema = schema[0]
if not isinstance(schema, datashape.Record):
raise TypeError(
'schema cannot be converted into a blaze record dshape')
self.schema = str(schema)
# Handle Dialect
if dialect is None:
# Guess the dialect
sniffer = csv.Sniffer()
try:
dialect = sniffer.sniff(csvfile.read(1024))
except:
# Cannot guess dialect. Assume Excel.
dialect = csv.get_dialect('excel')
csvfile.seek(0)
else:
dialect = csv.get_dialect(dialect)
self.dialect = dict((key, getattr(dialect, key))
for key in dir(dialect) if not key.startswith('_'))
# Update dialect with any keyword arguments passed in
# E.g. allow user to override with delimiter=','
for k, v in kwargs.items():
if k in self.dialect:
self.dialect[k] = v
# Handle Header
if has_header is None:
# Guess whether the file has a header or not
sniffer = csv.Sniffer()
csvfile.seek(0)
sample = csvfile.read(1024)
self.has_header = sniffer.has_header(sample)
else:
self.has_header = has_header
csvfile.close()
@property
def dshape(self):
return datashape.DataShape(datashape.Var(), self.schema)
@property
def capabilities(self):
"""The capabilities for the csv data descriptor."""
return Capabilities(
            # csv data is appendable, hence not immutable (in-place updates,
            # however, are not supported)
            immutable = False,
# csv datadescriptors are concrete
deferred = False,
# csv datadescriptor is persistent
persistent = True,
# csv datadescriptor can be appended efficiently
appendable = True,
remote = False,
)
def dynd_arr(self):
        # Position at the beginning of the file
with open_file(self.path, self.mode, self.has_header) as csvfile:
return nd.array(csv.reader(csvfile, **self.dialect), dtype=self.schema)
def __array__(self):
return nd.as_numpy(self.dynd_arr())
def __len__(self):
# We don't know how many rows we have
return None
def __getitem__(self, key):
with open_file(self.path, self.mode, self.has_header) as csvfile:
if isinstance(key, py2help._inttypes):
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
else:
raise IndexError("key '%r' is not valid" % key)
read_iter = it.islice(csv.reader(csvfile, **self.dialect),
start, stop, step)
res = nd.array(read_iter, dtype=self.schema)
return DyND_DDesc(res)
def __setitem__(self, key, value):
# CSV files cannot be updated (at least, not efficiently)
raise NotImplementedError
def __iter__(self):
return csv_descriptor_iter(
self.path, self.mode, self.has_header, self.schema, self.dialect)
def append(self, row):
"""Append a row of values (in sequence form)."""
values = nd.array(row, dtype=self.schema) # validate row
with open_file(self.path, self.mode, self.has_header) as csvfile:
csvfile.seek(0, os.SEEK_END) # go to the end of the file
delimiter = self.dialect['delimiter']
csvfile.write(delimiter.join(py2help.unicode(v) for v in row)+'\n')
def iterchunks(self, blen=None, start=None, stop=None):
"""Return chunks of size `blen` (in leading dimension).
Parameters
----------
blen : int
The length, in rows, of the buffers that are returned.
start : int
Where the iterator starts. The default is to start at the
beginning.
stop : int
Where the iterator stops. The default is to stop at the end.
Returns
-------
out : iterable
            This iterable returns buffers as DyND arrays.
"""
# Return the iterable
return csv_descriptor_iterchunks(
self.path, self.mode, self.has_header,
self.schema, blen, self.dialect, start, stop)
def remove(self):
"""Remove the persistent storage."""
os.unlink(self.path)
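# Usage sketch (editor's addition; the file name and schema are hypothetical):
#
#     dd = CSV_DDesc('data.csv', schema='{x: int32, y: string}',
#                    has_header=True)
#     for chunk in dd.iterchunks(blen=100):
#         ...  # each chunk is a DyND_DDesc wrapping up to `blen` rows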
|
|
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from enum import Enum, IntEnum
from abc import ABC, abstractmethod
from mycroft.messagebus.message import Message
from .mycroft_skill import MycroftSkill
from .audioservice import AudioService
class CPSMatchLevel(Enum):
EXACT = 1
MULTI_KEY = 2
TITLE = 3
ARTIST = 4
CATEGORY = 5
GENERIC = 6
class CPSTrackStatus(IntEnum):
DISAMBIGUATION = 1 # not queued for playback, show in gui
PLAYING = 20 # Skill is handling playback internally
PLAYING_AUDIOSERVICE = 21 # Skill forwarded playback to audio service
PLAYING_GUI = 22 # Skill forwarded playback to gui
PLAYING_ENCLOSURE = 23 # Skill forwarded playback to enclosure
QUEUED = 30 # Waiting playback to be handled inside skill
QUEUED_AUDIOSERVICE = 31 # Waiting playback in audio service
QUEUED_GUI = 32 # Waiting playback in gui
QUEUED_ENCLOSURE = 33 # Waiting for playback in enclosure
PAUSED = 40 # media paused but ready to resume
STALLED = 60 # playback has stalled, reason may be unknown
BUFFERING = 61 # media is buffering from an external source
END_OF_MEDIA = 90 # playback finished, is the default state when CPS loads
class CommonPlaySkill(MycroftSkill, ABC):
""" To integrate with the common play infrastructure of Mycroft
skills should use this base class and override the two methods
`CPS_match_query_phrase` (for checking if the skill can play the
utterance) and `CPS_start` for launching the media.
The class makes the skill available to queries from the
mycroft-playback-control skill and no special vocab for starting playback
is needed.
"""
def __init__(self, name=None, bus=None):
super().__init__(name, bus)
self.audioservice = None
self.play_service_string = None
# "MusicServiceSkill" -> "Music Service"
spoken = name or self.__class__.__name__
self.spoken_name = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>",
spoken.replace("Skill", ""))
# NOTE: Derived skills will likely want to override self.spoken_name
# with a translatable name in their initialize() method.
def bind(self, bus):
"""Overrides the normal bind method.
Adds handlers for play:query and play:start messages allowing
interaction with the playback control skill.
This is called automatically during setup, and
need not otherwise be used.
"""
if bus:
super().bind(bus)
self.audioservice = AudioService(self.bus)
self.add_event('play:query', self.__handle_play_query)
self.add_event('play:start', self.__handle_play_start)
def __handle_play_query(self, message):
"""Query skill if it can start playback from given phrase."""
search_phrase = message.data["phrase"]
# First, notify the requestor that we are attempting to handle
# (this extends a timeout while this skill looks for a match)
self.bus.emit(message.response({"phrase": search_phrase,
"skill_id": self.skill_id,
"searching": True}))
# Now invoke the CPS handler to let the skill perform its search
result = self.CPS_match_query_phrase(search_phrase)
if result:
match = result[0]
level = result[1]
callback = result[2] if len(result) > 2 else None
confidence = self.__calc_confidence(match, search_phrase, level)
self.bus.emit(message.response({"phrase": search_phrase,
"skill_id": self.skill_id,
"callback_data": callback,
"service_name": self.spoken_name,
"conf": confidence}))
else:
# Signal we are done (can't handle it)
self.bus.emit(message.response({"phrase": search_phrase,
"skill_id": self.skill_id,
"searching": False}))
def __calc_confidence(self, match, phrase, level):
"""Translate confidence level and match to a 0-1 value.
"play pandora"
"play pandora is my girlfriend"
"play tom waits on pandora"
Assume the more of the words that get consumed, the better the match
Args:
match (str): Matching string
phrase (str): original input phrase
level (CPSMatchLevel): match level
"""
consumed_pct = len(match.split()) / len(phrase.split())
if consumed_pct > 1.0:
consumed_pct = 1.0 / consumed_pct # deal with over/under-matching
# We'll use this to modify the level, but don't want it to allow a
# match to jump to the next match level. So bonus is 0 - 0.05 (1/20)
bonus = consumed_pct / 20.0
if level == CPSMatchLevel.EXACT:
return 1.0
elif level == CPSMatchLevel.MULTI_KEY:
return 0.9 + bonus
elif level == CPSMatchLevel.TITLE:
return 0.8 + bonus
elif level == CPSMatchLevel.ARTIST:
return 0.7 + bonus
elif level == CPSMatchLevel.CATEGORY:
return 0.6 + bonus
elif level == CPSMatchLevel.GENERIC:
return 0.5 + bonus
else:
return 0.0 # should never happen
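    # Worked example (editor's note): for the query phrase
    # "tom waits on pandora", a skill matching "tom waits" at ARTIST level gets
    # consumed_pct = 2/4 = 0.5, bonus = 0.025, and reports a confidence of
    # 0.7 + 0.025 = 0.725.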
def __handle_play_start(self, message):
"""Bus handler for starting playback using the skill."""
if message.data["skill_id"] != self.skill_id:
# Not for this skill!
return
phrase = message.data["phrase"]
data = message.data.get("callback_data")
# Stop any currently playing audio
if self.audioservice.is_playing:
self.audioservice.stop()
self.bus.emit(message.forward("mycroft.stop"))
# Save for CPS_play() later, e.g. if phrase includes modifiers like
# "... on the chromecast"
self.play_service_string = phrase
self.make_active()
# Invoke derived class to provide playback data
self.CPS_start(phrase, data)
def CPS_play(self, *args, **kwargs):
"""Begin playback of a media file or stream
        Normally this method will be invoked with something like:
self.CPS_play(url)
Advanced use can also include keyword arguments, such as:
self.CPS_play(url, repeat=True)
Args:
same as the Audioservice.play method
"""
# Inject the user's utterance in case the audio backend wants to
# interpret it. E.g. "play some rock at full volume on the stereo"
if 'utterance' not in kwargs:
kwargs['utterance'] = self.play_service_string
self.audioservice.play(*args, **kwargs)
self.CPS_send_status(uri=args[0],
status=CPSTrackStatus.PLAYING_AUDIOSERVICE)
def stop(self):
"""Stop anything playing on the audioservice."""
if self.audioservice.is_playing:
self.audioservice.stop()
return True
else:
return False
######################################################################
# Abstract methods
# All of the following must be implemented by a skill that wants to
# act as a CommonPlay Skill
@abstractmethod
def CPS_match_query_phrase(self, phrase):
"""Analyze phrase to see if it is a play-able phrase with this skill.
Args:
phrase (str): User phrase uttered after "Play", e.g. "some music"
Returns:
(match, CPSMatchLevel[, callback_data]) or None: Tuple containing
a string with the appropriate matching phrase, the PlayMatch
type, and optionally data to return in the callback if the
match is selected.
"""
# Derived classes must implement this, e.g.
#
# if phrase in ["Zoosh"]:
# return ("Zoosh", CPSMatchLevel.Generic, {"hint": "music"})
# or:
# zoosh_song = find_zoosh(phrase)
# if zoosh_song and "Zoosh" in phrase:
# # "play Happy Birthday in Zoosh"
# return ("Zoosh", CPSMatchLevel.MULTI_KEY, {"song": zoosh_song})
# elif zoosh_song:
# # "play Happy Birthday"
# return ("Zoosh", CPSMatchLevel.TITLE, {"song": zoosh_song})
# elif "Zoosh" in phrase
# # "play Zoosh"
# return ("Zoosh", CPSMatchLevel.GENERIC, {"cmd": "random"})
return None
@abstractmethod
def CPS_start(self, phrase, data):
"""Begin playing whatever is specified in 'phrase'
Args:
phrase (str): User phrase uttered after "Play", e.g. "some music"
data (dict): Callback data specified in match_query_phrase()
"""
# Derived classes must implement this, e.g.
# self.CPS_play("http://zoosh.com/stream_music")
pass
def CPS_extend_timeout(self, timeout=5):
"""Request Common Play Framework to wait another {timeout} seconds
for an answer from this skill.
Args:
timeout (int): Number of seconds
"""
self.bus.emit(Message('play:query.response',
{"phrase": self.play_service_string,
"searching": True,
"timeout": timeout,
"skill_id": self.skill_id}))
def CPS_send_status(self, artist='', track='', album='', image='',
uri='', track_length=None, elapsed_time=None,
playlist_position=None,
status=CPSTrackStatus.DISAMBIGUATION, **kwargs):
"""Inform system of playback status.
If a skill is handling playback and wants the playback control to be
        aware of its current status it can emit this message indicating that
it's performing playback and can provide some standard info.
All parameters are optional so any can be left out. Also if extra
non-standard parameters are added, they too will be sent in the message
data.
Args:
artist (str): Current track artist
track (str): Track name
album (str): Album title
image (str): url for image to show
uri (str): uri for track
track_length (float): track length in seconds
elapsed_time (float): current offset into track in seconds
playlist_position (int): Position in playlist of current track
"""
data = {'skill': self.name,
'uri': uri,
'artist': artist,
'album': album,
'track': track,
'image': image,
'track_length': track_length,
'elapsed_time': elapsed_time,
'playlist_position': playlist_position,
'status': status
}
data = {**data, **kwargs} # Merge extra arguments
self.bus.emit(Message('play:status', data))
def CPS_send_tracklist(self, tracklist):
"""Inform system of playlist track info.
Provides track data for playlist
Args:
tracklist (list/dict): Tracklist data
"""
tracklist = tracklist or []
if not isinstance(tracklist, list):
tracklist = [tracklist]
for idx, track in enumerate(tracklist):
self.CPS_send_status(playlist_position=idx, **track)
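# Minimal sketch of a derived skill (editor's addition): the class name, the
# matched phrase and the stream URL are hypothetical; a real skill would do
# proper matching and set a translatable self.spoken_name in initialize().
class _ExampleStreamSkill(CommonPlaySkill):
    def CPS_match_query_phrase(self, phrase):
        if "example radio" in phrase.lower():
            # "play example radio"
            return ("example radio", CPSMatchLevel.GENERIC,
                    {"stream": "http://example.com/stream"})
        return None
    def CPS_start(self, phrase, data):
        # Hand the stream URL to the audio service via the helper above.
        self.CPS_play(data["stream"])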
|
|
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
import json
import logging
from tabpy.tabpy_server.management.util import write_state_config
from threading import Lock
from time import time
logger = logging.getLogger(__name__)
# State File Config Section Names
_DEPLOYMENT_SECTION_NAME = "Query Objects Service Versions"
_QUERY_OBJECT_DOCSTRING = "Query Objects Docstrings"
_SERVICE_INFO_SECTION_NAME = "Service Info"
_META_SECTION_NAME = "Meta"
# Directory Names
_QUERY_OBJECT_DIR = "query_objects"
"""
Lock to change the TabPy State.
"""
_PS_STATE_LOCK = Lock()
def state_lock(func):
"""
Mutex for changing PS state
"""
def wrapper(self, *args, **kwargs):
try:
_PS_STATE_LOCK.acquire()
return func(self, *args, **kwargs)
finally:
# ALWAYS RELEASE LOCK
_PS_STATE_LOCK.release()
return wrapper
def _get_root_path(state_path):
if state_path[-1] != "/":
state_path += "/"
return state_path
def get_query_object_path(state_file_path, name, version):
"""
Returns the query object path
If the version is None, a path without the version will be returned.
"""
root_path = _get_root_path(state_file_path)
sub_path = [_QUERY_OBJECT_DIR, name]
if version is not None:
sub_path.append(str(version))
full_path = root_path + "/".join(sub_path)
return full_path
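# Worked example (editor's note, hypothetical paths):
# get_query_object_path("/var/tabpy/state", "my_model", 2) returns
# "/var/tabpy/state/query_objects/my_model/2"; with version=None the trailing
# version component is simply omitted.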
class TabPyState:
"""
The TabPy state object that stores attributes
about this TabPy and perform GET/SET on these
attributes.
Attributes:
- name
- description
- endpoints (name, description, docstring, version, target)
- revision number
When the state object is initialized, the state is saved as a ConfigParser.
    Each attribute maps to an entry in that config.
"""
def __init__(self, settings, config=None):
self.settings = settings
self.set_config(config, _update=False)
@state_lock
def set_config(self, config, logger=logging.getLogger(__name__), _update=True):
"""
Set the local ConfigParser manually.
This new ConfigParser will be used as current state.
"""
if not isinstance(config, ConfigParser):
raise ValueError("Invalid config")
self.config = config
if _update:
self._write_state(logger)
def get_endpoints(self, name=None):
"""
Return a dictionary of endpoints
Parameters
----------
name : str
The name of the endpoint.
If "name" is specified, only the information about that endpoint
will be returned.
Returns
-------
endpoints : dict
The dictionary containing information about each endpoint.
The keys are the endpoint names.
The values for each include:
- description
- doc string
- type
- target
"""
endpoints = {}
try:
endpoint_names = self._get_config_value(_DEPLOYMENT_SECTION_NAME, name)
except Exception as e:
logger.error(f"error in get_endpoints: {str(e)}")
return {}
if name:
endpoint_info = json.loads(endpoint_names)
docstring = self._get_config_value(_QUERY_OBJECT_DOCSTRING, name)
endpoint_info["docstring"] = str(
bytes(docstring, "utf-8").decode("unicode_escape")
)
endpoints = {name: endpoint_info}
else:
for endpoint_name in endpoint_names:
endpoint_info = json.loads(
self._get_config_value(_DEPLOYMENT_SECTION_NAME, endpoint_name)
)
docstring = self._get_config_value(
_QUERY_OBJECT_DOCSTRING, endpoint_name, True, ""
)
endpoint_info["docstring"] = str(
bytes(docstring, "utf-8").decode("unicode_escape")
)
endpoints[endpoint_name] = endpoint_info
logger.debug(f"Collected endpoints: {endpoints}")
return endpoints
def _check_endpoint_exists(self, name):
endpoints = self.get_endpoints()
if not name or not isinstance(name, str) or len(name) == 0:
raise ValueError("name of the endpoint must be a valid string.")
return name in endpoints
def _check_and_set_endpoint_str_value(self, param, paramName, defaultValue):
if not param and defaultValue is not None:
return defaultValue
if not param or not isinstance(param, str):
raise ValueError(f"{paramName} must be a string.")
return param
def _check_and_set_endpoint_description(self, description, defaultValue):
return self._check_and_set_endpoint_str_value(description, "description", defaultValue)
def _check_and_set_endpoint_docstring(self, docstring, defaultValue):
return self._check_and_set_endpoint_str_value(docstring, "docstring", defaultValue)
def _check_and_set_endpoint_type(self, endpoint_type, defaultValue):
return self._check_and_set_endpoint_str_value(
endpoint_type, "endpoint type", defaultValue)
def _check_target(self, target):
if target and not isinstance(target, str):
raise ValueError("target must be a string.")
def _check_and_set_dependencies(self, dependencies, defaultValue):
if not dependencies:
return defaultValue
        if not isinstance(dependencies, list):
raise ValueError("dependencies must be a list.")
return dependencies
@state_lock
def add_endpoint(
self,
name,
description=None,
docstring=None,
endpoint_type=None,
methods=None,
target=None,
dependencies=None,
schema=None,
):
"""
Add a new endpoint to the TabPy.
Parameters
----------
name : str
Name of the endpoint
description : str, optional
Description of this endpoint
        docstring : str, optional
The doc string for this endpoint, if needed.
endpoint_type : str
The endpoint type (model, alias)
target : str, optional
The target endpoint name for the alias to be added.
Note:
The version of this endpoint will be set to 1 since it is a new
endpoint.
"""
try:
if (self._check_endpoint_exists(name)):
raise ValueError(f"endpoint {name} already exists.")
endpoints = self.get_endpoints()
description = self._check_and_set_endpoint_description(description, "")
docstring = self._check_and_set_endpoint_docstring(
docstring, "-- no docstring found in query function --")
endpoint_type = self._check_and_set_endpoint_type(endpoint_type, None)
dependencies = self._check_and_set_dependencies(dependencies, [])
self._check_target(target)
if target and target not in endpoints:
raise ValueError("target endpoint is not valid.")
endpoint_info = {
"description": description,
"docstring": docstring,
"type": endpoint_type,
"version": 1,
"dependencies": dependencies,
"target": target,
"creation_time": int(time()),
"last_modified_time": int(time()),
"schema": schema,
}
endpoints[name] = endpoint_info
self._add_update_endpoints_config(endpoints)
except Exception as e:
logger.error(f"Error in add_endpoint: {e}")
raise
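    # Usage sketch (editor's note, hypothetical names): given a TabPyState
    # instance ``state`` backed by a valid ConfigParser, a new model endpoint
    # is registered with
    #     state.add_endpoint(name="add", description="adds two numbers",
    #                        endpoint_type="model")
    # and stored in the state config with version 1.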
def _add_update_endpoints_config(self, endpoints):
# save the endpoint info to config
dstring = ""
for endpoint_name in endpoints:
try:
info = endpoints[endpoint_name]
dstring = str(
bytes(info["docstring"], "utf-8").decode("unicode_escape")
)
self._set_config_value(
_QUERY_OBJECT_DOCSTRING,
endpoint_name,
dstring,
_update_revision=False,
)
del info["docstring"]
self._set_config_value(
_DEPLOYMENT_SECTION_NAME, endpoint_name, json.dumps(info)
)
except Exception as e:
logger.error(f"Unable to write endpoints config: {e}")
raise
@state_lock
def update_endpoint(
self,
name,
description=None,
docstring=None,
endpoint_type=None,
version=None,
methods=None,
target=None,
dependencies=None,
schema=None,
):
"""
Update an existing endpoint on the TabPy.
Parameters
----------
name : str
Name of the endpoint
description : str, optional
Description of this endpoint
        docstring : str, optional
The doc string for this endpoint, if needed.
endpoint_type : str, optional
The endpoint type (model, alias)
version : str, optional
The version of this endpoint
        dependencies : list, optional
            List of dependent endpoints for this existing endpoint
target : str, optional
The target endpoint name for the alias.
Note:
For those parameters that are not specified, those values will not
get changed.
"""
try:
if (not self._check_endpoint_exists(name)):
raise ValueError(f"endpoint {name} does not exist.")
endpoints = self.get_endpoints()
endpoint_info = endpoints[name]
description = self._check_and_set_endpoint_description(
description, endpoint_info["description"])
docstring = self._check_and_set_endpoint_docstring(
docstring, endpoint_info["docstring"])
endpoint_type = self._check_and_set_endpoint_type(
endpoint_type, endpoint_info["type"])
dependencies = self._check_and_set_dependencies(
dependencies, endpoint_info.get("dependencies", []))
self._check_target(target)
if target and target not in endpoints:
raise ValueError("target endpoint is not valid.")
elif not target:
target = endpoint_info["target"]
if version and not isinstance(version, int):
raise ValueError("version must be an int.")
elif not version:
version = endpoint_info["version"]
endpoint_info = {
"description": description,
"docstring": docstring,
"type": endpoint_type,
"version": version,
"dependencies": dependencies,
"target": target,
"creation_time": endpoint_info["creation_time"],
"last_modified_time": int(time()),
"schema": schema,
}
endpoints[name] = endpoint_info
self._add_update_endpoints_config(endpoints)
except Exception as e:
logger.error(f"Error in update_endpoint: {e}")
raise
@state_lock
def delete_endpoint(self, name):
"""
Delete an existing endpoint on the TabPy
Parameters
----------
name : str
The name of the endpoint to be deleted.
Returns
-------
deleted endpoint object
Note:
Cannot delete this endpoint if other endpoints are currently
depending on this endpoint.
"""
if not name or name == "":
raise ValueError("Name of the endpoint must be a valid string.")
endpoints = self.get_endpoints()
if name not in endpoints:
raise ValueError(f"Endpoint {name} does not exist.")
endpoint_to_delete = endpoints[name]
# get dependencies and target
deps = set()
for endpoint_name in endpoints:
if endpoint_name != name:
deps_list = endpoints[endpoint_name].get("dependencies", [])
if name in deps_list:
deps.add(endpoint_name)
# check if other endpoints are depending on this endpoint
if len(deps) > 0:
raise ValueError(
f"Cannot remove endpoint {name}, it is currently "
f"used by {list(deps)} endpoints."
)
del endpoints[name]
# delete the endpoint from state
try:
self._remove_config_option(
_QUERY_OBJECT_DOCSTRING, name, _update_revision=False
)
self._remove_config_option(_DEPLOYMENT_SECTION_NAME, name)
return endpoint_to_delete
except Exception as e:
logger.error(f"Unable to delete endpoint {e}")
raise ValueError(f"Unable to delete endpoint: {e}")
@property
def name(self):
"""
Returns the name of the TabPy service.
"""
name = None
try:
name = self._get_config_value(_SERVICE_INFO_SECTION_NAME, "Name")
except Exception as e:
logger.error(f"Unable to get name: {e}")
return name
@property
def creation_time(self):
"""
Returns the creation time of the TabPy service.
"""
creation_time = 0
try:
creation_time = self._get_config_value(
_SERVICE_INFO_SECTION_NAME, "Creation Time"
)
except Exception as e:
logger.error(f"Unable to get name: {e}")
return creation_time
@state_lock
def set_name(self, name):
"""
Set the name of this TabPy service.
Parameters
----------
name : str
Name of TabPy service.
"""
if not isinstance(name, str):
raise ValueError("name must be a string.")
try:
self._set_config_value(_SERVICE_INFO_SECTION_NAME, "Name", name)
except Exception as e:
logger.error(f"Unable to set name: {e}")
def get_description(self):
"""
Returns the description of the TabPy service.
"""
description = None
try:
description = self._get_config_value(
_SERVICE_INFO_SECTION_NAME, "Description"
)
except Exception as e:
logger.error(f"Unable to get description: {e}")
return description
@state_lock
def set_description(self, description):
"""
Set the description of this TabPy service.
Parameters
----------
description : str
Description of TabPy service.
"""
if not isinstance(description, str):
raise ValueError("Description must be a string.")
try:
self._set_config_value(
_SERVICE_INFO_SECTION_NAME, "Description", description
)
except Exception as e:
logger.error(f"Unable to set description: {e}")
def get_revision_number(self):
"""
Returns the revision number of this TabPy service.
"""
rev = -1
try:
rev = int(self._get_config_value(_META_SECTION_NAME, "Revision Number"))
except Exception as e:
logger.error(f"Unable to get revision number: {e}")
return rev
def get_access_control_allow_origin(self):
"""
Returns Access-Control-Allow-Origin of this TabPy service.
"""
_cors_origin = ""
try:
logger.debug("Collecting Access-Control-Allow-Origin from state file ...")
_cors_origin = self._get_config_value(
"Service Info", "Access-Control-Allow-Origin"
)
except Exception as e:
logger.error(e)
return _cors_origin
def get_access_control_allow_headers(self):
"""
Returns Access-Control-Allow-Headers of this TabPy service.
"""
_cors_headers = ""
try:
_cors_headers = self._get_config_value(
"Service Info", "Access-Control-Allow-Headers"
)
except Exception:
pass
return _cors_headers
def get_access_control_allow_methods(self):
"""
Returns Access-Control-Allow-Methods of this TabPy service.
"""
_cors_methods = ""
try:
_cors_methods = self._get_config_value(
"Service Info", "Access-Control-Allow-Methods"
)
except Exception:
pass
return _cors_methods
def _set_revision_number(self, revision_number):
"""
Set the revision number of this TabPy service.
"""
if not isinstance(revision_number, int):
raise ValueError("revision number must be an int.")
try:
self._set_config_value(
_META_SECTION_NAME, "Revision Number", revision_number
)
except Exception as e:
logger.error(f"Unable to set revision number: {e}")
def _remove_config_option(
self,
section_name,
option_name,
logger=logging.getLogger(__name__),
_update_revision=True,
):
if not self.config:
raise ValueError("State configuration not yet loaded.")
self.config.remove_option(section_name, option_name)
# update revision number
if _update_revision:
self._increase_revision_number()
self._write_state(logger=logger)
def _has_config_value(self, section_name, option_name):
if not self.config:
raise ValueError("State configuration not yet loaded.")
return self.config.has_option(section_name, option_name)
def _increase_revision_number(self):
if not self.config:
raise ValueError("State configuration not yet loaded.")
cur_rev = int(self.config.get(_META_SECTION_NAME, "Revision Number"))
self.config.set(_META_SECTION_NAME, "Revision Number", str(cur_rev + 1))
def _set_config_value(
self,
section_name,
option_name,
option_value,
logger=logging.getLogger(__name__),
_update_revision=True,
):
if not self.config:
raise ValueError("State configuration not yet loaded.")
if not self.config.has_section(section_name):
logger.log(logging.DEBUG, f"Adding config section {section_name}")
self.config.add_section(section_name)
self.config.set(section_name, option_name, option_value)
# update revision number
if _update_revision:
self._increase_revision_number()
self._write_state(logger=logger)
def _get_config_items(self, section_name):
if not self.config:
raise ValueError("State configuration not yet loaded.")
return self.config.items(section_name)
def _get_config_value(
self, section_name, option_name, optional=False, default_value=None
):
logger.log(
logging.DEBUG,
f"Loading option '{option_name}' from section [{section_name}]...")
if not self.config:
msg = "State configuration not yet loaded."
logger.error(msg)
raise ValueError(msg)
res = None
if not option_name:
res = self.config.options(section_name)
elif self.config.has_option(section_name, option_name):
res = self.config.get(section_name, option_name)
elif optional:
res = default_value
else:
raise ValueError(
f"Cannot find option name {option_name} "
f"under section {section_name}"
)
logger.log(logging.DEBUG, f"Returning value '{res}'")
return res
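# Lookup behaviour of _get_config_value, with example section/option names
# (the names and the 9004 default are illustrative only):
#
#   self._get_config_value("Service Info", "Name")                # stored value, or ValueError
#   self._get_config_value("Service Info", "Port",
#                          optional=True, default_value=9004)     # 9004 when the option is absent
#   self._get_config_value("Service Info", None)                  # list of option names in the section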
def _write_state(self, logger=logging.getLogger(__name__)):
"""
Write state (ConfigParser) to Consul
"""
logger.log(logging.INFO, "Writing state to config")
write_state_config(self.config, self.settings, logger=logger)
|
|
from __future__ import unicode_literals
import ctypes
import json
import random
import threading
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from unittest import skipUnless
from django.contrib.gis import gdal
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import (
HAS_GEOS, GeometryCollection, GEOSException, GEOSGeometry, LinearRing,
LineString, MultiLineString, MultiPoint, MultiPolygon, Point, Polygon,
fromfile, fromstr, libgeos,
)
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import geos_version_info
from django.contrib.gis.shortcuts import numpy
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings, mock
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes
from django.utils.six.moves import range
from ..test_data import TestDataMixin
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(SimpleTestCase, TestDataMixin):
def test_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is either not None or the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
# Thus, memory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
if geom.hasz:
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
"Testing (HEX)EWKB output."
# For testing HEX(EWKB).
ogc_hex = b'01010000000000000000000000000000000000F03F'
ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex_3d, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use a
# WKBWriter with the output dimension set accordingly, else GEOS will
# insert garbage into the 3D coordinate if there is none.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
# Same for EWKB.
self.assertEqual(six.memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
self.assertEqual(six.memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
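# A sketch of how 3D (E)WKB/HEXEWKB is normally produced with an explicit
# output dimension, assuming the WKBWriter API from django.contrib.gis.geos
# (not exercised directly by this test):
#
#   from django.contrib.gis.geos import WKBWriter
#   wkb_w = WKBWriter()
#   wkb_w.outdim = 3                      # emit Z values rather than padding
#   hex_3d = wkb_w.write_hex(Point(0, 1, 2, srid=4326))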
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml:
self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
fromstr(err.wkt)
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, six.memoryview(b'0'))
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = six.memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_json(self):
"Testing GeoJSON input/output (via GDAL)."
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Errors shouldn't be raised on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo': 'bar'})
self.assertNotEqual(g, False)
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(pnt.dims, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(pnt, fromstr(p.wkt))
self.assertEqual(False, pnt == prev) # Use assertEqual to test __eq__
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertIsNone(pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(pnt, pnt2)
self.assertEqual(pnt, pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertEqual(mpnt.dims, 0)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(IndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.dims, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(ls, fromstr(l.wkt))
self.assertEqual(False, ls == prev) # Use assertEqual to test __eq__
self.assertRaises(IndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
# Point individual arguments
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
if numpy:
self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
with self.assertRaisesMessage(TypeError, 'Each coordinate should be a sequence (list or tuple)'):
LineString((0, 0))
with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
LineString([(0, 0)])
if numpy:
with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
LineString(numpy.array([(0, 0)]))
with mock.patch('django.contrib.gis.geos.linestring.numpy', False):
with self.assertRaisesMessage(TypeError, 'Invalid initialization input for LineStrings.'):
LineString('wrong input')
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertEqual(ml.dims, 1)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(ml, fromstr(l.wkt))
self.assertEqual(False, ml == prev) # Use assertEqual to test __eq__
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(IndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(lr.dims, 1)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy:
self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 3.'):
LinearRing((0, 0), (1, 1), (0, 0))
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
LinearRing([(0, 0)])
if numpy:
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
LinearRing(numpy.array([(0, 0)]))
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.dims, 2)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(poly, fromstr(p.wkt))
# Should not be equal to previous geometry
self.assertEqual(False, poly == prev) # Use assertEqual to test __eq__
self.assertNotEqual(poly, prev) # Use assertNotEqual to test __ne__
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(IndexError, poly.__getitem__, len(poly))
self.assertRaises(IndexError, poly.__setitem__, len(poly), False)
self.assertRaises(IndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygons_templates(self):
# Accessing Polygon attributes in templates should work.
engine = Engine()
template = engine.from_string('{{ polygons.0.wkt }}')
polygons = [fromstr(p.wkt) for p in self.geometries.multipolygons[:2]]
content = template.render(Context({'polygons': polygons}))
self.assertIn('MULTIPOLYGON (((100', content)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertGreater(p1, p2)
self.assertLess(p2, p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertGreater(p4, p3)
self.assertLess(p3, p4)
def test_multipolygons(self):
"Testing MultiPolygon objects."
fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mpoly.dims, 2)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(IndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
# #### Memory issues with rings and poly
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
str(ring1)
str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in range(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2:
tset = (5, 23)
else:
tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_unary_union(self):
"Testing unary_union."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = GeometryCollection(a, b).unary_union
self.assertTrue(u1.equals(u2))
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in range(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in range(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_covers(self):
poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
self.assertTrue(poly.covers(Point(5, 5)))
self.assertFalse(poly.covers(Point(100, 100)))
def test_closed(self):
ls_closed = LineString((0, 0), (1, 1), (0, 0))
ls_not_closed = LineString((0, 0), (1, 1))
self.assertFalse(ls_not_closed.closed)
self.assertTrue(ls_closed.closed)
if geos_version_info()['version'] >= '3.5':
self.assertFalse(MultiLineString(ls_closed, ls_not_closed).closed)
self.assertTrue(MultiLineString(ls_closed, ls_closed).closed)
with mock.patch('django.contrib.gis.geos.collections.geos_version_info', lambda: {'version': '3.4.9'}):
with self.assertRaisesMessage(GEOSException, "MultiLineString.closed requires GEOS >= 3.5.0."):
MultiLineString().closed
def test_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
with self.assertRaises(ctypes.ArgumentError):
pnt.srid = '4326'
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly:
self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)):
self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
p2 = fromstr(p1.hex)
self.assertIsNone(p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
# Testing that geometry SRID could be set to its own value
pnt_wo_srid = Point(1, 1)
pnt_wo_srid.srid = pnt_wo_srid.srid
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_custom_srid(self):
"""Test with a null srid and a srid unknown to GDAL."""
for srid in [None, 999999]:
pnt = Point(111200, 220900, srid=srid)
self.assertTrue(pnt.ewkt.startswith(("SRID=%s;" % srid if srid else '') + "POINT (111200.0"))
self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
self.assertIsNone(pnt.srs)
# Test conversion from custom to a known srid
c2w = gdal.CoordTransform(
gdal.SpatialReference(
'+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
'+datum=WGS84 +units=m +no_defs'
),
gdal.SpatialReference(4326))
new_pnt = pnt.transform(c2w, clone=True)
self.assertEqual(new_pnt.srid, 4326)
self.assertAlmostEqual(new_pnt.x, 1, 3)
self.assertAlmostEqual(new_pnt.y, 2, 3)
def test_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
# ### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup:
new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
# ### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in range(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting each ring in the polygon by 500.
for j in range(len(poly)):
r = poly[j]
for k in range(len(r)):
r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
# mpoly[0][0][0] = (3.14, 2.71)
# self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
# self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
# del mpoly
def test_point_list_assignment(self):
p = Point(0, 0)
p[:] = (1, 2, 3)
self.assertEqual(p, Point(1, 2, 3))
p[:] = ()
self.assertEqual(p.wkt, Point())
p[:] = (1, 2)
self.assertEqual(p.wkt, Point(1, 2))
with self.assertRaises(ValueError):
p[:] = (1,)
with self.assertRaises(ValueError):
p[:] = (1, 2, 3, 4, 5)
def test_linestring_list_assignment(self):
ls = LineString((0, 0), (1, 1))
ls[:] = ()
self.assertEqual(ls, LineString())
ls[:] = ((0, 0), (1, 1), (2, 2))
self.assertEqual(ls, LineString((0, 0), (1, 1), (2, 2)))
with self.assertRaises(ValueError):
ls[:] = (1,)
def test_linearring_list_assignment(self):
ls = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))
ls[:] = ()
self.assertEqual(ls, LinearRing())
ls[:] = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
self.assertEqual(ls, LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
with self.assertRaises(ValueError):
ls[:] = ((0, 0), (1, 1), (2, 2))
def test_polygon_list_assignment(self):
pol = Polygon()
pol[:] = (((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),)
self.assertEqual(pol, Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),))
pol[:] = ()
self.assertEqual(pol, Polygon())
def test_geometry_collection_list_assignment(self):
p = Point()
gc = GeometryCollection()
gc[:] = [p]
self.assertEqual(gc, GeometryCollection(p))
gc[:] = ()
self.assertEqual(gc, GeometryCollection())
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2., 3., 8.), pnt.coords)
with self.assertRaises(TypeError):
pnt.tuple = (1., 2.)
pnt.coords = (1., 2., 3.)
self.assertEqual((1., 2., 3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1., 2.))
ls[0] = (1., 2., 3.)
self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test_emptyCollections(self):
"Testing empty geometries and collections."
geoms = [
GeometryCollection([]),
fromstr('GEOMETRYCOLLECTION EMPTY'),
GeometryCollection(),
fromstr('POINT EMPTY'),
Point(),
fromstr('LINESTRING EMPTY'),
LineString(),
fromstr('POLYGON EMPTY'),
Polygon(),
fromstr('MULTILINESTRING EMPTY'),
MultiLineString(),
fromstr('MULTIPOLYGON EMPTY'),
MultiPolygon(()),
MultiPolygon(),
]
if numpy:
geoms.append(LineString(numpy.array([])))
for g in geoms:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
with self.assertRaises(IndexError):
g.x
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(IndexError, lr.__getitem__, 0)
else:
self.assertRaises(IndexError, g.__getitem__, 0)
def test_collection_dims(self):
gc = GeometryCollection([])
self.assertEqual(gc.dims, -1)
gc = GeometryCollection(Point(0, 0))
self.assertEqual(gc.dims, 0)
gc = GeometryCollection(LineString((0, 0), (1, 1)), Point(0, 0))
self.assertEqual(gc.dims, 1)
gc = GeometryCollection(LineString((0, 0), (1, 1)), Polygon(((0, 0), (0, 1), (1, 1), (0, 0))), Point(0, 0))
self.assertEqual(gc.dims, 2)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
coll.extend(p.wkt for p in self.geometries.polygons)
coll.extend(mp.wkt for mp in self.geometries.multipoints)
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_gdal(self):
"Testing `ogr` and `srs` properties."
g1 = fromstr('POINT(5 23)')
self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
self.assertIsNone(g1.srs)
g1_3d = fromstr('POINT(5 23 8)')
self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
self.assertEqual(g1_3d.ogr.z, 8)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
self.assertIsInstance(g2.srs, gdal.SpatialReference)
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform(self):
"Testing `transform` method."
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform_3d(self):
p3d = GEOSGeometry('POINT (5 23 100)', 4326)
p3d.transform(2774)
self.assertEqual(p3d.z, 100)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertIsNot(g1, g, "Clone didn't happen")
with mock.patch('django.contrib.gis.gdal.HAS_GDAL', False):
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertIsNot(g1, g, "Clone didn't happen")
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
@mock.patch('django.contrib.gis.gdal.HAS_GDAL', False)
def test_transform_nogdal(self):
""" Testing `transform` method (GDAL not available) """
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
from django.utils.six.moves import cPickle
import pickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
self.assertEqual(geom.srid, tmpg.srid)
def test_prepared(self):
"Testing PreparedGeometry support."
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
for pnt in pnts:
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(mpoly.covers(pnt), prep.covers(pnt))
self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
self.assertTrue(prep.disjoint(Point(-5, -5)))
poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
self.assertTrue(prep.overlaps(poly))
poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
self.assertTrue(prep.touches(poly))
poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
self.assertTrue(prep.within(poly))
# Original geometry deletion should not crash the prepared one (#21662)
del mpoly
self.assertTrue(prep.covers(Point(5, 5)))
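# Note (a sketch, not asserted above): preparing a geometry pays off when the
# same geometry is tested repeatedly against many candidates, since the
# prepared form caches internal indexes, e.g.
#
#   prep = big_poly.prepared
#   hits = [p for p in many_points if prep.contains(p)]
#
# where big_poly and many_points are hypothetical stand-ins.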
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
def test_linearref(self):
"Testing linear referencing"
ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
self.assertEqual(ls.project(Point(0, 20)), 10.0)
self.assertEqual(ls.project(Point(7, 6)), 24)
self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
self.assertEqual(ls.interpolate(10), Point(0, 10))
self.assertEqual(ls.interpolate(24), Point(10, 6))
self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
self.assertEqual(mls.project(Point(0, 20)), 10)
self.assertEqual(mls.project(Point(7, 6)), 16)
self.assertEqual(mls.interpolate(9), Point(0, 9))
self.assertEqual(mls.interpolate(17), Point(10, 7))
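# Worked example of the project()/interpolate() pair used above, on a simple
# horizontal segment (the numbers follow from the definitions, not from the
# test data):
#
#   line = LineString((0, 0), (10, 0))
#   line.project(Point(4, 3))              # 4.0 -- distance along the line to the closest point
#   line.interpolate(4)                    # Point(4, 0) -- the point at that distance
#   line.project_normalized(Point(4, 3))   # 0.4 -- the same distance as a fraction of line length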
def test_geos_version(self):
"""Testing the GEOS version regular expression."""
from django.contrib.gis.geos.libgeos import version_regex
versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')]
for v_init, v_geos, v_capi in versions:
m = version_regex.match(v_init)
self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
self.assertEqual(m.group('version'), v_geos)
self.assertEqual(m.group('capi_version'), v_capi)
def test_geos_threads(self):
pnt = Point()
context_ptrs = []
geos_init = libgeos.lgeos.initGEOS_r
geos_finish = libgeos.lgeos.finishGEOS_r
def init(*args, **kwargs):
result = geos_init(*args, **kwargs)
context_ptrs.append(result)
return result
def finish(*args, **kwargs):
result = geos_finish(*args, **kwargs)
destructor_called.set()
return result
for i in range(2):
destructor_called = threading.Event()
patch_path = 'django.contrib.gis.geos.libgeos.lgeos'
with mock.patch.multiple(patch_path, initGEOS_r=mock.DEFAULT, finishGEOS_r=mock.DEFAULT) as mocked:
mocked['initGEOS_r'].side_effect = init
mocked['finishGEOS_r'].side_effect = finish
with mock.patch('django.contrib.gis.geos.prototypes.predicates.geos_hasz.func') as mocked_hasz:
thread = threading.Thread(target=lambda: pnt.hasz)
thread.start()
thread.join()
# We can't be sure that members of thread locals are
# garbage collected right after `thread.join()` so
# we must wait until destructor is actually called.
# Fail if destructor wasn't called within a second.
self.assertTrue(destructor_called.wait(1))
context_ptr = context_ptrs[i]
self.assertIsInstance(context_ptr, libgeos.CONTEXT_PTR)
mocked_hasz.assert_called_once_with(context_ptr, pnt.ptr)
mocked['finishGEOS_r'].assert_called_once_with(context_ptr)
# Check that different contexts were used for the different threads.
self.assertNotEqual(context_ptrs[0], context_ptrs[1])
@ignore_warnings(category=RemovedInDjango20Warning)
def test_deprecated_srid_getters_setters(self):
p = Point(1, 2, srid=123)
self.assertEqual(p.get_srid(), p.srid)
p.set_srid(321)
self.assertEqual(p.srid, 321)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_deprecated_point_coordinate_getters_setters(self):
p = Point(1, 2, 3)
self.assertEqual((p.get_x(), p.get_y(), p.get_z()), (p.x, p.y, p.z))
p.set_x(3)
p.set_y(2)
p.set_z(1)
self.assertEqual((p.x, p.y, p.z), (3, 2, 1))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_deprecated_point_tuple_getters_setters(self):
p = Point(1, 2, 3)
self.assertEqual(p.get_coords(), (p.x, p.y, p.z))
p.set_coords((3, 2, 1))
self.assertEqual(p.get_coords(), (3, 2, 1))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_deprecated_cascaded_union(self):
for geom in self.geometries.multipolygons:
mpoly = GEOSGeometry(geom.wkt)
self.assertEqual(mpoly.cascaded_union, mpoly.unary_union)
|
|
from django.db import models
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import RichTextField, StreamField
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, StreamFieldPanel, FieldRowPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
# Contact Form
from wagtailcaptcha.models import WagtailCaptchaEmailForm
from wagtail.wagtailforms.models import AbstractFormField
from modelcluster.fields import ParentalKey
# Blocks
from wagtail.wagtailcore import blocks
from wagtail.wagtailimages.blocks import ImageChooserBlock
class FormField(AbstractFormField):
page = ParentalKey('FormPage', related_name='form_fields')
class FormPage(WagtailCaptchaEmailForm):
intro = RichTextField(blank=True, help_text='Edit the content you want to see before the form.')
thank_you_text = RichTextField(blank=True, help_text='Set the message users will see after submitting the form.')
headline = models.CharField(max_length=255, default="Contact us")
background = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
class Meta:
verbose_name = "Form submission page"
content_panels = WagtailCaptchaEmailForm.content_panels + [
FieldPanel('intro', classname="full"),
ImageChooserPanel('background'),
FieldPanel('thank_you_text'),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
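# Sketch of how this page is typically populated, assuming the standard
# wagtailforms/wagtailcaptcha flow (the field values below are hypothetical):
#
#   page = FormPage(title="Contact", to_address="[email protected]",
#                   from_address="[email protected]", subject="New enquiry")
#   page.form_fields.add(FormField(label="Your name", field_type="singleline",
#                                  required=True))
#
# Submissions are emailed using the "Email" panel values, a captcha field is
# appended by WagtailCaptchaEmailForm, and thank_you_text is shown afterwards.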
class MainTitleBlock(blocks.StructBlock):
bg_gray = blocks.BooleanBlock(required=False, help_text="Select if background should by gray")
maintitle = blocks.CharBlock()
class Meta:
template = 'homepage/main_title_block.html'
icon = 'placeholder'
label = 'MainTitle'
class ParagraphBlock(blocks.StructBlock):
bg_gray = blocks.BooleanBlock(required=False, help_text="Select if background should by gray")
paragraph = blocks.RichTextBlock()
class Meta:
template = 'homepage/paragraph_block.html'
icon = 'placeholder'
label = 'Paragraph'
class ImageBlock(blocks.StructBlock):
WIDTH_CHOICES = (
('original', 'original'),
('1920', '1920'),
('800', '800'),
('640', '640'),
('400', '400'),
)
bg_gray = blocks.BooleanBlock(required=False, help_text="Select if background should by gray")
image = ImageChooserBlock()
endtoend = blocks.BooleanBlock()
width = blocks.ChoiceBlock(choices=WIDTH_CHOICES, default="800")
class Meta:
template = 'homepage/image_block.html'
icon = 'placeholder'
label = 'Image'
class TimeLineEntryBlock(blocks.StructBlock):
headline = blocks.CharBlock()
date = blocks.CharBlock()
text = blocks.TextBlock()
photo = ImageChooserBlock()
class Meta:
template = 'homepage/couplestory_timeline_block.html'
icon = 'placeholder'
label = 'TimelineEntry'
class TimeLineBlock(blocks.StructBlock):
bg_gray = blocks.BooleanBlock(required=False, help_text="Select if background should by gray")
headline = blocks.CharBlock()
maintitle = blocks.CharBlock()
text = blocks.TextBlock()
timelineentry = blocks.StreamBlock(
[('TimelineEntry', TimeLineEntryBlock()),
], default="")
class Meta:
template = 'homepage/couplestory_block.html'
icon = 'placeholder'
label = 'Timeline'
class EventBlock(blocks.StructBlock):
background = ImageChooserBlock()
headline = blocks.CharBlock()
maintitle = blocks.CharBlock()
lefttitle = blocks.CharBlock()
lefttime = blocks.CharBlock()
leftdate = blocks.CharBlock()
lefttext = blocks.TextBlock()
righttitle = blocks.CharBlock()
righttime = blocks.CharBlock()
rightdate = blocks.CharBlock()
righttext = blocks.TextBlock()
class Meta:
template = 'homepage/event_block.html'
icon = 'placeholder'
label = 'Event Block'
class CoupleBlock(blocks.StructBlock):
bg_gray = blocks.BooleanBlock(required=False, help_text="Select if background should by gray")
maintitle = blocks.CharBlock()
subtitle = blocks.CharBlock()
headline = blocks.CharBlock()
bridename = blocks.CharBlock()
bridetext = blocks.TextBlock()
bridephoto = ImageChooserBlock()
groomame = blocks.CharBlock()
groomtext = blocks.TextBlock()
groomphoto = ImageChooserBlock()
class Meta:
template = 'homepage/couple_block.html'
icon = 'placeholder'
label = 'Couple Block'
class GoogleMapBlock(blocks.StructBlock):
map_width = blocks.CharBlock(required=True, max_length=4, default="600")
map_height = blocks.CharBlock(required=True, max_length=4, default="450")
map_params = blocks.CharBlock(required=True, max_length=300, help_text="No spaces, add + instead")
class Meta:
template = 'homepage/google_map_block.html'
icon = 'cogs'
label = 'Google Map'
class TwoColumnBlock(blocks.StructBlock):
COLOUR_CHOICES = (
('theme-white', 'White'),
('theme-black', 'Black'),
('theme-darker', 'Dark Gray'),
('theme-body-color', 'Body Color'),
('theme-primary', 'Primary Color'),
('theme-secondary', 'Secondary Color'),
)
background = blocks.ChoiceBlock(choices=COLOUR_CHOICES, default="white")
left_column = blocks.StreamBlock([
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
('google_map', GoogleMapBlock()),
], icon='arrow-left', label='Left column content')
right_column = blocks.StreamBlock([
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
('google_map', GoogleMapBlock()),
], icon='arrow-right', label='Right column content')
class Meta:
template = 'homepage/two_column_block.html'
icon = 'placeholder'
label = 'Two Columns'
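# Data-shape sketch (hedged): a TwoColumnBlock value ends up stored in the
# page's StreamField as nested type/value JSON, roughly
#
#   {"type": "TwoColumnBlock",
#    "value": {"background": "theme-white",
#              "left_column": [{"type": "heading", "value": "Directions"}],
#              "right_column": [{"type": "google_map",
#                                "value": {"map_width": "600",
#                                          "map_height": "450",
#                                          "map_params": "q=Some+Place"}}]}}
#
# with the example strings above being placeholders; each nested child keeps
# its own type, which is why every block can declare its own Meta.template.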
class ImpressumPage(Page):
body = RichTextField()
content_panels = Page.content_panels + [
FieldPanel('body')
]
class BlockPage(Page):
body = StreamField(
[('CoupleBlock', CoupleBlock()),
('TwoColumnBlock', TwoColumnBlock()),
('GoogleMapBlock', GoogleMapBlock()),
('RichTextBlock', blocks.RichTextBlock()),
('TimeLineBlock', TimeLineBlock()),
], default="")
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
class HomePage(Page):
YEAR_CHOICES = (
('2017', '2017'),
('2018', '2018'),
('2019', '2019'),
)
MONTH_CHOICES = tuple((str(i), str(i)) for i in range(1, 13))
DAY_CHOICES = tuple((str(i), str(i)) for i in range(1, 32))
HOUR_CHOICES = tuple((str(i), str(i)) for i in range(1, 25))
MINSEC_CHOICES = tuple((str(i), str(i)) for i in range(60))
background = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
couplename = models.CharField(max_length=255, default="Someone & Someone")
subtitle = models.CharField(max_length=255, blank=True, default="We are getting married")
year = models.CharField(max_length=4, choices=YEAR_CHOICES, default="2017")
month = models.CharField(max_length=2, choices=MONTH_CHOICES, default="7")
day = models.CharField(max_length=2, choices=DAY_CHOICES, default="1")
hour = models.CharField(max_length=2, choices=HOUR_CHOICES, default="12")
minute = models.CharField(max_length=2, choices=MINSEC_CHOICES, default="0")
seconds = models.CharField(max_length=2, choices=MINSEC_CHOICES, default="0")
icsfile = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
body = StreamField(
[('CoupleBlock', CoupleBlock()),
('TimeLineBlock', TimeLineBlock()),
], default="")
content_panels = Page.content_panels + [
FieldPanel('couplename', classname="full"),
FieldPanel('subtitle', classname="full"),
FieldRowPanel([
FieldPanel('year'),
FieldPanel('month'),
FieldPanel('day'),
FieldPanel('hour'),
FieldPanel('minute'),
FieldPanel('seconds')
]),
ImageChooserPanel('background'),
DocumentChooserPanel('icsfile'),
StreamFieldPanel('body'),
]
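# Illustrative helper (not part of the original models): a minimal sketch of how
# the separate year/month/day/hour/minute/seconds CharFields on HomePage could
# be combined into a single datetime, e.g. for a countdown. The helper name and
# its usage are assumptions, not existing project code.
def homepage_wedding_datetime(page):
    """Return the wedding date/time stored on a HomePage as a datetime."""
    import datetime
    # HOUR_CHOICES runs 1-24 while datetime expects 0-23, hence the modulo.
    return datetime.datetime(
        int(page.year), int(page.month), int(page.day),
        int(page.hour) % 24, int(page.minute), int(page.seconds))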
class OurDayPage(Page):
body = StreamField(
[
('MainTitle', MainTitleBlock()),
('Paragraph', ParagraphBlock()),
('Image', ImageBlock()),
('TwoColumnBlock', TwoColumnBlock()),
('EventBlock', EventBlock()),
], default="")
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
class OurWishPage(Page):
body = StreamField(
[
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
('TwoColumnBlock', TwoColumnBlock()),
], default="")
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
class AccommodationPage(Page):
body = StreamField(
[
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
('TwoColumnBlock', TwoColumnBlock()),
], default="")
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
class GoodToKnowPage(Page):
body = StreamField(
[
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
('TwoColumnBlock', TwoColumnBlock()),
], default="")
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from telemetry.core.timeline import trace_event_importer
import telemetry.core.timeline.counter as tracing_counter
import telemetry.core.timeline.model as timeline_model
from telemetry.core.backends.chrome import tracing_timeline_data
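# Reader's note (not part of the original Chromium file): the tests below build
# raw Chrome trace events by hand. Each event is a dict whose 'ph' field selects
# the phase: 'B'/'E' begin/end a slice, 'X' is a complete slice, 'I'/'i' are
# instant events, 'C' is a counter, 'S'/'T'/'F' are async start/step/finish,
# 's'/'t'/'f' are flow start/step/end, 'M' is metadata and 'P' is a sample.
# 'ts' and 'tts' are wall-clock and thread timestamps in microseconds, which the
# importer converts to milliseconds.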
def FindEventNamed(events, name):
for event in events:
if event.name == name:
return event
raise ValueError('No event found with name %s' % name)
class TraceEventTimelineImporterTest(unittest.TestCase):
def testCanImportEmpty(self):
    # TraceEventTimelineImporter needs to return False for empty lists and
    # strings because it assumes that they have len > 0. However, TimelineModel
    # can still import empty lists and strings (wrapped in a TimelineData
    # object) via EmptyTimelineDataImporter.
self.assertFalse(
trace_event_importer.TraceEventTimelineImporter.CanImport(
tracing_timeline_data.TracingTimelineData([])))
self.assertFalse(
trace_event_importer.TraceEventTimelineImporter.CanImport(
tracing_timeline_data.TracingTimelineData('')))
def testBasicSingleThreadNonnestedParsing(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 520, 'tts': 280, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'tts': 310, 'cat': 'foo',
'tid': 53, 'ph': 'E'},
{'name': 'b', 'args': {}, 'pid': 52, 'ts': 629, 'tts': 356, 'cat': 'bar',
'tid': 53, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 52, 'ts': 631, 'tts': 357, 'cat': 'bar',
'tid': 53, 'ph': 'E'},
{'name': 'c', 'args': {}, 'pid': 52, 'ts': 633, 'cat': 'baz',
'tid': 53, 'ph': 'B'},
{'name': 'c', 'args': {}, 'pid': 52, 'ts': 637, 'cat': 'baz',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(52, p.pid)
self.assertEqual(1, len(p.threads))
t = p.threads[53]
self.assertEqual(3, len(t.all_slices))
self.assertEqual(53, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertAlmostEqual(0, slice_event.start)
self.assertAlmostEqual((560 - 520) / 1000.0, slice_event.duration)
self.assertAlmostEqual((560 - 520) / 1000.0, slice_event.end)
self.assertAlmostEqual(280 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((310 - 280) / 1000.0, slice_event.thread_duration)
self.assertAlmostEqual(310 / 1000.0, slice_event.thread_end)
self.assertEqual(0, len(slice_event.sub_slices))
slice_event = t.all_slices[1]
self.assertEqual('b', slice_event.name)
self.assertEqual('bar', slice_event.category)
self.assertAlmostEqual((629 - 520) / 1000.0, slice_event.start)
self.assertAlmostEqual((631 - 629) / 1000.0, slice_event.duration)
self.assertAlmostEqual((631 - 520) / 1000.0, slice_event.end)
self.assertAlmostEqual(356 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((357 - 356) / 1000.0, slice_event.thread_duration)
self.assertAlmostEqual(357 / 1000.0, slice_event.thread_end)
self.assertEqual(0, len(slice_event.sub_slices))
slice_event = t.all_slices[2]
self.assertEqual('c', slice_event.name)
self.assertEqual('baz', slice_event.category)
self.assertAlmostEqual((633 - 520) / 1000.0, slice_event.start)
self.assertAlmostEqual((637 - 633) / 1000.0, slice_event.duration)
self.assertEqual(None, slice_event.thread_start)
self.assertEqual(None, slice_event.thread_duration)
self.assertEqual(None, slice_event.thread_end)
self.assertEqual(0, len(slice_event.sub_slices))
def testArgumentDupeCreatesNonFailingImportError(self):
events = [
{'name': 'a', 'args': {'x': 1}, 'pid': 1, 'ts': 520, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {'x': 2}, 'pid': 1, 'ts': 560, 'cat': 'foo',
'tid': 1, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
t = processes[0].threads[1]
slice_a = FindEventNamed(t.all_slices, 'a')
self.assertEqual(2, slice_a.args['x'])
self.assertEqual(1, len(m.import_errors))
  def testCategoryBeginEndMismatchPrefersSliceBegin(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 520, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'bar',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(52, p.pid)
self.assertEqual(1, len(p.threads))
t = p.threads[53]
self.assertEqual(1, len(t.all_slices))
self.assertEqual(53, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
def testNestedParsing(self):
events = [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 2, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 3, 'tts': 3, 'cat': 'bar',
'tid': 1, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 5, 'tts': 4, 'cat': 'bar',
'tid': 1, 'ph': 'E'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 7, 'tts': 5, 'cat': 'foo',
'tid': 1, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data,
shift_world_to_zero=False)
t = m.GetAllProcesses()[0].threads[1]
slice_a = FindEventNamed(t.all_slices, 'a')
slice_b = FindEventNamed(t.all_slices, 'b')
self.assertEqual('a', slice_a.name)
self.assertEqual('foo', slice_a.category)
self.assertAlmostEqual(0.001, slice_a.start)
self.assertAlmostEqual(0.006, slice_a.duration)
self.assertAlmostEqual(0.002, slice_a.thread_start)
self.assertAlmostEqual(0.003, slice_a.thread_duration)
self.assertEqual('b', slice_b.name)
self.assertEqual('bar', slice_b.category)
self.assertAlmostEqual(0.003, slice_b.start)
self.assertAlmostEqual(0.002, slice_b.duration)
self.assertAlmostEqual(0.003, slice_b.thread_start)
self.assertAlmostEqual(0.001, slice_b.thread_duration)
def testAutoclosing(self):
events = [
# Slices that don't finish.
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 2, 'cat': 'foo',
'tid': 2, 'ph': 'B'},
      # Slices on threads 1 and 2 that do finish, to give an 'end time' that
      # makes autoclosing work.
{'name': 'c', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1.5, 'cat': 'bar',
'tid': 1, 'ph': 'B'},
{'name': 'c', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 3, 'cat': 'bar',
'tid': 1, 'ph': 'E'},
{'name': 'd', 'args': {}, 'pid': 1, 'ts': 3, 'tts': 2.5, 'cat': 'bar',
'tid': 2, 'ph': 'B'},
{'name': 'd', 'args': {}, 'pid': 1, 'ts': 7, 'tts': 5, 'cat': 'bar',
'tid': 2, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
t1 = p.threads[1]
slice_event = FindEventNamed(t1.all_slices, 'a')
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertTrue(slice_event.did_not_finish)
self.assertAlmostEqual(0, slice_event.start)
self.assertAlmostEqual((7 - 1) / 1000.0, slice_event.duration)
self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((3 - 1) / 1000.0, slice_event.thread_duration)
t2 = p.threads[2]
slice_event = FindEventNamed(t2.all_slices, 'b')
self.assertEqual('b', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertTrue(slice_event.did_not_finish)
self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.start)
self.assertAlmostEqual((7 - 2) / 1000.0, slice_event.duration)
self.assertAlmostEqual(2 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((5 - 2) / 1000.0, slice_event.thread_duration)
def testAutoclosingLoneBegin(self):
events = [
# Slice that doesn't finish.
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
t = p.threads[1]
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertTrue(slice_event.did_not_finish)
self.assertAlmostEqual(0, slice_event.start)
self.assertAlmostEqual(0, slice_event.duration)
self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual(0, slice_event.thread_duration)
def testAutoclosingWithSubTasks(self):
events = [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'b1', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'b1', 'args': {}, 'pid': 1, 'ts': 3, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'b2', 'args': {}, 'pid': 1, 'ts': 3, 'cat': 'foo',
'tid': 1, 'ph': 'B'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data,
shift_world_to_zero=False)
t = m.GetAllProcesses()[0].threads[1]
slice_a = FindEventNamed(t.all_slices, 'a')
slice_b1 = FindEventNamed(t.all_slices, 'b1')
slice_b2 = FindEventNamed(t.all_slices, 'b2')
self.assertAlmostEqual(0.003, slice_a.end)
self.assertAlmostEqual(0.003, slice_b1.end)
self.assertAlmostEqual(0.003, slice_b2.end)
def testAutoclosingWithEventsOutsideBounds(self):
events = [
# Slice that begins before min and ends after max of the other threads.
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 0, 'tts': 0, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 6, 'tts': 3, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
      # Slice that does finish to give an 'end time' to establish a basis.
{'name': 'c', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'bar',
'tid': 2, 'ph': 'B'},
{'name': 'c', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'bar',
'tid': 2, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data,
shift_world_to_zero=False)
p = m.GetAllProcesses()[0]
t1 = p.threads[1]
self.assertAlmostEqual(0.000, m.thread_time_bounds[t1].min)
self.assertAlmostEqual(0.003, m.thread_time_bounds[t1].max)
self.assertEqual(2, len(t1.all_slices))
slice_event = FindEventNamed(t1.all_slices, 'a')
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertAlmostEqual(0, slice_event.start)
self.assertAlmostEqual(0.006, slice_event.duration)
self.assertAlmostEqual(0, slice_event.thread_start)
self.assertAlmostEqual(0.003, slice_event.thread_duration)
t2 = p.threads[2]
self.assertAlmostEqual(0.001, m.thread_time_bounds[t2].min)
self.assertAlmostEqual(0.002, m.thread_time_bounds[t2].max)
slice2 = FindEventNamed(t2.all_slices, 'c')
self.assertEqual('c', slice2.name)
self.assertEqual('bar', slice2.category)
self.assertAlmostEqual(0.002, slice2.start)
self.assertAlmostEqual(0.002, slice2.duration)
self.assertAlmostEqual(0.001, slice2.thread_start)
self.assertAlmostEqual(0.001, slice2.thread_duration)
self.assertAlmostEqual(0.000, m.bounds.min)
self.assertAlmostEqual(0.006, m.bounds.max)
def testNestedAutoclosing(self):
events = [
# Tasks that don't finish.
{'name': 'a1', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a2', 'args': {}, 'pid': 1, 'ts': 1.5, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
# Slice that does finish to give an 'end time' to make autoclosing work.
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
'tid': 2, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
'tid': 2, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data,
shift_world_to_zero=False)
t1 = m.GetAllProcesses()[0].threads[1]
t2 = m.GetAllProcesses()[0].threads[2]
slice_a1 = FindEventNamed(t1.all_slices, 'a1')
slice_a2 = FindEventNamed(t1.all_slices, 'a2')
FindEventNamed(t2.all_slices, 'b')
self.assertAlmostEqual(0.002, slice_a1.end)
self.assertAlmostEqual(0.002, slice_a2.end)
def testMultipleThreadParsing(self):
events = [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 6, 'tts': 3, 'cat': 'bar',
'tid': 2, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 8, 'tts': 4, 'cat': 'bar',
'tid': 2, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(2, len(p.threads))
# Check thread 1.
t = p.threads[1]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual(1, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertAlmostEqual(0, slice_event.start)
self.assertAlmostEqual((4 - 2) / 1000.0, slice_event.duration)
self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.thread_duration)
# Check thread 2.
t = p.threads[2]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual(2, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('b', slice_event.name)
self.assertEqual('bar', slice_event.category)
self.assertAlmostEqual((6 - 2) / 1000.0, slice_event.start)
self.assertAlmostEqual((8 - 6) / 1000.0, slice_event.duration)
self.assertAlmostEqual(3 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((4 - 3) / 1000.0, slice_event.thread_duration)
def testMultiplePidParsing(self):
events = [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'b', 'args': {}, 'pid': 2, 'ts': 6, 'tts': 3, 'cat': 'bar',
'tid': 2, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 2, 'ts': 8, 'tts': 4, 'cat': 'bar',
'tid': 2, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(2, len(processes))
p = processes[0]
self.assertEqual(1, p.pid)
self.assertEqual(1, len(p.threads))
# Check process 1 thread 1.
t = p.threads[1]
self.assertEqual(1, len(t.all_slices))
self.assertEqual(1, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertAlmostEqual(0, slice_event.start)
self.assertAlmostEqual((4 - 2) / 1000.0, slice_event.duration)
self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.thread_duration)
# Check process 2 thread 2.
# TODO: will this be in deterministic order?
p = processes[1]
self.assertEqual(2, p.pid)
self.assertEqual(1, len(p.threads))
t = p.threads[2]
self.assertEqual(1, len(t.all_slices))
self.assertEqual(2, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('b', slice_event.name)
self.assertEqual('bar', slice_event.category)
self.assertAlmostEqual((6 - 2) / 1000.0, slice_event.start)
self.assertAlmostEqual((8 - 6) / 1000.0, slice_event.duration)
self.assertAlmostEqual(3 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((4 - 3) / 1000.0, slice_event.thread_duration)
# Check getAllThreads.
self.assertEqual([processes[0].threads[1],
processes[1].threads[2]],
m.GetAllThreads())
def testThreadNames(self):
events = [
{'name': 'thread_name', 'args': {'name': 'Thread 1'},
'pid': 1, 'ts': 0, 'tid': 1, 'ph': 'M'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'b', 'args': {}, 'pid': 2, 'ts': 3, 'cat': 'foo',
'tid': 2, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 2, 'ts': 4, 'cat': 'foo',
'tid': 2, 'ph': 'E'},
{'name': 'thread_name', 'args': {'name': 'Thread 2'},
'pid': 2, 'ts': 0, 'tid': 2, 'ph': 'M'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual('Thread 1', processes[0].threads[1].name)
self.assertEqual('Thread 2', processes[1].threads[2].name)
def testParsingWhenEndComesFirst(self):
events = [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 4, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 5, 'tts': 5, 'cat': 'foo',
'tid': 1, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data,
shift_world_to_zero=False)
p = m.GetAllProcesses()[0]
t = p.threads[1]
self.assertEqual(1, len(t.all_slices))
self.assertEqual('a', t.all_slices[0].name)
self.assertEqual('foo', t.all_slices[0].category)
self.assertEqual(0.004, t.all_slices[0].start)
self.assertEqual(0.001, t.all_slices[0].duration)
self.assertEqual(0.004, t.all_slices[0].thread_start)
self.assertEqual(0.001, t.all_slices[0].thread_duration)
self.assertEqual(1, len(m.import_errors))
def testImmediateParsing(self):
events = [
# Need to include immediates inside a task so the timeline
# recentering/zeroing doesn't clobber their timestamp.
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'immediate', 'args': {}, 'pid': 1, 'ts': 4, 'cat': 'bar',
'tid': 1, 'ph': 'I'},
{'name': 'slower', 'args': {}, 'pid': 1, 'ts': 8, 'cat': 'baz',
'tid': 1, 'ph': 'i'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 8, 'tts': 4, 'cat': 'foo',
'tid': 1, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data,
shift_world_to_zero=False)
p = m.GetAllProcesses()[0]
t = p.threads[1]
self.assertEqual(3, len(t.all_slices))
i = m.GetAllEventsOfName('immediate')[0]
self.assertEqual('immediate', i.name)
self.assertEqual('bar', i.category)
self.assertAlmostEqual(0.004, i.start)
self.assertAlmostEqual(0, i.duration)
slower = m.GetAllEventsOfName('slower')[0]
self.assertEqual('slower', slower.name)
self.assertEqual('baz', slower.category)
self.assertAlmostEqual(0.008, slower.start)
self.assertAlmostEqual(0, slower.duration)
a = m.GetAllEventsOfName('a')[0]
self.assertEqual('a', a.name)
self.assertEqual('foo', a.category)
self.assertAlmostEqual(0.002, a.start)
self.assertAlmostEqual(0.006, a.duration)
self.assertAlmostEqual(0.001, a.thread_start)
self.assertAlmostEqual(0.003, a.thread_duration)
def testSimpleCounter(self):
events = [
{'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 0, 'cat': 'foo',
'tid': 1, 'ph': 'C'},
{'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
'tid': 1, 'ph': 'C'},
{'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 20, 'cat': 'foo',
'tid': 1, 'ph': 'C'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
ctr = p.counters['foo.ctr']
self.assertEqual('ctr', ctr.name)
self.assertEqual('foo', ctr.category)
self.assertEqual(3, ctr.num_samples)
self.assertEqual(1, ctr.num_series)
self.assertEqual(['value'], ctr.series_names)
self.assertEqual([0, 0.01, 0.02], ctr.timestamps)
self.assertEqual([0, 10, 0], ctr.samples)
self.assertEqual([0, 10, 0], ctr.totals)
self.assertEqual(10, ctr.max_total)
def testInstanceCounter(self):
events = [
{'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 0, 'cat': 'foo',
'tid': 1,
'ph': 'C', 'id': 0},
{'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
'tid': 1,
'ph': 'C', 'id': 0},
{'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
'tid': 1,
'ph': 'C', 'id': 1},
{'name': 'ctr', 'args': {'value': 20}, 'pid': 1, 'ts': 15, 'cat': 'foo',
'tid': 1,
'ph': 'C', 'id': 1},
{'name': 'ctr', 'args': {'value': 30}, 'pid': 1, 'ts': 18, 'cat': 'foo',
'tid': 1,
'ph': 'C', 'id': 1},
{'name': 'ctr', 'args': {'value': 40}, 'pid': 1, 'ts': 20, 'cat': 'bar',
'tid': 1,
'ph': 'C', 'id': 2}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
ctr = p.counters['foo.ctr[0]']
self.assertEqual('ctr[0]', ctr.name)
self.assertEqual('foo', ctr.category)
self.assertEqual(2, ctr.num_samples)
self.assertEqual(1, ctr.num_series)
self.assertEqual([0, 0.01], ctr.timestamps)
self.assertEqual([0, 10], ctr.samples)
ctr = m.GetAllProcesses()[0].counters['foo.ctr[1]']
self.assertEqual('ctr[1]', ctr.name)
self.assertEqual('foo', ctr.category)
self.assertEqual(3, ctr.num_samples)
self.assertEqual(1, ctr.num_series)
self.assertEqual([0.01, 0.015, 0.018], ctr.timestamps)
self.assertEqual([10, 20, 30], ctr.samples)
ctr = m.GetAllProcesses()[0].counters['bar.ctr[2]']
self.assertEqual('ctr[2]', ctr.name)
self.assertEqual('bar', ctr.category)
self.assertEqual(1, ctr.num_samples)
self.assertEqual(1, ctr.num_series)
self.assertEqual([0.02], ctr.timestamps)
self.assertEqual([40], ctr.samples)
def testMultiCounterUpdateBounds(self):
ctr = tracing_counter.Counter(None, 'testBasicCounter',
'testBasicCounter')
ctr.series_names = ['value1', 'value2']
ctr.timestamps = [0, 1, 2, 3, 4, 5, 6, 7]
ctr.samples = [0, 0,
1, 0,
1, 1,
2, 1.1,
3, 0,
1, 7,
3, 0,
3.1, 0.5]
ctr.FinalizeImport()
self.assertEqual(8, ctr.max_total)
self.assertEqual([0, 0,
1, 1,
1, 2,
2, 3.1,
3, 3,
1, 8,
3, 3,
3.1, 3.6], ctr.totals)
def testMultiCounter(self):
events = [
{'name': 'ctr', 'args': {'value1': 0, 'value2': 7}, 'pid': 1, 'ts': 0,
'cat': 'foo', 'tid': 1, 'ph': 'C'},
{'name': 'ctr', 'args': {'value1': 10, 'value2': 4}, 'pid': 1, 'ts': 10,
'cat': 'foo', 'tid': 1, 'ph': 'C'},
{'name': 'ctr', 'args': {'value1': 0, 'value2': 1 }, 'pid': 1, 'ts': 20,
'cat': 'foo', 'tid': 1, 'ph': 'C'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
ctr = p.counters['foo.ctr']
self.assertEqual('ctr', ctr.name)
self.assertEqual('foo', ctr.category)
self.assertEqual(3, ctr.num_samples)
self.assertEqual(2, ctr.num_series)
self.assertEqual(sorted(['value1', 'value2']), sorted(ctr.series_names))
self.assertEqual(sorted([0, 0.01, 0.02]), sorted(ctr.timestamps))
self.assertEqual(sorted([0, 7, 10, 4, 0, 1]), sorted(ctr.samples))
# We can't check ctr.totals here because it can change depending on
# the order in which the series names are added.
self.assertEqual(14, ctr.max_total)
def testImportObjectInsteadOfArray(self):
events = { 'traceEvents': [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
] }
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(1, len(m.GetAllProcesses()))
def testImportString(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(
json.dumps(events))
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(1, len(m.GetAllProcesses()))
def testImportStringWithTrailingNewLine(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(
json.dumps(events) + '\n')
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(1, len(m.GetAllProcesses()))
def testImportStringWithMissingCloseSquareBracket(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
tmp = json.dumps(events)
self.assertEqual(']', tmp[-1])
# Drop off the trailing ]
dropped = tmp[:-1]
timeline_data = tracing_timeline_data.TracingTimelineData(dropped)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(1, len(m.GetAllProcesses()))
def testImportStringWithEndingCommaButMissingCloseSquareBracket(self):
lines = [
'[',
'{"name": "a", "args": {}, "pid": 52, "ts": 524, "cat": "foo", '
'"tid": 53, "ph": "B"},',
'{"name": "a", "args": {}, "pid": 52, "ts": 560, "cat": "foo", '
'"tid": 53, "ph": "E"},'
]
text = '\n'.join(lines)
timeline_data = tracing_timeline_data.TracingTimelineData(text)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
self.assertEqual(1, len(processes[0].threads[53].all_slices))
def testImportStringWithMissingCloseSquareBracketAndNewline(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
tmp = json.dumps(events)
self.assertEqual(']', tmp[-1])
# Drop off the trailing ] and add a newline
dropped = tmp[:-1]
timeline_data = tracing_timeline_data.TracingTimelineData(dropped + '\n')
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(1, len(m.GetAllProcesses()))
def testImportStringWithEndingCommaButMissingCloseSquareBracketCRLF(self):
lines = [
'[',
'{"name": "a", "args": {}, "pid": 52, "ts": 524, "cat": "foo", '
'"tid": 53, "ph": "B"},',
'{"name": "a", "args": {}, "pid": 52, "ts": 560, "cat": "foo", '
'"tid": 53, "ph": "E"},'
]
text = '\r\n'.join(lines)
timeline_data = tracing_timeline_data.TracingTimelineData(text)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
self.assertEqual(1, len(processes[0].threads[53].all_slices))
def testImportOldFormat(self):
lines = [
'[',
'{"cat":"a","pid":9,"tid":8,"ts":194,"ph":"E","name":"I","args":{}},',
'{"cat":"b","pid":9,"tid":8,"ts":194,"ph":"B","name":"I","args":{}}',
']'
]
text = '\n'.join(lines)
timeline_data = tracing_timeline_data.TracingTimelineData(text)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
self.assertEqual(1, len(processes[0].threads[8].all_slices))
def testStartFinishOneSliceOneThread(self):
events = [
# Time is intentionally out of order.
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'cat',
'tid': 53,
'ph': 'F', 'id': 72},
{'name': 'a', 'pid': 52, 'ts': 524, 'cat': 'cat',
'tid': 53,
'ph': 'S', 'id': 72, 'args': {'foo': 'bar'}}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(2, len(m.GetAllEvents()))
processes = m.GetAllProcesses()
t = processes[0].threads[53]
slices = t.async_slices
self.assertEqual(1, len(slices))
self.assertEqual('a', slices[0].name)
self.assertEqual('cat', slices[0].category)
self.assertEqual(72, slices[0].id)
self.assertEqual('bar', slices[0].args['foo'])
self.assertEqual(0, slices[0].start)
self.assertAlmostEqual((60 - 24) / 1000.0, slices[0].duration)
self.assertEqual(t, slices[0].start_thread)
self.assertEqual(t, slices[0].end_thread)
def testEndArgsAddedToSlice(self):
events = [
{'name': 'a', 'args': {'x': 1}, 'pid': 52, 'ts': 520, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {'y': 2}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(1, len(p.threads))
t = p.threads[53]
self.assertEqual(1, len(t.all_slices))
self.assertEqual(53, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertEqual(0, slice_event.start)
self.assertEqual(1, slice_event.args['x'])
self.assertEqual(2, slice_event.args['y'])
  def testEndArgOverwritesOriginalArgValueIfDuplicated(self):
events = [
{'name': 'b', 'args': {'z': 3}, 'pid': 52, 'ts': 629, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'b', 'args': {'z': 4}, 'pid': 52, 'ts': 631, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(1, len(p.threads))
t = p.threads[53]
slice_event = t.all_slices[0]
self.assertEqual('b', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertEqual(0, slice_event.start)
self.assertEqual(4, slice_event.args['z'])
def testSliceHierarchy(self):
''' The slice hierarchy should look something like this:
[ a ]
[ b ] [ d ]
[ c ] [ e ]
'''
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 100, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 200, 'cat': 'foo',
'tid': 53, 'ph': 'E'},
{'name': 'b', 'args': {}, 'pid': 52, 'ts': 125, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 52, 'ts': 165, 'cat': 'foo',
'tid': 53, 'ph': 'E'},
{'name': 'c', 'args': {}, 'pid': 52, 'ts': 125, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'c', 'args': {}, 'pid': 52, 'ts': 135, 'cat': 'foo',
'tid': 53, 'ph': 'E'},
{'name': 'd', 'args': {}, 'pid': 52, 'ts': 175, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'd', 'args': {}, 'pid': 52, 'ts': 190, 'cat': 'foo',
'tid': 53, 'ph': 'E'},
{'name': 'e', 'args': {}, 'pid': 52, 'ts': 155, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'e', 'args': {}, 'pid': 52, 'ts': 165, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data,
shift_world_to_zero=False)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(1, len(p.threads))
t = p.threads[53]
slice_a = t.all_slices[0]
self.assertEqual(4, len(slice_a.GetAllSubSlices()))
self.assertEqual('a', slice_a.name)
self.assertEqual(100 / 1000.0, slice_a.start)
self.assertEqual(200 / 1000.0, slice_a.end)
self.assertEqual(2, len(slice_a.sub_slices))
slice_b = slice_a.sub_slices[0]
self.assertEqual('b', slice_b.name)
self.assertEqual(2, len(slice_b.sub_slices))
self.assertEqual('c', slice_b.sub_slices[0].name)
self.assertEqual('e', slice_b.sub_slices[1].name)
slice_d = slice_a.sub_slices[1]
self.assertEqual('d', slice_d.name)
self.assertEqual(0, len(slice_d.sub_slices))
def testAsyncEndArgAddedToSlice(self):
events = [
# Time is intentionally out of order.
{'name': 'c', 'args': {'y': 2}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53,
'ph': 'F', 'id': 72},
{'name': 'c', 'args': {'x': 1}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53,
'ph': 'S', 'id': 72}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertEqual(1, len(t.async_slices))
parent_slice = t.async_slices[0]
self.assertEqual('c', parent_slice.name)
self.assertEqual('foo', parent_slice.category)
self.assertEqual(1, len(parent_slice.sub_slices))
sub_slice = parent_slice.sub_slices[0]
self.assertEqual(1, sub_slice.args['x'])
self.assertEqual(2, sub_slice.args['y'])
  def testAsyncEndArgOverwritesOriginalArgValueIfDuplicated(self):
events = [
# Time is intentionally out of order.
{'name': 'd', 'args': {'z': 4}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53,
'ph': 'F', 'id': 72},
{'name': 'd', 'args': {'z': 3}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53,
'ph': 'S', 'id': 72}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertEqual(1, len(t.async_slices))
parent_slice = t.async_slices[0]
self.assertEqual('d', parent_slice.name)
self.assertEqual('foo', parent_slice.category)
self.assertEqual(1, len(parent_slice.sub_slices))
sub_slice = parent_slice.sub_slices[0]
self.assertEqual(4, sub_slice.args['z'])
def testAsyncStepsInOneThread(self):
events = [
# Time is intentionally out of order.
{'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'F', 'id': 72, 'tts': 25},
{'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72, 'tts': 20},
{'name': 'a', 'args': {'x': 1}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'S', 'id': 72, 'tts': 17}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertEqual(1, len(t.async_slices))
parent_slice = t.async_slices[0]
self.assertEqual('a', parent_slice.name)
self.assertEqual('foo', parent_slice.category)
self.assertEqual(0, parent_slice.start)
self.assertEqual(2, len(parent_slice.sub_slices))
sub_slice = parent_slice.sub_slices[0]
self.assertEqual('a', sub_slice.name)
self.assertEqual('foo', sub_slice.category)
self.assertAlmostEqual(0, sub_slice.start)
self.assertAlmostEqual((548 - 524) / 1000.0, sub_slice.duration)
self.assertAlmostEqual((20 - 17) / 1000.0, sub_slice.thread_duration)
self.assertEqual(1, sub_slice.args['x'])
sub_slice = parent_slice.sub_slices[1]
self.assertEqual('a:s1', sub_slice.name)
self.assertEqual('foo', sub_slice.category)
self.assertAlmostEqual((548 - 524) / 1000.0, sub_slice.start)
self.assertAlmostEqual((560 - 548) / 1000.0, sub_slice.duration)
self.assertAlmostEqual((25 - 20) / 1000.0, sub_slice.thread_duration)
self.assertEqual(2, sub_slice.args['y'])
self.assertEqual(3, sub_slice.args['z'])
def testAsyncStepsMissingStart(self):
events = [
# Time is intentionally out of order.
{'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'F', 'id': 72},
{'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertTrue(t is not None)
def testAsyncStepsMissingFinish(self):
events = [
# Time is intentionally out of order.
{'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72},
{'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'S', 'id': 72}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertTrue(t is not None)
def testImportSamples(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 548, 'cat': 'test',
'tid': 53, 'ph': 'P'},
{'name': 'b', 'args': {}, 'pid': 52, 'ts': 548, 'cat': 'test',
'tid': 53, 'ph': 'P'},
{'name': 'c', 'args': {}, 'pid': 52, 'ts': 558, 'cat': 'test',
'tid': 53, 'ph': 'P'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
t = p.threads[53]
self.assertEqual(3, len(t.samples))
self.assertEqual(0.0, t.samples[0].start)
self.assertEqual(0.0, t.samples[1].start)
self.assertAlmostEqual(0.01, t.samples[2].start)
self.assertEqual('a', t.samples[0].name)
self.assertEqual('b', t.samples[1].name)
self.assertEqual('c', t.samples[2].name)
self.assertEqual(0, len(m.import_errors))
def testImportSamplesMissingArgs(self):
events = [
{'name': 'a', 'pid': 52, 'ts': 548, 'cat': 'test',
'tid': 53, 'ph': 'P'},
{'name': 'b', 'pid': 52, 'ts': 548, 'cat': 'test',
'tid': 53, 'ph': 'P'},
{'name': 'c', 'pid': 52, 'ts': 549, 'cat': 'test',
'tid': 53, 'ph': 'P'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
t = p.threads[53]
self.assertEqual(3, len(t.samples))
self.assertEqual(0, len(m.import_errors))
def testImportCompleteEvent(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 629, 'tts': 538, 'dur': 1,
'tdur': 1, 'cat': 'baz', 'tid': 53, 'ph': 'X'},
{'name': 'b', 'args': {}, 'pid': 52, 'ts': 730, 'tts': 620, 'dur': 20,
'tdur': 14, 'cat': 'foo', 'tid': 53, 'ph': 'X'},
{'name': 'c', 'args': {}, 'pid': 52, 'ts': 740, 'tts': 625, 'cat': 'baz',
'tid': 53, 'ph': 'X'},
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
t = p.threads[53]
self.assertEqual(3, len(t.all_slices))
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertAlmostEqual(0.0, slice_event.start)
self.assertAlmostEqual(1 / 1000.0, slice_event.duration)
self.assertAlmostEqual(538 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual(1 / 1000.0, slice_event.thread_duration)
self.assertFalse(slice_event.did_not_finish)
self.assertEqual(0, len(slice_event.sub_slices))
slice_event = t.all_slices[1]
self.assertEqual('b', slice_event.name)
self.assertAlmostEqual((730 - 629) / 1000.0, slice_event.start)
self.assertAlmostEqual(20 / 1000.0, slice_event.duration)
self.assertAlmostEqual(620 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual(14 / 1000.0, slice_event.thread_duration)
self.assertFalse(slice_event.did_not_finish)
self.assertEqual(1, len(slice_event.sub_slices))
self.assertEqual(t.all_slices[2], slice_event.sub_slices[0])
slice_event = t.all_slices[2]
self.assertEqual('c', slice_event.name)
self.assertAlmostEqual((740 - 629) / 1000.0, slice_event.start)
self.assertAlmostEqual(10 / 1000.0, slice_event.duration)
self.assertAlmostEqual(625 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual(9 / 1000.0, slice_event.thread_duration)
self.assertTrue(slice_event.did_not_finish)
self.assertEqual(0, len(slice_event.sub_slices))
def testImportFlowEvent(self):
events = [
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 548,
'ph': 's', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
'ph': 't', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 580,
'ph': 'f', 'args': {}},
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
t = p.threads[53]
self.assertTrue(t is not None)
self.assertEqual(2, len(m.flow_events))
start = m.flow_events[0][0]
step = m.flow_events[0][1]
finish = m.flow_events[1][1]
self.assertEqual('a', start.name)
self.assertEqual('foo', start.category)
self.assertEqual(72, start.event_id)
self.assertEqual(0, start.start)
self.assertEqual(0, start.duration)
self.assertEqual(start.name, step.name)
self.assertEqual(start.category, step.category)
self.assertEqual(start.event_id, step.event_id)
self.assertAlmostEqual(12 / 1000.0, step.start)
    self.assertEqual(0, step.duration)
self.assertEqual(start.name, finish.name)
self.assertEqual(start.category, finish.category)
self.assertEqual(start.event_id, finish.event_id)
self.assertAlmostEqual((20 + 12) / 1000.0, finish.start)
self.assertEqual(0, finish.duration)
def testImportOutOfOrderFlowEvent(self):
events = [
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 548,
'ph': 's', 'args': {}},
{'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 148,
'ph': 's', 'args': {}},
{'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 570,
'ph': 'f', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
'ph': 't', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 580,
'ph': 'f', 'args': {}},
]
expected = [[0.4, 0.412], [0.0, 0.422], [0.412, 0.432]]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(3, len(m.flow_events))
for i in range(len(expected)):
self.assertAlmostEqual(expected[i][0], m.flow_events[i][0].start)
self.assertAlmostEqual(expected[i][1], m.flow_events[i][1].start)
  def testImportErroneousFlowEvent(self):
events = [
{'name': 'a', 'cat': 'foo', 'id': 70, 'pid': 52, 'tid': 53, 'ts': 548,
'ph': 's', 'args': {}},
{'name': 'a2', 'cat': 'foo', 'id': 70, 'pid': 52, 'tid': 53, 'ts': 550,
'ph': 's', 'args': {}},
{'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 570,
'ph': 'f', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
'ph': 't', 'args': {}},
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(0, len(m.flow_events))
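# Not part of the original Chromium file: a minimal entry point so the tests
# above can also be run directly with the stock unittest runner (assuming the
# telemetry package is importable).
if __name__ == '__main__':
  unittest.main()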
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utility and supporting functions for ResNet.
This module contains ResNet code which does not directly build layers. This
includes dataset management, hyperparameter and optimizer code, and argument
parsing. Code for defining the ResNet layers can be found in resnet_model.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import multiprocessing
import os
from absl import flags
import tensorflow as tf
from official.r1.resnet import imagenet_preprocessing
from official.r1.resnet import resnet_model
from official.r1.utils import export
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset,
is_training,
batch_size,
shuffle_buffer,
parse_record_fn,
num_epochs=1,
dtype=tf.float32,
datasets_num_private_threads=None,
drop_remainder=False,
tf_data_experimental_slack=False):
"""Given a Dataset with raw records, return an iterator over the records.
Args:
dataset: A Dataset representing raw records
is_training: A boolean denoting whether the input is for training.
batch_size: The number of samples per batch.
shuffle_buffer: The buffer size to use when shuffling records. A larger
value results in better randomness, but smaller values reduce startup
time and use less memory.
parse_record_fn: A function that takes a raw record and returns the
corresponding (image, label) pair.
num_epochs: The number of epochs to repeat the dataset.
dtype: Data type to use for images/features.
    datasets_num_private_threads: Number of threads for a private
      threadpool created for all tf.data computation.
    drop_remainder: A boolean indicating whether to drop the remainder of the
      batches. If True, the batch dimension will be static.
tf_data_experimental_slack: Whether to enable tf.data's
`experimental_slack` option.
Returns:
Dataset of (image, label) pairs ready for iteration.
"""
# Defines a specific size thread pool for tf.data operations.
if datasets_num_private_threads:
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = (
datasets_num_private_threads)
dataset = dataset.with_options(options)
tf.compat.v1.logging.info('datasets_num_private_threads: %s',
datasets_num_private_threads)
# Disable intra-op parallelism to optimize for throughput instead of latency.
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
# Prefetches a batch at a time to smooth out the time taken to load input
# files for shuffling and processing.
dataset = dataset.prefetch(buffer_size=batch_size)
if is_training:
# Shuffles records before repeating to respect epoch boundaries.
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
# Repeats the dataset for the number of epochs to train.
dataset = dataset.repeat(num_epochs)
# Parses the raw records into images and labels.
dataset = dataset.map(
lambda value: parse_record_fn(value, is_training, dtype),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
# Operations between the final prefetch and the get_next call to the iterator
# will happen synchronously during run time. We prefetch here again to
# background all of the above processing work and keep it out of the
# critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE
# allows DistributionStrategies to adjust how many batches to fetch based
# on how many devices are present.
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
if tf_data_experimental_slack:
options = tf.data.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
return dataset
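# Illustrative usage sketch (commented out; not part of the original module).
# The file pattern and `_parse_record` below are hypothetical placeholders for a
# real TFRecord dataset and a record parser such as the one in imagenet_main.py.
#
#   def _parse_record(raw_record, is_training, dtype):
#     image = tf.zeros([224, 224, 3], dtype=dtype)  # decode/augment here
#     label = tf.constant(0, dtype=tf.int32)
#     return image, label
#
#   raw_dataset = tf.data.TFRecordDataset(['/tmp/train-00000-of-01024'])
#   train_dataset = process_record_dataset(
#       dataset=raw_dataset,
#       is_training=True,
#       batch_size=128,
#       shuffle_buffer=10000,
#       parse_record_fn=_parse_record,
#       num_epochs=1,
#       dtype=tf.float32)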
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32):
"""Returns an input function that returns a dataset with random data.
  This input_fn returns a dataset that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host-to-device
  copy is still included. This is used to find the upper throughput bound when
  tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
      tensor.
dtype: Data type for features/images.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal(
[batch_size] + [height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform(
[batch_size],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
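# Illustrative usage (commented out; the values are hypothetical, not part of
# the original module): ImageNet-sized synthetic data with 1001 label classes.
#
#   synth_input_fn = get_synth_input_fn(224, 224, 3, 1001, dtype=tf.float32)
#   dataset = synth_input_fn(is_training=True, data_dir=None, batch_size=32)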
def image_bytes_serving_input_fn(image_shape, dtype=tf.float32):
"""Serving input fn for raw jpeg images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
# Bounding box around the whole image.
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
height, width, num_channels = image_shape
image = imagenet_preprocessing.preprocess_image(
image_bytes, bbox, height, width, num_channels, is_training=False)
return image
image_bytes_list = tf.compat.v1.placeholder(
shape=[None], dtype=tf.string, name='input_tensor')
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(
images, {'image_bytes': image_bytes_list})
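# Illustrative export sketch (commented out; the estimator, image shape and
# export path are hypothetical, not part of the original module):
#
#   input_receiver_fn = functools.partial(
#       image_bytes_serving_input_fn, (224, 224, 3), dtype=tf.float32)
#   classifier.export_saved_model('/tmp/resnet_export', input_receiver_fn)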
def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
"""Override flags and set env_vars for performance.
  These settings exist to test the difference between using stock settings
  and manual tuning. They also show some of the ENV_VARS that can be tweaked to
  squeeze a few extra examples per second. These settings are defaulted to the
  current platform of interest, which changes over time.
On systems with small numbers of cpu cores, e.g. under 8 logical cores,
setting up a gpu thread pool with `tf_gpu_thread_mode=gpu_private` may perform
poorly.
Args:
    flags_obj: Current flags, which will be adjusted, possibly overriding
      what has been set by the user on the command line.
"""
cpu_count = multiprocessing.cpu_count()
tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)
# Sets up thread pool for each GPU for op scheduling.
per_gpu_thread_count = 1
total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
os.environ['TF_GPU_THREAD_COUNT'])
tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
os.environ['TF_GPU_THREAD_MODE'])
# Reduces general thread pool by number of threads used for GPU pool.
main_thread_count = cpu_count - total_gpu_thread_count
flags_obj.inter_op_parallelism_threads = main_thread_count
  # Sets thread count for tf.data: logical cores minus the threads assigned to
  # the private GPU pool, along with 2 threads per GPU for event monitoring and
  # sending / receiving tensors.
num_monitoring_threads = 2 * flags_obj.num_gpus
flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count
- num_monitoring_threads)
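# Worked example (illustrative, not part of the original module): with 16
# logical cores and flags_obj.num_gpus=2, the code above sets
# total_gpu_thread_count=2, inter_op_parallelism_threads=14,
# num_monitoring_threads=4 and datasets_num_private_threads=16 - 2 - 4 = 10.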
################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
batch_size, batch_denom, num_images, boundary_epochs, decay_rates,
base_lr=0.1, warmup=False):
"""Get a learning rate that decays step-wise as training progresses.
Args:
batch_size: the number of examples processed in each training batch.
batch_denom: this value will be used to scale the base learning rate.
`0.1 * batch size` is divided by this number, such that when
batch_denom == batch_size, the initial learning rate will be 0.1.
num_images: total number of images that will be used for training.
boundary_epochs: list of ints representing the epochs at which we
decay the learning rate.
decay_rates: list of floats representing the decay rates to be used
for scaling the learning rate. It should have one more element
than `boundary_epochs`, and all elements should have the same type.
base_lr: Initial learning rate scaled based on batch_denom.
warmup: Run a 5 epoch warmup to the initial lr.
Returns:
    Returns a function that takes a single argument - the number of batches
    trained so far (global_step) - and returns the learning rate to be used
    for training the next batch.
"""
initial_learning_rate = base_lr * batch_size / batch_denom
batches_per_epoch = num_images / batch_size
# Reduce the learning rate at certain epochs.
# CIFAR-10: divide by 10 at epoch 100, 150, and 200
# ImageNet: divide by 10 at epoch 30, 60, 80, and 90
boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
vals = [initial_learning_rate * decay for decay in decay_rates]
def learning_rate_fn(global_step):
"""Builds scaled learning rate function with 5 epoch warm up."""
lr = tf.compat.v1.train.piecewise_constant(global_step, boundaries, vals)
if warmup:
warmup_steps = int(batches_per_epoch * 5)
warmup_lr = (
initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(pred=global_step < warmup_steps,
true_fn=lambda: warmup_lr,
false_fn=lambda: lr)
return lr
def poly_rate_fn(global_step):
"""Handles linear scaling rule, gradual warmup, and LR decay.
The learning rate starts at 0, then it increases linearly per step. After
FLAGS.poly_warmup_epochs, we reach the base learning rate (scaled to account
for batch size). The learning rate is then decayed using a polynomial rate
decay schedule with power 2.0.
Args:
global_step: the current global_step
Returns:
returns the current learning rate
"""
# Learning rate schedule for LARS polynomial schedule
if flags.FLAGS.batch_size < 8192:
plr = 5.0
w_epochs = 5
elif flags.FLAGS.batch_size < 16384:
plr = 10.0
w_epochs = 5
elif flags.FLAGS.batch_size < 32768:
plr = 25.0
w_epochs = 5
else:
plr = 32.0
w_epochs = 14
w_steps = int(w_epochs * batches_per_epoch)
wrate = (plr * tf.cast(global_step, tf.float32) / tf.cast(
w_steps, tf.float32))
# TODO(pkanwar): use a flag to help calc num_epochs.
num_epochs = 90
train_steps = batches_per_epoch * num_epochs
min_step = tf.constant(1, dtype=tf.int64)
decay_steps = tf.maximum(min_step, tf.subtract(global_step, w_steps))
poly_rate = tf.train.polynomial_decay(
plr,
decay_steps,
train_steps - w_steps + 1,
power=2.0)
return tf.where(global_step <= w_steps, wrate, poly_rate)
# For LARS we have a new learning rate schedule
if flags.FLAGS.enable_lars:
return poly_rate_fn
return learning_rate_fn
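# Illustrative call (commented out; the boundary/decay values are typical
# ImageNet-style settings shown for illustration, not taken from this module):
#
#   lr_fn = learning_rate_with_decay(
#       batch_size=256, batch_denom=256, num_images=1281167,
#       boundary_epochs=[30, 60, 80, 90],
#       decay_rates=[1, 0.1, 0.01, 1e-3, 1e-4],
#       warmup=True)
#   # lr_fn(global_step) then maps a global_step tensor to a learning rate.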
def resnet_model_fn(features, labels, mode, model_class,
resnet_size, weight_decay, learning_rate_fn, momentum,
data_format, resnet_version, loss_scale,
loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
fine_tune=False, label_smoothing=0.0):
"""Shared functionality for different resnet model_fns.
Initializes the ResnetModel representing the model layers
and uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model_class: a class representing a TensorFlow model that has a __call__
function. We assume here that this is a subclass of ResnetModel.
resnet_size: A single integer for the size of the ResNet model.
weight_decay: weight decay loss rate used to regularize learned variables.
learning_rate_fn: function that returns the current learning rate given
the current global_step
momentum: momentum term used for optimization
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
resnet_version: Integer representing which version of the ResNet network to
use. See README for details. Valid values: [1, 2]
loss_scale: The factor to scale the loss for numerical stability. A detailed
summary is present in the arg parser help text.
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
dtype: the TensorFlow dtype to use for calculations.
fine_tune: If True, only train the dense (final) layers.
label_smoothing: If greater than 0 then smooth the labels.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
# Generate a summary node for the images
tf.compat.v1.summary.image('images', features, max_outputs=6)
# Checks that the features/images have the same data type as the one used for calculations.
assert features.dtype == dtype
model = model_class(resnet_size, data_format, resnet_version=resnet_version,
dtype=dtype)
logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)
# This acts as a no-op if the logits are already in fp32 (provided logits are
# not a SparseTensor). If dtype is low precision, logits must be cast to
# fp32 for numerical stability.
logits = tf.cast(logits, tf.float32)
predictions = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
# Return the predictions and the specification for serving a SavedModel
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'predict': tf.estimator.export.PredictOutput(predictions)
})
# Calculate loss, which includes softmax cross entropy and L2 regularization.
if label_smoothing != 0.0:
one_hot_labels = tf.one_hot(labels, 1001)
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=one_hot_labels,
label_smoothing=label_smoothing)
else:
cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy')
tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
def exclude_batch_norm(name):
return 'batch_normalization' not in name
loss_filter_fn = loss_filter_fn or exclude_batch_norm
# Add weight decay to the loss.
l2_loss = weight_decay * tf.add_n(
# loss is computed using fp32 for numerical stability.
[
tf.nn.l2_loss(tf.cast(v, tf.float32))
for v in tf.compat.v1.trainable_variables()
if loss_filter_fn(v.name)
])
tf.compat.v1.summary.scalar('l2_loss', l2_loss)
loss = cross_entropy + l2_loss
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = learning_rate_fn(global_step)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name='learning_rate')
tf.compat.v1.summary.scalar('learning_rate', learning_rate)
if flags.FLAGS.enable_lars:
from tensorflow.contrib import opt as contrib_opt # pylint: disable=g-import-not-at-top
optimizer = contrib_opt.LARSOptimizer(
learning_rate,
momentum=momentum,
weight_decay=weight_decay,
skip_list=['batch_normalization', 'bias'])
else:
optimizer = tf.compat.v1.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
fp16_implementation = getattr(flags.FLAGS, 'fp16_implementation', None)
if fp16_implementation == 'graph_rewrite':
optimizer = (
tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite(
optimizer, loss_scale=loss_scale))
def _dense_grad_filter(gvs):
"""Only apply gradient updates to the final layer.
This function is used for fine tuning.
Args:
gvs: list of tuples with gradients and variable info
Returns:
filtered gradients so that only the dense layer remains
"""
return [(g, v) for g, v in gvs if 'dense' in v.name]
if loss_scale != 1 and fp16_implementation != 'graph_rewrite':
# When computing fp16 gradients, often intermediate tensor values are
# so small, they underflow to 0. To avoid this, we multiply the loss by
# loss_scale to make these tensor values loss_scale times bigger.
scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)
if fine_tune:
scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)
# Once the gradient computation is complete we can scale the gradients
# back to the correct scale before passing them to the optimizer.
unscaled_grad_vars = [(grad / loss_scale, var)
for grad, var in scaled_grad_vars]
minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
else:
grad_vars = optimizer.compute_gradients(loss)
if fine_tune:
grad_vars = _dense_grad_filter(grad_vars)
minimize_op = optimizer.apply_gradients(grad_vars, global_step)
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
train_op = tf.group(minimize_op, update_ops)
else:
train_op = None
accuracy = tf.compat.v1.metrics.accuracy(labels, predictions['classes'])
accuracy_top_5 = tf.compat.v1.metrics.mean(
tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op'))
metrics = {'accuracy': accuracy,
'accuracy_top_5': accuracy_top_5}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy[1], name='train_accuracy')
tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
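# Illustrative sketch only (not part of the original model code): a custom
# loss_filter_fn in the shape resnet_model_fn expects -- it takes a variable name
# and returns True if that variable should contribute to the L2 term. This variant
# additionally excludes bias variables; the name _no_bn_no_bias_filter is made up.
def _no_bn_no_bias_filter(name):
  return 'batch_normalization' not in name and 'bias' not in name
# It would be passed as resnet_model_fn(..., loss_filter_fn=_no_bn_no_bias_filter).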
def resnet_main(
flags_obj, model_function, input_function, dataset_name, shape=None):
"""Shared main loop for ResNet Models.
Args:
flags_obj: An object containing parsed flags. See define_resnet_flags()
for details.
model_function: the function that instantiates the Model and builds the
ops for train/eval. This will be passed directly into the estimator.
input_function: the function that processes the dataset and returns a
dataset that the estimator can train on. This will be wrapped with
all the relevant flags for running and passed to estimator.
dataset_name: the name of the dataset for training and evaluation. This is
used for logging purpose.
shape: list of ints representing the shape of the images used for training.
This is only used if flags_obj.export_dir is passed.
Returns:
Dict of results of the run. Contains the keys `eval_results` and
`train_hooks`. `eval_results` contains accuracy (top_1) and accuracy_top_5.
`train_hooks` is a list of the hook instances used during training.
"""
model_helpers.apply_clean(flags.FLAGS)
# Ensures flag override logic is only executed if explicitly triggered.
if flags_obj.tf_gpu_thread_mode:
override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)
# Configures cluster spec for distribution strategy.
num_workers = distribution_utils.configure_cluster(flags_obj.worker_hosts,
flags_obj.task_index)
# Creates the session config. allow_soft_placement=True is required for
# multi-GPU and is not harmful for other modes.
session_config = tf.compat.v1.ConfigProto(
inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
allow_soft_placement=True)
distribution_strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=flags_core.get_num_gpus(flags_obj),
all_reduce_alg=flags_obj.all_reduce_alg,
num_packs=flags_obj.num_packs)
# Creates a `RunConfig` that checkpoints every 24 hours which essentially
# results in checkpoints determined only by `epochs_between_evals`.
run_config = tf.estimator.RunConfig(
train_distribute=distribution_strategy,
session_config=session_config,
save_checkpoints_secs=60*60*24,
save_checkpoints_steps=None)
# Initializes model with all but the dense layer from pretrained ResNet.
if flags_obj.pretrained_model_checkpoint_path is not None:
warm_start_settings = tf.estimator.WarmStartSettings(
flags_obj.pretrained_model_checkpoint_path,
vars_to_warm_start='^(?!.*dense)')
else:
warm_start_settings = None
classifier = tf.estimator.Estimator(
model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
warm_start_from=warm_start_settings, params={
'resnet_size': int(flags_obj.resnet_size),
'data_format': flags_obj.data_format,
'batch_size': flags_obj.batch_size,
'resnet_version': int(flags_obj.resnet_version),
'loss_scale': flags_core.get_loss_scale(flags_obj,
default_for_fp16=128),
'dtype': flags_core.get_tf_dtype(flags_obj),
'fine_tune': flags_obj.fine_tune,
'num_workers': num_workers,
})
run_params = {
'batch_size': flags_obj.batch_size,
'dtype': flags_core.get_tf_dtype(flags_obj),
'resnet_size': flags_obj.resnet_size,
'resnet_version': flags_obj.resnet_version,
'synthetic_data': flags_obj.use_synthetic_data,
'train_epochs': flags_obj.train_epochs,
'num_workers': num_workers,
}
if flags_obj.use_synthetic_data:
dataset_name = dataset_name + '-synthetic'
benchmark_logger = logger.get_benchmark_logger()
benchmark_logger.log_run_info('resnet', dataset_name, run_params,
test_id=flags_obj.benchmark_test_id)
train_hooks = hooks_helper.get_train_hooks(
flags_obj.hooks,
model_dir=flags_obj.model_dir,
batch_size=flags_obj.batch_size)
def input_fn_train(num_epochs, input_context=None):
return input_function(
is_training=True,
data_dir=flags_obj.data_dir,
batch_size=distribution_utils.per_replica_batch_size(
flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
num_epochs=num_epochs,
dtype=flags_core.get_tf_dtype(flags_obj),
datasets_num_private_threads=flags_obj.datasets_num_private_threads,
input_context=input_context)
def input_fn_eval():
return input_function(
is_training=False,
data_dir=flags_obj.data_dir,
batch_size=distribution_utils.per_replica_batch_size(
flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
num_epochs=1,
dtype=flags_core.get_tf_dtype(flags_obj))
train_epochs = (0 if flags_obj.eval_only or not flags_obj.train_epochs else
flags_obj.train_epochs)
use_train_and_evaluate = flags_obj.use_train_and_evaluate or num_workers > 1
if use_train_and_evaluate:
train_spec = tf.estimator.TrainSpec(
input_fn=lambda input_context=None: input_fn_train(
train_epochs, input_context=input_context),
hooks=train_hooks,
max_steps=flags_obj.max_train_steps)
eval_spec = tf.estimator.EvalSpec(input_fn=input_fn_eval)
tf.compat.v1.logging.info('Starting to train and evaluate.')
tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
# tf.estimator.train_and_evaluate doesn't return anything in the
# multi-worker case.
eval_results = {}
else:
if train_epochs == 0:
# If --eval_only is set, perform a single loop with zero train epochs.
schedule, n_loops = [0], 1
else:
# Compute the number of times to loop while training. All but the last
# pass will train for `epochs_between_evals` epochs, while the last will
# train for the number needed to reach `training_epochs`. For instance if
# train_epochs = 25 and epochs_between_evals = 10
# schedule will be set to [10, 10, 5]. That is to say, the loop will:
# Train for 10 epochs and then evaluate.
# Train for another 10 epochs and then evaluate.
# Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
n_loops = math.ceil(train_epochs / flags_obj.epochs_between_evals)
schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]
schedule[-1] = train_epochs - sum(schedule[:-1]) # Correct for over-counting.
for cycle_index, num_train_epochs in enumerate(schedule):
tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,
int(n_loops))
if num_train_epochs:
# Since we are calling classifier.train immediately in each loop, the
# value of num_train_epochs in the lambda function will not be changed
# before it is used. So it is safe to ignore the pylint error here
# pylint: disable=cell-var-from-loop
classifier.train(
input_fn=lambda input_context=None: input_fn_train(
num_train_epochs, input_context=input_context),
hooks=train_hooks,
max_steps=flags_obj.max_train_steps)
# flags_obj.max_train_steps is generally associated with testing and
# profiling. As a result it is frequently called with synthetic data,
# which will iterate forever. Passing steps=flags_obj.max_train_steps
# allows the eval (which is generally unimportant in those circumstances)
# to terminate. Note that eval will run for max_train_steps each loop,
# regardless of the global_step count.
tf.compat.v1.logging.info('Starting to evaluate.')
eval_results = classifier.evaluate(input_fn=input_fn_eval,
steps=flags_obj.max_train_steps)
benchmark_logger.log_evaluation_result(eval_results)
if model_helpers.past_stop_threshold(
flags_obj.stop_threshold, eval_results['accuracy']):
break
if flags_obj.export_dir is not None:
# Exports a saved model for the given classifier.
export_dtype = flags_core.get_tf_dtype(flags_obj)
if flags_obj.image_bytes_as_serving_input:
input_receiver_fn = functools.partial(
image_bytes_serving_input_fn, shape, dtype=export_dtype)
else:
input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
shape, batch_size=flags_obj.batch_size, dtype=export_dtype)
classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn,
strip_default_attrs=True)
stats = {}
stats['eval_results'] = eval_results
stats['train_hooks'] = train_hooks
return stats
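# Plain-Python illustration (not invoked anywhere) of the evaluation schedule
# computed inside resnet_main above: with train_epochs=25 and
# epochs_between_evals=10, the training loop runs in chunks of [10, 10, 5] and
# evaluates after each chunk.
def _example_schedule(train_epochs=25, epochs_between_evals=10):
  n_loops = math.ceil(train_epochs / epochs_between_evals)
  schedule = [epochs_between_evals] * int(n_loops)
  schedule[-1] = train_epochs - sum(schedule[:-1])
  return schedule  # -> [10, 10, 5]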
def define_resnet_flags(resnet_size_choices=None, dynamic_loss_scale=False,
fp16_implementation=False):
"""Add flags and validators for ResNet."""
flags_core.define_base(clean=True, train_epochs=True,
epochs_between_evals=True, stop_threshold=True,
num_gpu=True, hooks=True, export_dir=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
inter_op=True,
intra_op=True,
synthetic_data=True,
dtype=True,
all_reduce_alg=True,
num_packs=True,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
dynamic_loss_scale=dynamic_loss_scale,
fp16_implementation=fp16_implementation,
loss_scale=True,
tf_data_experimental_slack=True,
max_train_steps=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_enum(
name='resnet_version', short_name='rv', default='1',
enum_values=['1', '2'],
help=flags_core.help_wrap(
'Version of ResNet. (1 or 2) See README.md for details.'))
flags.DEFINE_bool(
name='fine_tune', short_name='ft', default=False,
help=flags_core.help_wrap(
'If True, do not train any parameters except for the final layer.'))
flags.DEFINE_string(
name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
help=flags_core.help_wrap(
'If not None, initialize all of the network except the final layer '
'with these values.'))
flags.DEFINE_boolean(
name='eval_only', default=False,
help=flags_core.help_wrap('Skip training and only perform evaluation on '
'the latest checkpoint.'))
flags.DEFINE_boolean(
name='image_bytes_as_serving_input', default=False,
help=flags_core.help_wrap(
'If True exports savedmodel with serving signature that accepts '
'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
'represents the image. The former is easier to use for serving at '
'the expense of image resize/cropping being done as part of model '
'inference. Note, this flag only applies to ImageNet and cannot '
'be used for CIFAR.'))
flags.DEFINE_boolean(
name='use_train_and_evaluate', default=False,
help=flags_core.help_wrap(
'If True, uses `tf.estimator.train_and_evaluate` for the training '
'and evaluation loop, instead of separate calls to `classifier.train` '
'and `classifier.evaluate`, which is the default behavior.'))
flags.DEFINE_bool(
name='enable_lars', default=False,
help=flags_core.help_wrap(
'Enable LARS optimizer for large batch training.'))
flags.DEFINE_float(
name='label_smoothing', default=0.0,
help=flags_core.help_wrap(
'Label smoothing parameter used in the softmax_cross_entropy'))
flags.DEFINE_float(
name='weight_decay', default=1e-4,
help=flags_core.help_wrap(
'Weight decay coefficient for l2 regularization.'))
choice_kwargs = dict(
name='resnet_size', short_name='rs', default='50',
help=flags_core.help_wrap('The size of the ResNet model to use.'))
if resnet_size_choices is None:
flags.DEFINE_string(**choice_kwargs)
else:
flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
|
|
from datetime import datetime
from flask import current_app
from flask_login import login_user, current_user, logout_user
from flaskbb.forum.models import Category, Forum, Topic, Post, ForumsRead, \
TopicsRead, Report
from flaskbb.user.models import User
from flaskbb.utils.settings import flaskbb_config
def test_category_save(database):
"""Test the save category method."""
category = Category(title="Test Category")
category.save()
assert category.title == "Test Category"
def test_category_delete(category):
"""Test the delete category method."""
category.delete()
category = Category.query.filter_by(id=category.id).first()
assert category is None
def test_category_delete_with_user(topic):
"""Test the delete category method with recounting the users post counts."""
user = topic.user
forum = topic.forum
category = topic.forum.category
assert user.post_count == 1
assert forum.post_count == 1
assert forum.topic_count == 1
category.delete([user])
assert user.post_count == 0
category = Category.query.filter_by(id=category.id).first()
topic = Topic.query.filter_by(id=topic.id).first()
assert category is None
# The topic should also be deleted
assert topic is None
def test_category_delete_with_forum(forum):
"""When deleting a category, all of his forums should also be deleted."""
forum.category.delete()
assert forum is not None
assert forum.category is not None
category = Category.query.filter_by(id=forum.category.id).first()
forum = Forum.query.filter_by(id=forum.id).first()
assert forum is None
assert category is None
def test_category_get_forums(forum, user):
category = forum.category
with current_app.test_request_context():
# Test with logged in user
login_user(user)
assert current_user.is_authenticated()
cat, forums = Category.get_forums(category.id, current_user)
# Check that it is a list because a category normally contains more than
# one forum (though not in these tests)
assert isinstance(forums, list) is True
assert forums == [(forum, None)]
assert cat == category
# Test the same thing with a logged out user
logout_user()
assert not current_user.is_authenticated()
cat, forums = Category.get_forums(category.id, current_user)
# Check that it is a list because a category normally contains more than
# one forum (though not in these tests)
assert isinstance(forums, list) is True
assert forums == [(forum, None)]
assert cat == category
def test_category_get_all(forum, user):
category = forum.category
with current_app.test_request_context():
# Test with logged in user
login_user(user)
assert current_user.is_authenticated()
categories = Category.get_all(current_user)
# All categories are stored in a list
assert isinstance(categories, list)
# The forums for a category are also stored in a list
assert isinstance(categories[0][1], list)
assert categories == [(category, [(forum, None)])]
# Test with logged out user
logout_user()
assert not current_user.is_authenticated()
categories = Category.get_all(current_user)
# All categories are stored in a list
assert isinstance(categories, list)
# The forums for a category are also stored in a list
assert isinstance(categories[0][1], list)
assert categories == [(category, [(forum, None)])]
def test_forum_save(category, moderator_user):
"""Test the save forum method"""
forum = Forum(title="Test Forum", category_id=category.id)
forum.save()
assert forum.title == "Test Forum"
# Test with adding a moderator
forum.save([moderator_user])
assert forum.moderators == [moderator_user]
def test_forum_delete(forum):
"""Test the delete forum method."""
forum.delete()
forum = Forum.query.filter_by(id=forum.id).first()
assert forum is None
def test_forum_delete_with_user_and_topic(topic, user):
"""Now test the delete forum method with a topic inside."""
assert user.post_count == 1
topic.forum.delete([user])
forum = Forum.query.filter_by(id=topic.forum_id).first()
assert forum is None
assert user.post_count == 0
def test_forum_update_last_post(topic, user):
"""Test the update last post method."""
post = Post(content="Test Content 2")
post.save(topic=topic, user=user)
assert topic.forum.last_post == post
post.delete()
topic.forum.update_last_post()
assert topic.forum.last_post == topic.first_post
def test_forum_update_read(database, user, topic):
"""Test the update read method."""
forumsread = ForumsRead.query.\
filter(ForumsRead.user_id == user.id,
ForumsRead.forum_id == topic.forum_id).first()
topicsread = TopicsRead.query.\
filter(TopicsRead.user_id == user.id,
TopicsRead.topic_id == topic.id).first()
forum = topic.forum
with current_app.test_request_context():
# Test with logged in user
login_user(user)
# Should return False because topicsread is None
assert not forum.update_read(current_user, forumsread, topicsread)
# This is the first time the user visits the topic
topicsread = TopicsRead()
topicsread.user_id = user.id
topicsread.topic_id = topic.id
topicsread.forum_id = topic.forum_id
topicsread.last_read = datetime.utcnow()
topicsread.save()
# hence, we also need to create a new forumsread entry
assert forum.update_read(current_user, forumsread, topicsread)
forumsread = ForumsRead.query.\
filter(ForumsRead.user_id == user.id,
ForumsRead.forum_id == topic.forum_id).first()
# everything should be up-to-date now
assert not forum.update_read(current_user, forumsread, topicsread)
post = Post(content="Test Content")
post.save(user=user, topic=topic)
# Updating the topicsread tracker
topicsread.last_read = datetime.utcnow()
topicsread.save()
# now the forumsread tracker should also need an update
assert forum.update_read(current_user, forumsread, topicsread)
logout_user()
# should fail because the user is logged out
assert not forum.update_read(current_user, forumsread, topicsread)
def test_forum_update_read_two_topics(database, user, topic, topic_moderator):
"""Test if the ForumsRead tracker will be updated if there are two topics
and where one is unread and the other is read.
"""
forumsread = ForumsRead.query.\
filter(ForumsRead.user_id == user.id,
ForumsRead.forum_id == topic.forum_id).first()
forum = topic.forum
with current_app.test_request_context():
# Test with logged in user
login_user(user)
# This is the first time the user visits the topic
topicsread = TopicsRead()
topicsread.user_id = user.id
topicsread.topic_id = topic.id
topicsread.forum_id = topic.forum_id
topicsread.last_read = datetime.utcnow()
topicsread.save()
# will not create an entry because there is still one unread topic
assert not forum.update_read(current_user, forumsread, topicsread)
def test_forum_url(forum):
assert forum.url == "http://localhost:5000/forum/1-test-forum"
def test_forum_slugify(forum):
assert forum.slug == "test-forum"
def test_forum_get_forum(forum, user):
with current_app.test_request_context():
# Test with logged in user
login_user(user)
forum_with_forumsread = \
Forum.get_forum(forum_id=forum.id, user=current_user)
assert forum_with_forumsread == (forum, None)
# Test with logged out user
logout_user()
forum_with_forumsread = \
Forum.get_forum(forum_id=forum.id, user=current_user)
assert forum_with_forumsread == (forum, None)
def test_forum_get_topics(topic, user):
forum = topic.forum
with current_app.test_request_context():
# Test with logged in user
login_user(user)
topics = Forum.get_topics(forum_id=forum.id, user=current_user)
assert topics.items == [(topic, None)]
# Test with logged out user
logout_user()
topics = Forum.get_topics(forum_id=forum.id, user=current_user)
assert topics.items == [(topic, None)]
def test_topic_save(forum, user):
"""Test the save topic method with creating and editing a topic."""
post = Post(content="Test Content")
topic = Topic(title="Test Title")
assert forum.last_post_id is None
assert forum.post_count == 0
assert forum.topic_count == 0
topic.save(forum=forum, post=post, user=user)
assert topic.title == "Test Title"
topic.title = "Test Edit Title"
topic.save()
assert topic.title == "Test Edit Title"
# The first post in the topic is also the last post
assert topic.first_post_id == post.id
assert topic.last_post_id == post.id
assert forum.last_post_id == post.id
assert forum.post_count == 1
assert forum.topic_count == 1
def test_topic_delete(topic):
"""Test the delete topic method"""
assert topic.user.post_count == 1
assert topic.post_count == 1
assert topic.forum.topic_count == 1
assert topic.forum.post_count == 1
topic.delete(users=[topic.user])
forum = Forum.query.filter_by(id=topic.forum_id).first()
user = User.query.filter_by(id=topic.user_id).first()
topic = Topic.query.filter_by(id=topic.id).first()
assert topic is None
assert user.post_count == 0
assert forum.topic_count == 0
assert forum.post_count == 0
assert forum.last_post_id is None
def test_topic_merge(topic):
"""Tests the topic merge method."""
topic_other = Topic(title="Test Topic Merge")
post = Post(content="Test Content Merge")
topic_other.save(post=post, user=topic.user, forum=topic.forum)
# Save the last_post_id in another variable because topic_other will be
# overwritten later
last_post_other = topic_other.last_post_id
assert topic_other.merge(topic)
# I just want to be sure that the topic is deleted
topic_other = Topic.query.filter_by(id=topic_other.id).first()
assert topic_other is None
assert topic.post_count == 2
assert topic.last_post_id == last_post_other
def test_topic_merge_other_forum(topic):
"""You cannot merge a topic with a topic from another forum."""
forum_other = Forum(title="Test Forum 2", category_id=1)
forum_other.save()
topic_other = Topic(title="Test Topic 2")
post_other = Post(content="Test Content 2")
topic_other.save(user=topic.user, forum=forum_other, post=post_other)
assert not topic.merge(topic_other)
def test_topic_move(topic):
"""Tests the topic move method."""
forum_other = Forum(title="Test Forum 2", category_id=1)
forum_other.save()
forum_old = Forum.query.filter_by(id=topic.forum_id).first()
assert topic.move(forum_other)
assert forum_old.topics == []
assert forum_old.last_post_id is None
assert forum_old.topic_count == 0
assert forum_old.post_count == 0
assert forum_other.last_post_id == topic.last_post_id
assert forum_other.topic_count == 1
assert forum_other.post_count == 1
def test_topic_move_same_forum(topic):
"""You cannot move a topic within the same forum."""
assert not topic.move(topic.forum)
def test_topic_tracker_needs_update(database, user, topic):
"""Tests if the topicsread tracker needs an update if a new post has been
submitted.
"""
forumsread = ForumsRead.query.\
filter(ForumsRead.user_id == user.id,
ForumsRead.forum_id == topic.forum_id).first()
topicsread = TopicsRead.query.\
filter(TopicsRead.user_id == user.id,
TopicsRead.topic_id == topic.id).first()
with current_app.test_request_context():
assert topic.tracker_needs_update(forumsread, topicsread)
# Update the tracker
topicsread = TopicsRead()
topicsread.user_id = user.id
topicsread.topic_id = topic.id
topicsread.forum_id = topic.forum_id
topicsread.last_read = datetime.utcnow()
topicsread.save()
forumsread = ForumsRead()
forumsread.user_id = user.id
forumsread.forum_id = topic.forum_id
forumsread.last_read = datetime.utcnow()
forumsread.save()
# Now the topic should be read
assert not topic.tracker_needs_update(forumsread, topicsread)
post = Post(content="Test Content")
post.save(topic=topic, user=user)
assert topic.tracker_needs_update(forumsread, topicsread)
def test_topic_tracker_needs_update_cleared(database, user, topic):
"""Tests if the topicsread needs an update if the forum has been marked
as cleared.
"""
forumsread = ForumsRead.query.\
filter(ForumsRead.user_id == user.id,
ForumsRead.forum_id == topic.forum_id).first()
topicsread = TopicsRead.query.\
filter(TopicsRead.user_id == user.id,
TopicsRead.topic_id == topic.id).first()
with current_app.test_request_context():
assert topic.tracker_needs_update(forumsread, topicsread)
# Update the tracker
forumsread = ForumsRead()
forumsread.user_id = user.id
forumsread.forum_id = topic.forum_id
forumsread.last_read = datetime.utcnow()
forumsread.cleared = datetime.utcnow()
forumsread.save()
# Now the topic should be read
assert not topic.tracker_needs_update(forumsread, topicsread)
def test_topic_update_read(database, user, topic):
"""Tests the update read method if the topic is unread/read."""
forumsread = ForumsRead.query.\
filter(ForumsRead.user_id == user.id,
ForumsRead.forum_id == topic.forum_id).first()
with current_app.test_request_context():
# Test with logged in user
login_user(user)
assert current_user.is_authenticated()
# Update the tracker
assert topic.update_read(current_user, topic.forum, forumsread)
# Because the tracker is already up-to-date, it shouldn't update it
# again.
assert not topic.update_read(current_user, topic.forum, forumsread)
# Adding a new post - now the tracker shouldn't be up-to-date anymore.
post = Post(content="Test Content")
post.save(topic=topic, user=user)
forumsread = ForumsRead.query.\
filter(ForumsRead.user_id == user.id,
ForumsRead.forum_id == topic.forum_id).first()
# Test tracker length
flaskbb_config["TRACKER_LENGTH"] = 0
assert not topic.update_read(current_user, topic.forum, forumsread)
flaskbb_config["TRACKER_LENGTH"] = 1
assert topic.update_read(current_user, topic.forum, forumsread)
# Test with logged out user
logout_user()
assert not current_user.is_authenticated()
assert not topic.update_read(current_user, topic.forum, forumsread)
def test_topic_url(topic):
assert topic.url == "http://localhost:5000/topic/1-test-topic-normal"
def test_topic_slug(topic):
assert topic.slug == "test-topic-normal"
def test_post_save(topic, user):
"""Tests the save post method."""
post = Post(content="Test Content")
post.save(topic=topic, user=user)
assert post.content == "Test Content"
post.content = "Test Edit Content"
post.save()
assert post.content == "Test Edit Content"
assert topic.user.post_count == 2
assert topic.post_count == 2
assert topic.last_post == post
assert topic.forum.post_count == 2
def test_post_delete(topic):
"""Tests the delete post method with three different post types.
The three types are:
* First Post
* A post between the first and last post (middle)
* Last Post
"""
post_middle = Post(content="Test Content Middle")
post_middle.save(topic=topic, user=topic.user)
post_last = Post(content="Test Content Last")
post_last.save(topic=topic, user=topic.user)
assert topic.post_count == 3
assert topic.forum.post_count == 3
assert topic.user.post_count == 3
post_middle.delete()
# Check the last posts
assert topic.last_post == post_last
assert topic.forum.last_post == post_last
post_last.delete()
# That was a bit trickier..
assert topic.post_count == 1
assert topic.forum.post_count == 1
assert topic.user.post_count == 1
assert topic.first_post_id == topic.last_post_id
assert topic.forum.last_post_id == topic.last_post_id
def test_report(topic, user):
"""Tests if the reports can be saved/edited and deleted with the
implemented save and delete methods."""
report = Report(reason="Test Report")
report.save(user=user, post=topic.first_post)
assert report.reason == "Test Report"
report.reason = "Test Report Edited"
report.save()
assert report.reason == "Test Report Edited"
report.delete()
report = Report.query.filter_by(id=report.id).first()
assert report is None
def test_forumsread(topic, user):
"""Tests if the forumsread tracker can be saved/edited and deleted with the
implemented save and delete methods."""
forumsread = ForumsRead()
forumsread.user_id = user.id
forumsread.forum_id = topic.forum_id
forumsread.last_read = datetime.utcnow()
forumsread.save()
assert forumsread is not None
forumsread.delete()
forumsread = ForumsRead.query.filter_by(forum_id=forumsread.forum_id).first()
assert forumsread is None
def test_topicsread(topic, user):
"""Tests if the topicsread trakcer can be saved/edited and deleted with the
implemented save and delete methods."""
topicsread = TopicsRead()
topicsread.user_id = user.id
topicsread.topic_id = topic.id
topicsread.forum_id = topic.forum_id
topicsread.last_read = datetime.utcnow()
topicsread.save()
assert topicsread is not None
topicsread.delete()
topicsread = TopicsRead.query.filter_by(topic_id=topicsread.topic_id).first()
assert topicsread is None
|
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import sympy
import cirq
import cirq_ionq as ionq
def test_serialize_empty_circuit_invalid():
empty = cirq.Circuit()
serializer = ionq.Serializer()
with pytest.raises(ValueError, match='empty'):
_ = serializer.serialize(empty)
def test_serialize_non_terminal_measurements():
q0 = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.measure(q0, key='d'), cirq.X(q0))
serializer = ionq.Serializer()
with pytest.raises(ValueError, match='end of circuit'):
_ = serializer.serialize(circuit)
def test_serialize_not_line_qubits_invalid():
q0 = cirq.NamedQubit('a')
circuit = cirq.Circuit(cirq.X(q0))
serializer = ionq.Serializer()
with pytest.raises(ValueError, match='NamedQubit'):
_ = serializer.serialize(circuit)
def test_serialize_parameterized_invalid():
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.X(q) ** (sympy.Symbol('x')))
serializer = ionq.Serializer()
with pytest.raises(ValueError, match='parameterized'):
_ = serializer.serialize(circuit)
def test_serialize_implicit_num_qubits():
q0 = cirq.LineQubit(2)
circuit = cirq.Circuit(cirq.X(q0))
serializer = ionq.Serializer()
result = serializer.serialize(circuit)
assert result.body['qubits'] == 3
def test_serialize_non_gate_op_invalid():
q0 = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.X(q0), cirq.CircuitOperation(cirq.FrozenCircuit()))
serializer = ionq.Serializer()
with pytest.raises(ValueError, match='CircuitOperation'):
_ = serializer.serialize(circuit)
def test_serialize_negative_line_qubit_invalid():
q0 = cirq.LineQubit(-1)
circuit = cirq.Circuit(cirq.X(q0))
serializer = ionq.Serializer()
with pytest.raises(ValueError, match='-1'):
_ = serializer.serialize(circuit)
def test_serialize_pow_gates():
q0 = cirq.LineQubit(0)
serializer = ionq.Serializer()
for name, gate in (('rx', cirq.X), ('ry', cirq.Y), ('rz', cirq.Z)):
for exponent in (1.1, 0.6):
circuit = cirq.Circuit((gate ** exponent)(q0))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={
'qubits': 1,
'circuit': [{'gate': name, 'targets': [0], 'rotation': exponent * np.pi}],
},
metadata={},
)
def test_serialize_pauli_gates():
q0 = cirq.LineQubit(0)
serializer = ionq.Serializer()
for gate, name in ((cirq.X, 'x'), (cirq.Y, 'y'), (cirq.Z, 'z')):
circuit = cirq.Circuit(gate(q0))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': [{'gate': name, 'targets': [0]}]}, metadata={}
)
def test_serialize_sqrt_x_gate():
q0 = cirq.LineQubit(0)
serializer = ionq.Serializer()
circuit = cirq.Circuit(cirq.X(q0) ** (0.5))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': [{'gate': 'v', 'targets': [0]}]}, metadata={}
)
circuit = cirq.Circuit(cirq.X(q0) ** (-0.5))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': [{'gate': 'vi', 'targets': [0]}]}, metadata={}
)
def test_serialize_s_gate():
q0 = cirq.LineQubit(0)
serializer = ionq.Serializer()
circuit = cirq.Circuit(cirq.Z(q0) ** (0.5))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': [{'gate': 's', 'targets': [0]}]}, metadata={}
)
circuit = cirq.Circuit(cirq.Z(q0) ** (-0.5))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': [{'gate': 'si', 'targets': [0]}]}, metadata={}
)
def test_serialize_h_gate():
q0 = cirq.LineQubit(0)
serializer = ionq.Serializer()
circuit = cirq.Circuit(cirq.H(q0))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': [{'gate': 'h', 'targets': [0]}]}, metadata={}
)
with pytest.raises(ValueError, match=r'H\*\*0.5'):
circuit = cirq.Circuit(cirq.H(q0) ** 0.5)
_ = serializer.serialize(circuit)
def test_serialize_t_gate():
q0 = cirq.LineQubit(0)
serializer = ionq.Serializer()
circuit = cirq.Circuit(cirq.Z(q0) ** (0.25))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': [{'gate': 't', 'targets': [0]}]}, metadata={}
)
circuit = cirq.Circuit(cirq.Z(q0) ** (-0.25))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': [{'gate': 'ti', 'targets': [0]}]}, metadata={}
)
def test_serialize_parity_pow_gate():
q0, q1 = cirq.LineQubit.range(2)
serializer = ionq.Serializer()
for gate, name in ((cirq.XXPowGate, 'xx'), (cirq.YYPowGate, 'yy'), (cirq.ZZPowGate, 'zz')):
for exponent in (0.5, 1.0, 1.5):
circuit = cirq.Circuit(gate(exponent=exponent)(q0, q1))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={
'qubits': 2,
'circuit': [{'gate': name, 'targets': [0, 1], 'rotation': exponent * np.pi}],
},
metadata={},
)
def test_serialize_cnot_gate():
q0, q1 = cirq.LineQubit.range(2)
serializer = ionq.Serializer()
circuit = cirq.Circuit(cirq.CNOT(q0, q1))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 2, 'circuit': [{'gate': 'cnot', 'control': 0, 'target': 1}]}, metadata={}
)
with pytest.raises(ValueError, match=r'CNOT\*\*0.5'):
circuit = cirq.Circuit(cirq.CNOT(q0, q1) ** 0.5)
_ = serializer.serialize(circuit)
def test_serialize_swap_gate():
q0, q1 = cirq.LineQubit.range(2)
serializer = ionq.Serializer()
circuit = cirq.Circuit(cirq.SWAP(q0, q1))
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 2, 'circuit': [{'gate': 'swap', 'targets': [0, 1]}]}, metadata={}
)
with pytest.raises(ValueError, match=r'SWAP\*\*0.5'):
circuit = cirq.Circuit(cirq.SWAP(q0, q1) ** 0.5)
_ = serializer.serialize(circuit)
def test_serialize_measurement_gate():
q0 = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.measure(q0, key='tomyheart'))
serializer = ionq.Serializer()
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 1, 'circuit': []}, metadata={'measurement0': f'tomyheart{chr(31)}0'}
)
def test_serialize_measurement_gate_target_order():
q0, _, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.measure(q2, q0, key='tomyheart'))
serializer = ionq.Serializer()
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 3, 'circuit': []},
metadata={'measurement0': f'tomyheart{chr(31)}2,0'},
)
def test_serialize_measurement_gate_split_across_dict():
q0 = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.measure(q0, key='a' * 60))
serializer = ionq.Serializer()
result = serializer.serialize(circuit)
assert result.metadata['measurement0'] == 'a' * 40
assert result.metadata['measurement1'] == 'a' * 20 + f'{chr(31)}0'
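# Illustrative sketch only (not the actual cirq_ionq implementation): how a
# measurement key and its target qubits appear to be packed into fixed-size
# metadata chunks, inferred from the behaviour asserted in these tests. The helper
# name _example_pack_measurements is made up.
def _example_pack_measurements(key_to_targets, chunk_size=40):
    # Join key and comma-separated targets with chr(31); join separate keys with chr(30).
    full = chr(30).join(
        f'{key}{chr(31)}{",".join(str(t) for t in targets)}'
        for key, targets in key_to_targets.items()
    )
    n_chunks = -(-len(full) // chunk_size)  # ceiling division
    return {
        f'measurement{i}': full[i * chunk_size:(i + 1) * chunk_size]
        for i in range(n_chunks)
    }
# _example_pack_measurements({'a' * 60: [0]}) reproduces the two chunks asserted
# in the test above.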
def test_serialize_measurement_gate_multiple_keys():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.measure(q0, key='a'), cirq.measure(q1, key='b'))
serializer = ionq.Serializer()
result = serializer.serialize(circuit)
assert result == ionq.SerializedProgram(
body={'qubits': 2, 'circuit': []},
metadata={'measurement0': f'a{chr(31)}0{chr(30)}b{chr(31)}1'},
)
def test_serialize_measurement_string_too_long():
q = cirq.LineQubit(0)
# The max limit for the metadata is 9 keys of length 40. Here we create a key of
# length 40 * 9 - 1. When combined with the single qubit index, 0, and the
# delimiter, this is just too big to fit.
circuit = cirq.Circuit(cirq.measure(q, key='x' * (40 * 9 - 1)))
serializer = ionq.Serializer()
with pytest.raises(ValueError, match='too long'):
_ = serializer.serialize(circuit)
# Check that one fewer character is fine.
circuit = cirq.Circuit(cirq.measure(q, key='x' * (40 * 9 - 2)))
serializer = ionq.Serializer()
_ = serializer.serialize(circuit)
def test_serialize_measurement_key_cannot_be_deliminator():
q0 = cirq.LineQubit(0)
serializer = ionq.Serializer()
circuit = cirq.Circuit(cirq.measure(q0, key=f'ab{chr(30)}'))
with pytest.raises(ValueError, match=f'ab{chr(30)}'):
_ = serializer.serialize(circuit)
circuit = cirq.Circuit(cirq.measure(q0, key=f'ab{chr(31)}'))
with pytest.raises(ValueError, match=f'ab{chr(31)}'):
_ = serializer.serialize(circuit)
def test_serialize_not_serializable():
q0, q1 = cirq.LineQubit.range(2)
serializer = ionq.Serializer()
circuit = cirq.Circuit(cirq.PhasedISwapPowGate()(q0, q1))
with pytest.raises(ValueError, match='PhasedISWAP'):
_ = serializer.serialize(circuit)
def test_serialize_atol():
q0 = cirq.LineQubit(0)
serializer = ionq.Serializer(atol=1e-1)
# Within the tolerance given above, this is an X gate.
circuit = cirq.Circuit(cirq.X(q0) ** 1.09)
result = serializer.serialize(circuit)
assert result.body['circuit'][0]['gate'] == 'x'
|
|
# Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Event, Lock
from uuid import uuid1
from ncclient.xml_ import *
from ncclient.transport import SessionListener
from errors import OperationError, TimeoutExpiredError, MissingCapabilityError
import logging
logger = logging.getLogger("ncclient.operations.rpc")
class RPCError(OperationError):
"Represents an `rpc-error`. It is a type of :exc:`OperationError` and can be raised as such."
tag_to_attr = {
qualify("error-type"): "_type",
qualify("error-tag"): "_tag",
qualify("error-severity"): "_severity",
qualify("error-info"): "_info",
qualify("error-path"): "_path",
qualify("error-message"): "_message"
}
def __init__(self, raw):
self._raw = raw
for attr in RPCError.tag_to_attr.values():
setattr(self, attr, None)
for subele in raw:
attr = RPCError.tag_to_attr.get(subele.tag, None)
if attr is not None:
setattr(self, attr, subele.text if attr != "_info" else to_xml(subele) )
if self.message is not None:
OperationError.__init__(self, self.message)
else:
OperationError.__init__(self, self.to_dict())
def to_dict(self):
return dict([ (attr[1:], getattr(self, attr)) for attr in RPCError.tag_to_attr.values() ])
@property
def xml(self):
"The `rpc-error` element as returned in XML."
return self._raw
@property
def type(self):
"The contents of the `error-type` element."
return self._type
@property
def tag(self):
"The contents of the `error-tag` element."
return self._tag
@property
def severity(self):
"The contents of the `error-severity` element."
return self._severity
@property
def path(self):
"The contents of the `error-path` element if present or `None`."
return self._path
@property
def message(self):
"The contents of the `error-message` element if present or `None`."
return self._message
@property
def info(self):
"XML string or `None`; representing the `error-info` element."
return self._info
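# Illustrative sketch only (not part of ncclient): how an <rpc-error> element maps
# onto RPCError attributes via tag_to_attr. It assumes lxml is available and that
# qualify() expands tags into Clark notation for the NETCONF base namespace; the
# XML literal is a made-up example, not a captured server reply.
def _rpc_error_example():
    from lxml import etree
    raw = etree.fromstring(
        '<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">'
        '<error-type>protocol</error-type>'
        '<error-tag>operation-failed</error-tag>'
        '<error-severity>error</error-severity>'
        '<error-message>something went wrong</error-message>'
        '</rpc-error>')
    err = RPCError(raw)
    # err.type == "protocol", err.severity == "error",
    # err.message == "something went wrong"
    return err.to_dict()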
class RPCReply:
"""Represents an *rpc-reply*. Only concerns itself with whether the operation was successful.
.. note::
If the reply has not yet been parsed, there is an implicit, one-time parsing overhead when
accessing some of the attributes defined by this class.
"""
ERROR_CLS = RPCError
"Subclasses can specify a different error class, but it should be a subclass of `RPCError`."
def __init__(self, raw):
self._raw = raw
self._parsed = False
self._root = None
self._errors = []
def __repr__(self):
return self._raw
def parse(self):
"Parses the *rpc-reply*."
if self._parsed: return
root = self._root = to_ele(self._raw) # The <rpc-reply> element
# Per RFC 4741 an <ok/> tag is sent when there are no errors or warnings
ok = root.find(qualify("ok"))
if ok is None:
# Create RPCError objects from <rpc-error> elements
error = root.find(qualify("rpc-error"))
if error is not None:
for err in root.getiterator(error.tag):
# Process a particular <rpc-error>
self._errors.append(self.ERROR_CLS(err))
self._parsing_hook(root)
self._parsed = True
def _parsing_hook(self, root):
"No-op by default. Gets passed the *root* element for the reply."
pass
@property
def xml(self):
"*rpc-reply* element as returned."
return self._raw
@property
def ok(self):
"Boolean value indicating if there were no errors."
return not self.errors # empty list => false
@property
def error(self):
"Returns the first :class:`RPCError` and `None` if there were no errors."
self.parse()
if self._errors:
return self._errors[0]
else:
return None
@property
def errors(self):
"List of `RPCError` objects. Will be empty if there were no *rpc-error* elements in reply."
self.parse()
return self._errors
class RPCReplyListener(SessionListener): # internal use
creation_lock = Lock()
# one instance per session -- maybe there is a better way??
def __new__(cls, session):
with RPCReplyListener.creation_lock:
instance = session.get_listener_instance(cls)
if instance is None:
instance = object.__new__(cls)
instance._lock = Lock()
instance._id2rpc = {}
#instance._pipelined = session.can_pipeline
session.add_listener(instance)
return instance
def register(self, id, rpc):
with self._lock:
self._id2rpc[id] = rpc
def callback(self, root, raw):
tag, attrs = root
if tag != qualify("rpc-reply"):
return
for key in attrs: # in the <rpc-reply> attributes
if key == "message-id": # if we found msgid attr
id = attrs[key] # get the msgid
with self._lock:
try:
rpc = self._id2rpc[id] # the corresponding rpc
logger.debug("Delivering to %r" % rpc)
rpc.deliver_reply(raw)
except KeyError:
raise OperationError("Unknown 'message-id': %s", id)
# no catching other exceptions, fail loudly if must
else:
# if no error delivering, can del the reference to the RPC
del self._id2rpc[id]
break
else:
raise OperationError("Could not find 'message-id' attribute in <rpc-reply>")
def errback(self, err):
try:
for rpc in self._id2rpc.values():
rpc.deliver_error(err)
finally:
self._id2rpc.clear()
class RaiseMode(object):
NONE = 0
"Don't attempt to raise any type of `rpc-error` as :exc:`RPCError`."
ERRORS = 1
"Raise only when the `error-type` indicates it is an honest-to-god error."
ALL = 2
"Don't look at the `error-type`, always raise."
class RPC(object):
"""Base class for all operations, directly corresponding to *rpc* requests. Handles making the request, and taking delivery of the reply."""
DEPENDS = []
"""Subclasses can specify their dependencies on capabilities as a list of URI's or abbreviated names, e.g. ':writable-running'. These are verified at the time of instantiation. If the capability is not available, :exc:`MissingCapabilityError` is raised."""
REPLY_CLS = RPCReply
"By default :class:`RPCReply`. Subclasses can specify a :class:`RPCReply` subclass."
def __init__(self, session, async=False, timeout=30, raise_mode=RaiseMode.NONE):
"""
*session* is the :class:`~ncclient.transport.Session` instance
*async* specifies whether the request is to be made asynchronously, see :attr:`is_async`
*timeout* is the timeout for a synchronous request, see :attr:`timeout`
*raise_mode* specifies the exception raising mode, see :attr:`raise_mode`
"""
self._session = session
try:
for cap in self.DEPENDS:
self._assert(cap)
except AttributeError:
pass
self._async = async
self._timeout = timeout
self._raise_mode = raise_mode
self._id = uuid1().urn # Keeps things simple instead of having a class attr with running ID that has to be locked
self._listener = RPCReplyListener(session)
self._listener.register(self._id, self)
self._reply = None
self._error = None
self._event = Event()
def _wrap(self, subele):
# internal use
ele = new_ele("rpc", {"message-id": self._id}, xmlns=BASE_NS_1_0)
ele.append(subele)
return to_xml(ele)
def _request(self, op):
"""Implementations of :meth:`request` call this method to send the request and process the reply.
In synchronous mode, blocks until the reply is received and returns :class:`RPCReply`. Depending on the :attr:`raise_mode` a `rpc-error` element in the reply may lead to an :exc:`RPCError` exception.
In asynchronous mode, returns immediately, returning `self`. The :attr:`event` attribute will be set when the reply has been received (see :attr:`reply`) or an error occurred (see :attr:`error`).
*op* is the operation to be requested as an :class:`~xml.etree.ElementTree.Element`
"""
logger.info('Requesting %r' % self.__class__.__name__)
req = self._wrap(op)
self._session.send(req)
if self._async:
logger.debug('Async request, returning %r', self)
return self
else:
logger.debug('Sync request, will wait for timeout=%r' % self._timeout)
self._event.wait(self._timeout)
if self._event.isSet():
if self._error:
# Error that prevented reply delivery
raise self._error
self._reply.parse()
if self._reply.error is not None:
# <rpc-error>'s [ RPCError ]
if self._raise_mode == RaiseMode.ALL:
raise self._reply.error
elif (self._raise_mode == RaiseMode.ERRORS and self._reply.error.type == "error"):
raise self._reply.error
return self._reply
else:
raise TimeoutExpiredError
def request(self):
"""Subclasses must implement this method. Typically only the request needs to be built as an
:class:`~xml.etree.ElementTree.Element` and everything else can be handed off to
:meth:`_request`."""
pass
def _assert(self, capability):
"""Subclasses can use this method to verify that a capability is available with the NETCONF
server, before making a request that requires it. A :exc:`MissingCapabilityError` will be
raised if the capability is not available."""
if capability not in self._session.server_capabilities:
raise MissingCapabilityError('Server does not support [%s]' % capability)
def deliver_reply(self, raw):
# internal use
self._reply = self.REPLY_CLS(raw)
self._event.set()
def deliver_error(self, err):
# internal use
self._error = err
self._event.set()
@property
def reply(self):
":class:`RPCReply` element if reply has been received or `None`"
return self._reply
@property
def error(self):
""":exc:`Exception` type if an error occured or `None`.
.. note::
This represents an error which prevented a reply from being received. An *rpc-error*
does not fall in that category -- see `RPCReply` for that.
"""
return self._error
@property
def id(self):
"The *message-id* for this RPC."
return self._id
@property
def session(self):
"The `~ncclient.transport.Session` object associated with this RPC."
return self._session
@property
def event(self):
""":class:`~threading.Event` that is set when reply has been received or when an error preventing
delivery of the reply occurs.
"""
return self._event
def __set_async(self, async=True):
self._async = async
if async and not self._session.can_pipeline:
raise UserWarning('Asynchronous mode not supported for this device/session')
def __set_raise_mode(self, mode):
assert mode in (RaiseMode.NONE, RaiseMode.ERRORS, RaiseMode.ALL)
self._raise_mode = mode
def __set_timeout(self, timeout):
self._timeout = timeout
raise_mode = property(fget=lambda self: self._raise_mode, fset=__set_raise_mode)
"""Depending on this exception raising mode, an `rpc-error` in the reply may be raised as an :exc:`RPCError` exception. Valid values are the constants defined in :class:`RaiseMode`. """
is_async = property(fget=lambda self: self._async, fset=__set_async)
"""Specifies whether this RPC will be / was requested asynchronously. By default RPC's are synchronous."""
timeout = property(fget=lambda self: self._timeout, fset=__set_timeout)
"""Timeout in seconds for synchronous waiting defining how long the RPC request will block on a reply before raising :exc:`TimeoutExpiredError`.
Irrelevant for asynchronous usage.
"""
|
|
from __future__ import print_function
import functools
import json
import logging
import os
import sys
import runpy
from insights.client import InsightsClient
from insights.client.config import InsightsConfig
from insights.client.constants import InsightsConstants as constants
from insights.client.support import InsightsSupport
from insights.client.utilities import validate_remove_file, print_egg_versions
from insights.client.schedule import get_scheduler
from insights.client.apps.compliance import ComplianceClient
logger = logging.getLogger(__name__)
def phase(func):
@functools.wraps(func)
def _f():
try:
config = InsightsConfig().load_all()
client = InsightsClient(config)
except (ValueError, OSError) as e:
sys.stderr.write('ERROR: ' + str(e) + '\n')
sys.exit(constants.sig_kill_bad)
if config.debug:
logger.info("Core path: %s", os.path.dirname(__file__))
try:
func(client, config)
except Exception:
logger.exception("Fatal error")
sys.exit(1)
else:
sys.exit(0) # Exit gracefully
return _f
def get_phases():
return [{
'name': 'pre_update',
'run_as_root': True
}, {
'name': 'update',
'run_as_root': True
}, {
'name': 'post_update',
'run_as_root': True
}, {
'name': 'collect_and_output',
'run_as_root': True
}]
@phase
def pre_update(client, config):
if config.version:
logger.info(constants.version)
sys.exit(constants.sig_kill_ok)
# validate the remove file
if config.validate:
try:
validate_remove_file(config)
sys.exit(constants.sig_kill_ok)
except RuntimeError as e:
logger.error(e)
sys.exit(constants.sig_kill_bad)
# handle cron stuff
if config.enable_schedule:
# enable automatic scheduling
logger.debug('Updating config...')
scheduler = get_scheduler(config)
updated = scheduler.schedule()
if updated:
logger.info('Automatic scheduling for Insights has been enabled.')
sys.exit(constants.sig_kill_ok)
if config.disable_schedule:
# disable automatic scheduling
updated = get_scheduler(config).remove_scheduling()
if updated:
logger.info('Automatic scheduling for Insights has been disabled.')
if not config.register:
sys.exit(constants.sig_kill_ok)
# test the insights connection
if config.test_connection:
logger.info("Running Connection Tests...")
rc = client.test_connection()
if rc == 0:
sys.exit(constants.sig_kill_ok)
else:
sys.exit(constants.sig_kill_bad)
if config.support:
support = InsightsSupport(config)
support.collect_support_info()
sys.exit(constants.sig_kill_ok)
if config.diagnosis:
remediation_id = None
if config.diagnosis is not True:
remediation_id = config.diagnosis
resp = client.get_diagnosis(remediation_id)
if not resp:
sys.exit(constants.sig_kill_bad)
print(json.dumps(resp))
sys.exit(constants.sig_kill_ok)
if config.checkin:
try:
checkin_success = client.checkin()
except Exception as e:
print(e)
sys.exit(constants.sig_kill_bad)
if checkin_success:
sys.exit(constants.sig_kill_ok)
else:
sys.exit(constants.sig_kill_bad)
@phase
def update(client, config):
client.update()
if config.payload:
logger.debug('Uploading a payload. Bypassing rules update.')
return
client.update_rules()
@phase
def post_update(client, config):
# create a machine id first thing. we'll need it for all uploads
logger.debug('Machine ID: %s', client.get_machine_id())
logger.debug("CONFIG: %s", config)
print_egg_versions()
if config.list_specs:
client.list_specs()
sys.exit(constants.sig_kill_ok)
if config.show_results:
try:
client.show_results()
sys.exit(constants.sig_kill_ok)
except Exception as e:
print(e)
sys.exit(constants.sig_kill_bad)
if config.check_results:
try:
client.check_results()
sys.exit(constants.sig_kill_ok)
except Exception as e:
print(e)
sys.exit(constants.sig_kill_bad)
# -------delete everything below this line-------
if config.legacy_upload:
if config.status:
reg_check = client.get_registration_status()
for msg in reg_check['messages']:
logger.info(msg)
if reg_check['status']:
sys.exit(constants.sig_kill_ok)
else:
sys.exit(constants.sig_kill_bad)
# put this first to avoid conflicts with register
if config.unregister:
if client.unregister():
sys.exit(constants.sig_kill_ok)
else:
sys.exit(constants.sig_kill_bad)
if config.offline:
logger.debug('Running client in offline mode. Bypassing registration.')
return
if config.no_upload:
logger.debug("Running client without uploading. Bypassing registration.")
return
if config.display_name and not config.register:
# setting display name independent of registration
if client.set_display_name(config.display_name):
if 'display_name' in config._cli_opts:
# only exit on success if it was invoked from command line
sys.exit(constants.sig_kill_ok)
else:
sys.exit(constants.sig_kill_bad)
reg = client.register()
if reg is None:
# API unreachable
logger.info('Could not connect to the Insights API. Run insights-client --test-connection for more information.')
sys.exit(constants.sig_kill_bad)
elif reg is False:
# unregistered
sys.exit(constants.sig_kill_bad)
if config.register and not config.disable_schedule:
scheduler = get_scheduler(config)
updated = scheduler.schedule()
if updated:
logger.info('Automatic scheduling for Insights has been enabled.')
return
# -------delete everything above this line-------
if config.offline:
logger.debug('Running client in offline mode. Bypassing registration.')
return
if config.no_upload:
logger.debug("Running client without uploading. Bypassing registration.")
return
# --payload short circuits registration check
if config.payload:
logger.debug('Uploading a specified archive. Bypassing registration.')
return
# check registration status before anything else
reg_check = client.get_registration_status()
if reg_check is None:
sys.exit(constants.sig_kill_bad)
# --status
if config.status:
if reg_check:
logger.info('This host is registered.')
sys.exit(constants.sig_kill_ok)
else:
logger.info('This host is unregistered.')
sys.exit(constants.sig_kill_bad)
# put this first to avoid conflicts with register
if config.unregister:
if reg_check:
logger.info('Unregistering this host from Insights.')
if client.unregister():
get_scheduler(config).remove_scheduling()
sys.exit(constants.sig_kill_ok)
else:
sys.exit(constants.sig_kill_bad)
else:
logger.info('This host is not registered, unregistration is not applicable.')
sys.exit(constants.sig_kill_bad)
# halt here if unregistered
if not reg_check and not config.register:
logger.info('This host has not been registered. '
'Use --register to register this host.')
sys.exit(constants.sig_kill_bad)
# --force-reregister, clear machine-id
if config.reregister:
reg_check = False
client.clear_local_registration()
# --register was called
if config.register:
# don't actually need to make a call to register() since
# system creation and upload are a single event on the platform
if reg_check:
logger.info('This host has already been registered.')
if not config.disable_schedule:
scheduler = get_scheduler(config)
updated = scheduler.schedule()
if updated:
logger.info('Automatic scheduling for Insights has been enabled.')
# set --display-name independent of register
# only do this if set from the CLI. normally display_name is sent on upload
if 'display_name' in config._cli_opts and not config.register:
if client.set_display_name(config.display_name):
sys.exit(constants.sig_kill_ok)
else:
sys.exit(constants.sig_kill_bad)
# set --ansible-hostname independent of register
# only do this if set from the CLI. normally the ansible hostname is sent on upload
if 'ansible_host' in config._cli_opts and not config.register:
if client.set_ansible_host(config.ansible_host):
sys.exit(constants.sig_kill_ok)
else:
sys.exit(constants.sig_kill_bad)
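# collect_and_output phase: optionally run a module, build or reuse an archive
# (--payload / --compliance), then either copy it to the requested output
# location or upload it, and finally rotate the eggs.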
@phase
def collect_and_output(client, config):
# run a specified module
if config.module:
try:
runpy.run_module(config.module)
except ImportError as e:
logger.error(e)
sys.exit(constants.sig_kill_bad)
sys.exit(constants.sig_kill_ok)
# --compliance was called
if config.compliance:
config.payload, config.content_type = ComplianceClient(config).oscap_scan()
# default (below)
if config.payload:
insights_archive = config.payload
else:
try:
insights_archive = client.collect()
except RuntimeError as e:
logger.error(e)
sys.exit(constants.sig_kill_bad)
if not config.content_type:
config.content_type = 'application/vnd.redhat.advisor.collection+tgz'
if config.no_upload:
# output options for which upload is not performed
if config.output_dir:
client.copy_to_output_dir(insights_archive)
elif config.output_file:
client.copy_to_output_file(insights_archive)
else:
# upload the archive
if not insights_archive:
# no archive to upload, something went wrong
sys.exit(constants.sig_kill_bad)
resp = None
try:
resp = client.upload(payload=insights_archive, content_type=config.content_type)
except (IOError, ValueError, RuntimeError) as e:
logger.error(str(e))
sys.exit(constants.sig_kill_bad)
if resp:
if config.to_json:
print(json.dumps(resp))
client.show_inventory_deep_link()
client.delete_cached_branch_info()
# rotate eggs once client completes all work successfully
try:
client.rotate_eggs()
except IOError:
message = ("Failed to rotate %s to %s" %
(constants.insights_core_newest,
constants.insights_core_last_stable))
logger.debug(message)
raise IOError(message)
|
|
import json
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.exceptions import PermissionDenied
from hs_core import hydroshare
from hs_core.views import unshare_resource_with_user, unshare_resource_with_group
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.models import PrivilegeCodes
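# Tests for the unshare_resource_with_user and unshare_resource_with_group view
# functions, covering owner, self, group and unauthorized-user scenarios.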
class TestUnshareResource(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(TestUnshareResource, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.owner = hydroshare.create_account(
'john@gmail.com',
username='john',
first_name='John',
last_name='Clarson',
superuser=False,
password='jhmypassword',
groups=[]
)
self.user = hydroshare.create_account(
'lisa@gmail.com',
username='lisaZ',
first_name='Lisa',
last_name='Ziggler',
superuser=False,
password='lzmypassword',
groups=[]
)
self.unauthorized_user = hydroshare.create_account(
'gary@gmail.com',
username='garyB',
first_name='Gary',
last_name='Brandon',
superuser=False,
password='gbmypassword',
groups=[]
)
# create a group for testing group access to resource
self.test_group = self.owner.uaccess.create_group(
title='Test Group',
description="This is to test group access to resource",
purpose="Testing group access to resource")
self.gen_res = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.owner,
title='Generic Resource Share Resource Testing'
)
self.factory = RequestFactory()
def test_unshare_resource_with_user(self):
# here we are testing the unshare_resource_with_user view function
# test unshare resource with self.user
# test self.user has no view permission
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
# grant view access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.VIEW)
# test self.user now has view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.view_users)
self._check_unshare_with_user()
# test self.user has no view permission
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
# grant edit access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.CHANGE)
# test self.user now has edit permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.edit_users)
self._check_unshare_with_user()
# test self.user has no edit permission
self.assertNotIn(self.user, self.gen_res.raccess.edit_users)
# grant owner access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.OWNER)
# test self.user now has owner permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.owners)
self._check_unshare_with_user()
# test self.user has no owner permission
self.assertNotIn(self.user, self.gen_res.raccess.owners)
# clean up
hydroshare.delete_resource(self.gen_res.short_id)
def test_unshare_resource_with_self(self):
# here we are testing the unshare_resource_with_user view function
# test unshare resource with self.user by self.user
# test self.user has no view permission
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
# grant view access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.VIEW)
# test self.user now has view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.view_users)
url_params = {'shortkey': self.gen_res.short_id, 'user_id': self.user.id}
url = reverse('unshare_resource_with_user', kwargs=url_params)
request = self.factory.post(url, data={})
# self unsharing
request.user = self.user
# make it an ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
response = unshare_resource_with_user(request, shortkey=self.gen_res.short_id,
user_id=self.user.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = json.loads(response.content)
self.assertEqual(response_data['status'], 'success')
self.assertEqual(response_data['redirect_to'], '/hydroshare/my-documents/')
self.gen_res.raccess.refresh_from_db()
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
def test_unshare_resource_with_user_bad_request(self):
# here we are testing the unshare_resource_with_user view function
# test unshare resource with self.user by unauthorized_user
# test self.user has no view permission
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
# grant view access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.VIEW)
# test self.user now has view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.view_users)
url_params = {'shortkey': self.gen_res.short_id, 'user_id': self.user.id}
url = reverse('unshare_resource_with_user', kwargs=url_params)
request = self.factory.post(url, data={})
# unauthorized user trying to remove access of self.user
request.user = self.unauthorized_user
# make it an ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
with self.assertRaises(PermissionDenied):
unshare_resource_with_user(request, shortkey=self.gen_res.short_id,
user_id=self.user.id)
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.view_users)
def test_unshare_resource_with_group(self):
# here we are testing the unshare_resource_with_group view function
# test unshare resource with self.test_group
# test self.test_group has no view permission
self.assertNotIn(self.test_group, self.gen_res.raccess.view_groups)
# grant view access to self.test_group
self.owner.uaccess.share_resource_with_group(self.gen_res, self.test_group,
PrivilegeCodes.VIEW)
# test self.test_group now has view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.test_group, self.gen_res.raccess.view_groups)
self._check_unshare_with_group()
# test self.test_group has no view permission
self.assertNotIn(self.test_group, self.gen_res.raccess.view_groups)
# grant edit access to test_group
self.owner.uaccess.share_resource_with_group(self.gen_res, self.test_group,
PrivilegeCodes.CHANGE)
# test test_group now has edit permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.test_group, self.gen_res.raccess.edit_groups)
self._check_unshare_with_group()
# test test_group has no edit permission
self.assertNotIn(self.test_group, self.gen_res.raccess.edit_groups)
# clean up
hydroshare.delete_resource(self.gen_res.short_id)
def test_unshare_resource_with_group_bad_request(self):
# here we are testing the unshare_resource_with_group view function
# test unshare resource with test_group by unauthorized_user
# test test_group has no view permission
self.assertNotIn(self.test_group, self.gen_res.raccess.view_groups)
# grant view access to test_group
self.owner.uaccess.share_resource_with_group(self.gen_res, self.test_group,
PrivilegeCodes.VIEW)
# test test_group now has view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.test_group, self.gen_res.raccess.view_groups)
url_params = {'shortkey': self.gen_res.short_id, 'group_id': self.test_group.id}
url = reverse('unshare_resource_with_group', kwargs=url_params)
request = self.factory.post(url, data={})
# unauthorized user trying to remove access of test_group
request.user = self.unauthorized_user
# make it a ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
with self.assertRaises(PermissionDenied):
unshare_resource_with_group(request, shortkey=self.gen_res.short_id,
group_id=self.test_group.id)
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.test_group, self.gen_res.raccess.view_groups)
def _check_unshare_with_user(self):
url_params = {'shortkey': self.gen_res.short_id, 'user_id': self.user.id}
url = reverse('unshare_resource_with_user', kwargs=url_params)
request = self.factory.post(url, data={})
request.user = self.owner
# make it an ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
response = unshare_resource_with_user(request, shortkey=self.gen_res.short_id,
user_id=self.user.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = json.loads(response.content)
self.assertEqual(response_data['status'], 'success')
self.gen_res.raccess.refresh_from_db()
def _check_unshare_with_group(self):
url_params = {'shortkey': self.gen_res.short_id, 'group_id': self.test_group.id}
url = reverse('unshare_resource_with_group', kwargs=url_params)
request = self.factory.post(url, data={})
request.user = self.owner
# make it an ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
response = unshare_resource_with_group(request, shortkey=self.gen_res.short_id,
group_id=self.test_group.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = json.loads(response.content)
self.assertEqual(response_data['status'], 'success')
self.gen_res.raccess.refresh_from_db()
|
|
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex, Index, PeriodIndex, Series, Timestamp, bdate_range,
date_range)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
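# Ops-level behavior of DatetimeIndex: min/max, repeat, resolution,
# value_counts/unique, sort ordering, drop_duplicates, freq setter and equality.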
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_minmax_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
@pytest.mark.parametrize('op', ['min', 'max'])
def test_minmax_nat(self, op):
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values: the n-th element is repeated n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
@pytest.mark.parametrize('idx',
[
DatetimeIndex(
['2011-01-01',
'2011-01-02',
'2011-01-03'],
freq='D', name='idx'),
DatetimeIndex(
['2011-01-01 09:00',
'2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', name='tzidx', tz='Asia/Tokyo')
])
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize('index_dates,expected_dates', [
(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05']),
(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05']),
([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT],
[pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'])
])
def test_order_without_freq(self, index_dates, expected_dates,
tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name='idx')
expected = DatetimeIndex(expected_dates, tz=tz, name='idx')
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize('freq', [
'A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S'])
def test_infer_freq(self, freq):
# GH 11018
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
@pytest.mark.parametrize('values', [
['20180101', '20180103', '20180105'], []])
@pytest.mark.parametrize('freq', [
'2D', Day(2), '2B', BDay(2), '48H', Hour(48)])
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(['20180101', '20180103', '20180105'])
# setting with an incompatible freq
msg = ('Inferred frequency 2D from passed values does not conform to '
'passed frequency 5D')
with tm.assert_raises_regex(ValueError, msg):
idx.freq = '5D'
# setting with non-freq string
with tm.assert_raises_regex(ValueError, 'Invalid frequency'):
idx.freq = 'foo'
def test_offset_deprecated(self):
# GH 20716
idx = pd.DatetimeIndex(['20180101', '20180102'])
# getter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset
# setter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset = BDay()
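# Index-level behavior for business-day (bdate_range) indexes: comparisons,
# pickling, copy, shift, equals and identical.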
class TestBusinessDatetimeIndex(object):
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.freq == self.rng.freq
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=BDay())
assert shifted[0] == rng[0] + BDay()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename('foo')
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename('foo')
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex(object):
def setup_method(self, method):
self.rng = bdate_range(START, END, freq='C')
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.freq == self.rng.freq
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", pd.errors.PerformanceWarning)
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=CDay())
assert shifted[0] == rng[0] + CDay()
def test_shift_periods(self):
# GH #22458 : argument 'n' was deprecated in favor of 'periods'
idx = pd.DatetimeIndex(start=START, end=END,
periods=3)
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=True):
tm.assert_index_equal(idx.shift(n=0), idx)
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_equals(self):
assert not self.rng.equals(list(self.rng))
|
|
import asyncio
import json
import os
import re
import time
import traceback
import concurrent.futures
from urllib.parse import urlparse
import feedparser
from aiohttp import web
import ssl
import bcrypt
import uuid
from jinja2 import Environment, FileSystemLoader, select_autoescape
import appdaemon.dashboard as addashboard
import appdaemon.utils as utils
import appdaemon.stream.adstream as stream
import appdaemon.admin as adadmin
from appdaemon.appdaemon import AppDaemon
def securedata(myfunc):
"""
Take care of streams and service calls
"""
async def wrapper(*args):
self = args[0]
request = args[1]
if self.password is None:
return await myfunc(*args)
elif "adcreds" in request.cookies:
match = await utils.run_in_executor(
self, bcrypt.checkpw, str.encode(self.password), str.encode(request.cookies["adcreds"]),
)
if match:
return await myfunc(*args)
elif ("x-ad-access" in request.headers) and (request.headers["x-ad-access"] == self.password):
return await myfunc(*args)
elif "api_password" in request.query and request.query["api_password"] == self.password:
return await myfunc(*args)
else:
return self.get_response(request, "401", "Unauthorized")
return wrapper
def secure(myfunc):
"""
Take care of screen based security
"""
async def wrapper(*args):
self = args[0]
request = args[1]
if self.password is None:
return await myfunc(*args)
else:
if "adcreds" in request.cookies:
match = await utils.run_in_executor(
self, bcrypt.checkpw, str.encode(self.password), str.encode(request.cookies["adcreds"]),
)
if match:
return await myfunc(*args)
else:
return await self.forcelogon(request)
else:
return await self.forcelogon(request)
return wrapper
def route_secure(myfunc):
"""
Take care of streams and service calls
"""
async def wrapper(*args):
self = args[0]
request = args[1]
if self.password is None or self.valid_tokens == []:
return await myfunc(*args)
elif "adcreds" in request.cookies:
match = await utils.run_in_executor(
self, bcrypt.checkpw, str.encode(self.password), str.encode(request.cookies["adcreds"])
)
if match:
return await myfunc(*args)
elif "token" in request.query and request.query["token"] in self.valid_tokens:
return await myfunc(*args)
else:
return self.get_response(request, "401", "Unauthorized")
return wrapper
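# HTTP wires up the aiohttp application: the REST API, the admin interface,
# dashboards, app-registered routes and the event stream, guarded by the
# decorators above.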
class HTTP:
def __init__(self, ad: AppDaemon, loop, logging, appdaemon, dashboard, admin, api, http):
self.AD = ad
self.logging = logging
self.logger = ad.logging.get_child("_http")
self.access = ad.logging.get_access()
self.appdaemon = appdaemon
self.dashboard = dashboard
self.dashboard_dir = None
self.admin = admin
self.http = http
self.api = api
self.runner = None
self.template_dir = os.path.join(os.path.dirname(__file__), "assets", "templates")
self.password = None
self.valid_tokens = []
self.url = None
self.work_factor = 12
self.ssl_certificate = None
self.ssl_key = None
self.transport = "ws"
self.config_dir = None
self._process_arg("config_dir", dashboard)
self.static_dirs = {}
self._process_http(http)
self.stopping = False
self.endpoints = {}
self.app_routes = {}
self.dashboard_obj = None
self.admin_obj = None
self.install_dir = os.path.dirname(__file__)
self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
self.template_dir = os.path.join(self.install_dir, "assets", "templates")
self.css_dir = os.path.join(self.install_dir, "assets", "css")
self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
self.images_dir = os.path.join(self.install_dir, "assets", "images")
try:
url = urlparse(self.url)
net = url.netloc.split(":")
self.host = net[0]
try:
self.port = net[1]
except IndexError:
self.port = 80
if self.host == "":
raise ValueError("Invalid host for 'url'")
self.app = web.Application()
if "headers" in self.http:
self.app.on_response_prepare.append(self.add_response_headers)
# Setup event stream
self.stream = stream.ADStream(self.AD, self.app, self.transport)
self.loop = loop
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
if self.ssl_certificate is not None and self.ssl_key is not None:
self.context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self.context.load_cert_chain(self.ssl_certificate, self.ssl_key)
else:
self.context = None
self.setup_http_routes()
#
# API
#
if api is not None:
self.logger.info("Starting API")
self.setup_api_routes()
else:
self.logger.info("API is disabled")
#
# Admin
#
if admin is not None:
self.logger.info("Starting Admin Interface")
self.stats_update = "realtime"
self._process_arg("stats_update", admin)
self.admin_obj = adadmin.Admin(
self.config_dir,
logging,
self.AD,
javascript_dir=self.javascript_dir,
template_dir=self.template_dir,
css_dir=self.css_dir,
fonts_dir=self.fonts_dir,
webfonts_dir=self.webfonts_dir,
images_dir=self.images_dir,
transport=self.transport,
**admin
)
else:
self.logger.info("Admin Interface is disabled")
#
# Dashboards
#
if dashboard is not None:
self.logger.info("Starting Dashboards")
self._process_arg("dashboard_dir", dashboard)
self.compile_on_start = True
self._process_arg("compile_on_start", dashboard)
self.force_compile = False
self._process_arg("force_compile", dashboard)
self.profile_dashboard = False
self._process_arg("profile_dashboard", dashboard)
self.rss_feeds = None
self._process_arg("rss_feeds", dashboard)
self.fa4compatibility = False
self._process_arg("fa4compatibility", dashboard)
if "rss_feeds" in dashboard:
self.rss_feeds = []
for feed in dashboard["rss_feeds"]:
if feed["target"].count(".") != 1:
self.logger.warning("Invalid RSS feed target: %s", feed["target"])
else:
self.rss_feeds.append(feed)
self.rss_update = None
self._process_arg("rss_update", dashboard)
self.rss_last_update = None
# find dashboard dir
if self.dashboard_dir is None:
if self.config_dir is None:
self.dashboard_dir = utils.find_path("dashboards")
else:
self.dashboard_dir = os.path.join(self.config_dir, "dashboards")
self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
self.template_dir = os.path.join(self.install_dir, "assets", "templates")
self.css_dir = os.path.join(self.install_dir, "assets", "css")
self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
self.images_dir = os.path.join(self.install_dir, "assets", "images")
#
# Setup compile directories
#
if self.config_dir is None:
self.compile_dir = utils.find_path("compiled")
else:
self.compile_dir = os.path.join(self.config_dir, "compiled")
self.dashboard_obj = addashboard.Dashboard(
self.config_dir,
self.logging,
dash_compile_on_start=self.compile_on_start,
dash_force_compile=self.force_compile,
profile_dashboard=self.profile_dashboard,
dashboard_dir=self.dashboard_dir,
fa4compatibility=self.fa4compatibility,
transport=self.transport,
javascript_dir=self.javascript_dir,
template_dir=self.template_dir,
css_dir=self.css_dir,
fonts_dir=self.fonts_dir,
webfonts_dir=self.webfonts_dir,
images_dir=self.images_dir,
)
self.setup_dashboard_routes()
else:
self.logger.info("Dashboards Disabled")
#
# Finish up and start the server
#
# handler = self.app.make_handler()
# f = loop.create_server(handler, "0.0.0.0", int(self.port), ssl=context)
# loop.create_task(f)
if self.dashboard_obj is not None:
loop.create_task(self.update_rss())
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in HTTP module")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
def _process_http(self, http):
self._process_arg("password", http)
self._process_arg("tokens", http)
self._process_arg("work_factor", http)
self._process_arg("ssl_certificate", http)
self._process_arg("ssl_key", http)
self._process_arg("url", http)
if not self.url:
self.logger.warning(
"'{arg}' is '{value}'. Please configure appdaemon.yaml".format(arg="url", value=self.url)
)
exit(0)
self._process_arg("transport", http)
self.logger.info("Using '%s' for event stream", self.transport)
self._process_arg("static_dirs", http)
async def start_server(self):
self.logger.info("Running on port %s", self.port)
self.runner = web.AppRunner(self.app)
await self.runner.setup()
site = web.TCPSite(self.runner, "0.0.0.0", int(self.port), ssl_context=self.context)
await site.start()
async def stop_server(self):
self.logger.info("Shutting down webserver")
#
# We should do this but it makes AD hang so ...
#
# await self.runner.cleanup()
async def add_response_headers(self, request, response):
for header, value in self.http["headers"].items():
response.headers[header] = value
def stop(self):
self.stopping = True
def _process_arg(self, arg, kwargs):
if kwargs:
if arg in kwargs:
setattr(self, arg, kwargs[arg])
@staticmethod
def check_password(password, hash):
return bcrypt.checkpw(str.encode(password), str.encode(hash))
async def forcelogon(self, request):
response = await self.logon_page(request)
return response
async def logon_response(self, request):
try:
data = await request.post()
password = data["password"]
if password == self.password:
self.access.info("Successful logon from %s", request.host)
hashed = bcrypt.hashpw(str.encode(self.password), bcrypt.gensalt(self.work_factor))
if self.admin is not None:
response = await self._admin_page(request)
else:
response = await self._list_dash(request)
self.logger.debug("hashed=%s", hashed)
# Set cookie to last for 1 year
response.set_cookie("adcreds", hashed.decode("utf-8"), max_age=31536000)
else:
self.access.warning("Unsuccessful logon from %s", request.host)
response = await self.logon_page(request)
return response
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in logon_response()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Server error in logon_response()")
# noinspection PyUnusedLocal
@secure
async def list_dash(self, request):
return await self._list_dash(request)
async def _list_dash(self, request):
response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard_list)
return web.Response(text=response, content_type="text/html")
@secure
async def load_dash(self, request):
name = request.match_info.get("name", "Anonymous")
params = request.query
skin = params.get("skin", "default")
recompile = params.get("recompile", False)
if recompile == "1":
recompile = True
response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard, name, skin, recompile)
return web.Response(text=response, content_type="text/html")
async def update_rss(self):
# Grab RSS Feeds
if self.rss_feeds is not None and self.rss_update is not None:
while not self.stopping:
try:
if self.rss_last_update is None or (self.rss_last_update + self.rss_update) <= time.time():
self.rss_last_update = time.time()
for feed_data in self.rss_feeds:
feed = await utils.run_in_executor(self, feedparser.parse, feed_data["feed"])
if "bozo_exception" in feed:
self.logger.warning(
"Error in RSS feed %s: %s", feed_data["feed"], feed["bozo_exception"],
)
else:
new_state = {"feed": feed}
# RSS Feeds always live in the admin namespace
await self.AD.state.set_state("rss", "admin", feed_data["target"], state=new_state)
await asyncio.sleep(1)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in update_rss()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
#
# REST API
#
@securedata
async def get_ad(self, request):
return web.json_response({"state": {"status": "active"}}, dumps=utils.convert_json)
@securedata
async def get_entity(self, request):
namespace = None
entity_id = None
try:
entity_id = request.match_info.get("entity")
namespace = request.match_info.get("namespace")
self.logger.debug("get_state() called, ns=%s, entity=%s", namespace, entity_id)
state = self.AD.state.get_entity(namespace, entity_id)
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_entity()")
self.logger.warning("Namespace: %s, entity: %s", namespace, entity_id)
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_entity()")
@securedata
async def get_namespace(self, request):
namespace = None
try:
namespace = request.match_info.get("namespace")
self.logger.debug("get_namespace() called, ns=%s", namespace)
state = self.AD.state.get_entity(namespace)
self.logger.debug("result = %s", state)
if state is None:
return self.get_response(request, 404, "Namespace Not Found")
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_namespace()")
self.logger.warning("Namespace: %s", namespace)
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_namespace()")
@securedata
async def get_namespace_entities(self, request):
namespace = None
try:
namespace = request.match_info.get("namespace")
self.logger.debug("get_namespace_entities() called, ns=%s", namespace)
state = self.AD.state.list_namespace_entities(namespace)
self.logger.debug("result = %s", state)
if state is None:
return self.get_response(request, 404, "Namespace Not Found")
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_namespace_entities()")
self.logger.warning("Namespace: %s", namespace)
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_namespace_entities()")
@securedata
async def get_namespaces(self, request):
try:
self.logger.debug("get_namespaces() called)")
state = await self.AD.state.list_namespaces()
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_namespaces()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_namespaces()")
@securedata
async def get_services(self, request):
try:
self.logger.debug("get_services() called)")
state = self.AD.services.list_services()
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_services()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_services()")
@securedata
async def get_state(self, request):
try:
self.logger.debug("get_state() called")
state = self.AD.state.get_entity()
if state is None:
return self.get_response(request, 404, "State Not Found")
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_state()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_state()")
@securedata
async def get_logs(self, request):
try:
self.logger.debug("get_logs() called")
logs = await utils.run_in_executor(self, self.AD.logging.get_admin_logs)
return web.json_response({"logs": logs}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_logs()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_logs()")
# noinspection PyUnusedLocal
@securedata
async def call_service(self, request):
try:
try:
data = await request.json()
except json.decoder.JSONDecodeError:
return self.get_response(request, 400, "JSON Decode Error")
args = {}
namespace = request.match_info.get("namespace")
domain = request.match_info.get("domain")
service = request.match_info.get("service")
#
# Some value munging for dashboard
#
for key in data:
if key == "service":
pass
elif key == "rgb_color":
m = re.search(r"\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)", data[key])
if m:
r = m.group(1)
g = m.group(2)
b = m.group(3)
args["rgb_color"] = [r, g, b]
elif key == "xy_color":
m = re.search(r"\s*(\d+\.\d+)\s*,\s*(\d+\.\d+)", data[key])
if m:
x = m.group(1)
y = m.group(2)
args["xy_color"] = [x, y]
elif key == "json_args":
json_args = json.loads(data[key])
for k in json_args.keys():
args[k] = json_args[k]
else:
args[key] = data[key]
self.logger.debug("call_service() args = %s", args)
await self.AD.services.call_service(namespace, domain, service, args)
return web.Response(status=200)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in call_service()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return web.Response(status=500)
@securedata
async def fire_event(self, request):
try:
try:
data = await request.json()
except json.decoder.JSONDecodeError:
return self.get_response(request, 400, "JSON Decode Error")
args = {}
namespace = request.match_info.get("namespace")
event = request.match_info.get("event")
#
# Some value munging for dashboard
#
for key in data:
if key == "event":
pass
else:
args[key] = data[key]
self.logger.debug("fire_event() args = %s", args)
await self.AD.events.fire_event(namespace, event, **args)
return web.Response(status=200)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in fire_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return web.Response(status=500)
# noinspection PyUnusedLocal
async def not_found(self, request):
return self.get_response(request, 404, "Not Found")
# Stream Handling
async def stream_update(self, namespace, data):
# self.logger.debug("stream_update() %s:%s", namespace, data)
data["namespace"] = namespace
self.AD.thread_async.call_async_no_wait(self.stream.process_event, data)
# Routes, Status and Templates
def setup_api_routes(self):
self.app.router.add_post("/api/appdaemon/service/{namespace}/{domain}/{service}", self.call_service)
self.app.router.add_post("/api/appdaemon/event/{namespace}/{event}", self.fire_event)
self.app.router.add_get("/api/appdaemon/service/", self.get_services)
self.app.router.add_get("/api/appdaemon/state/{namespace}/{entity}", self.get_entity)
self.app.router.add_get("/api/appdaemon/state/{namespace}", self.get_namespace)
self.app.router.add_get("/api/appdaemon/state/{namespace}/", self.get_namespace_entities)
self.app.router.add_get("/api/appdaemon/state/", self.get_namespaces)
self.app.router.add_get("/api/appdaemon/state", self.get_state)
self.app.router.add_get("/api/appdaemon/logs", self.get_logs)
self.app.router.add_post("/api/appdaemon/{app}", self.call_api)
self.app.router.add_get("/api/appdaemon", self.get_ad)
def setup_http_routes(self):
self.app.router.add_get("/favicon.ico", self.not_found)
self.app.router.add_get("/{gfx}.png", self.not_found)
self.app.router.add_post("/logon_response", self.logon_response)
# Add static path for JavaScript
self.app.router.add_static("/javascript", self.javascript_dir)
# Add static path for fonts
self.app.router.add_static("/fonts", self.fonts_dir)
# Add static path for webfonts
self.app.router.add_static("/webfonts", self.webfonts_dir)
# Add static path for images
self.app.router.add_static("/images", self.images_dir)
# Add static path for css
self.app.router.add_static("/css", self.css_dir)
if self.admin is not None:
self.app.router.add_get("/", self.admin_page)
elif self.dashboard is not None:
self.app.router.add_get("/", self.list_dash)
else:
self.app.router.add_get("/", self.error_page)
#
# For App based Web Server
#
self.app.router.add_get("/app/{route}", self.app_webserver)
#
# Add static path for apps
#
apps_static = os.path.join(self.AD.config_dir, "www")
exists = True
if not os.path.isdir(apps_static): # check if the folder exists
try:
os.mkdir(apps_static)
except OSError:
self.logger.warning("Creation of the Web directory %s failed", apps_static)
exists = False
else:
self.logger.debug("Successfully created the Web directory %s ", apps_static)
if exists:
self.app.router.add_static("/local", apps_static)
#
# Setup user defined static paths
#
for name, static_dir in self.static_dirs.items():
if not os.path.isdir(static_dir): # check if the folder exists
self.logger.warning("The Web directory %s doesn't exist. So static route not set up", static_dir)
else:
self.app.router.add_static("/{}".format(name), static_dir)
self.logger.debug("Successfully created the Web directory %s ", static_dir)
def setup_dashboard_routes(self):
self.app.router.add_get("/list", self.list_dash)
self.app.router.add_get("/{name}", self.load_dash)
# Setup Templates
self.app.router.add_static("/compiled_javascript", self.dashboard_obj.compiled_javascript_dir)
self.app.router.add_static("/compiled_css", self.dashboard_obj.compiled_css_dir)
# Add path for custom_css if it exists
custom_css = os.path.join(self.dashboard_obj.config_dir, "custom_css")
if os.path.isdir(custom_css):
self.app.router.add_static("/custom_css", custom_css)
# Add path for custom_javascript if it exists
custom_javascript = os.path.join(self.dashboard_obj.config_dir, "custom_javascript")
if os.path.isdir(custom_javascript):
self.app.router.add_static("/custom_javascript", custom_javascript)
# API
async def terminate_app(self, name):
if name in self.endpoints:
del self.endpoints[name]
if name in self.app_routes:
del self.app_routes[name]
def get_response(self, request, code, error):
res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in API Call</body></html>".format(
code, error, code, error
)
app = request.match_info.get("app", "system")
if code == 200:
self.access.info("API Call to %s: status: %s", app, code)
else:
self.access.warning("API Call to %s: status: %s, %s", app, code, error)
return web.Response(body=res, status=code)
def get_web_response(self, request, code, error):
res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in Web Service Call</body></html>".format(
code, error, code, error
)
app = request.match_info.get("app", "system")
if code == 200:
self.access.info("Web Call to %s: status: %s", app, code)
else:
self.access.warning("Web Call to %s: status: %s, %s", app, code, error)
return web.Response(text=res, content_type="text/html")
@securedata
async def call_api(self, request):
code = 200
ret = ""
app = request.match_info.get("app")
try:
args = await request.json()
except json.decoder.JSONDecodeError:
return self.get_response(request, 400, "JSON Decode Error")
try:
ret, code = await self.dispatch_app_by_name(app, args)
except Exception:
self.logger.error("-" * 60)
self.logger.error("Unexpected error during API call")
self.logger.error("-" * 60)
self.logger.error(traceback.format_exc())
self.logger.error("-" * 60)
if code == 404:
return self.get_response(request, 404, "App Not Found")
response = "OK"
self.access.info("API Call to %s: status: %s %s", app, code, response)
return web.json_response(ret, status=code, dumps=utils.convert_json)
# Routes, Status and Templates
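# Apps register named API endpoints here; call_api() above dispatches
# /api/appdaemon/{app} requests to the matching callback via
# dispatch_app_by_name().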
async def register_endpoint(self, cb, name):
handle = uuid.uuid4().hex
if name not in self.endpoints:
self.endpoints[name] = {}
self.endpoints[name][handle] = {"callback": cb, "name": name}
return handle
async def unregister_endpoint(self, handle, name):
if name in self.endpoints and handle in self.endpoints[name]:
del self.endpoints[name][handle]
async def dispatch_app_by_name(self, name, args):
callback = None
for app in self.endpoints:
for handle in self.endpoints[app]:
if self.endpoints[app][handle]["name"] == name:
callback = self.endpoints[app][handle]["callback"]
if callback is not None:
if asyncio.iscoroutinefunction(callback):
return await callback(args)
else:
return await utils.run_in_executor(self, callback, args)
else:
return "", 404
#
# App based Web Server
#
async def register_route(self, cb, route, name, **kwargs):
if not asyncio.iscoroutinefunction(cb): # must be async function
self.logger.warning(
"Could not Register Callback for %s, using Route %s as Web Server Route. Callback must be Async",
name,
route,
)
return
handle = uuid.uuid4().hex
if name not in self.app_routes:
self.app_routes[name] = {}
token = kwargs.get("token")
self.app_routes[name][handle] = {"callback": cb, "route": route, "token": token}
return handle
async def unregister_route(self, handle, name):
if name in self.app_routes and handle in self.app_routes[name]:
del self.app_routes[name][handle]
@route_secure
async def app_webserver(self, request):
name = None
route = request.match_info.get("route")
token = request.query.get("token")
code = 404
error = "Requested Server does not exist"
callback = None
for name in self.app_routes:
if callback is not None: # a callback has been collected
break
for handle in self.app_routes[name]:
app_route = self.app_routes[name][handle]["route"]
app_token = self.app_routes[name][handle]["token"]
if app_route == route:
if app_token is not None and app_token != token:
return self.get_web_response(request, "401", "Unauthorized")
callback = self.app_routes[name][handle]["callback"]
break
if callback is not None:
self.access.debug("Web Call to %s for %s", route, name)
try:
f = asyncio.ensure_future(callback(request))
self.AD.futures.add_future(name, f)
return await f
except asyncio.CancelledError:
code = 503
error = "Request was Cancelled"
except Exception:
self.logger.error("-" * 60)
self.logger.error("Unexpected error during Web call")
self.logger.error("-" * 60)
self.logger.error(traceback.format_exc())
self.logger.error("-" * 60)
code = 503
error = "Request had an Error"
return self.get_web_response(request, str(code), error)
#
# Admin
#
@secure
async def admin_page(self, request):
return await self._admin_page(request)
# Insecure version
async def _admin_page(self, request):
response = await self.admin_obj.admin_page(request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
async def logon_page(self, request):
response = await utils.run_in_executor(self, self.generate_logon_page, request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
async def error_page(self, request):
response = await utils.run_in_executor(self, self.generate_error_page, request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
def generate_logon_page(self, scheme, url):
try:
params = {}
env = Environment(
loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]),
)
template = env.get_template("logon.jinja2")
rendered_template = template.render(params)
return rendered_template
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error creating logon page")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
def generate_error_page(self, scheme, url):
try:
params = {}
env = Environment(
loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]),
)
template = env.get_template("error.jinja2")
rendered_template = template.render(params)
return rendered_template
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error creating logon page")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
|
|
# provides wrapper objects for specific device types
import logging
import urllib2
import time
import arp
import clients
import iplug
# TODO set setErrorStateOnServer(msg) appropriately
################################################################################
# wrapper base class for device types
class DeviceWrapper():
#---------------------------------------------------------------------------
def __init__(self, device):
raise NotImplementedError()
#---------------------------------------------------------------------------
# basic check to see if the virtual device is responding
def updateStatus(self):
device = self.device
if self.client.isAvailable():
self.logger.debug(u'%s is AVAILABLE', device.name)
device.updateStateOnServer('active', True)
device.updateStateOnServer('status', 'Active')
device.updateStateOnServer('lastActiveAt', time.strftime('%c'))
else:
self.logger.debug(u'%s is UNAVAILABLE', device.name)
device.updateStateOnServer('active', False)
device.updateStateOnServer('status', 'Inactive')
self.updateDeviceInfo()
#---------------------------------------------------------------------------
# sub-classes should override this for their custom states
def updateDeviceInfo(self): pass
#---------------------------------------------------------------------------
@staticmethod
def validateConfig(values, errors):
pass
################################################################################
# base wrapper class for relay-type devices
class RelayDeviceWrapper(DeviceWrapper):
#---------------------------------------------------------------------------
def __init__(self, device):
raise NotImplementedError()
#---------------------------------------------------------------------------
# default behavior; subclasses should provide correct implementation
def turnOff(self):
self.logger.warn(u'Not supported - Turn Off %s', self.device.name)
#---------------------------------------------------------------------------
# default behavior; subclasses should provide correct implementation
def turnOn(self):
self.logger.warn(u'Not supported - Turn On %s', self.device.name)
#---------------------------------------------------------------------------
# basic check to see if the virtual device is responding
def updateStatus(self):
device = self.device
if self.client.isAvailable():
self.logger.debug(u'%s is AVAILABLE', device.name)
device.updateStateOnServer('onOffState', 'on')
else:
self.logger.debug(u'%s is UNAVAILABLE', device.name)
device.updateStateOnServer('onOffState', 'off')
self.updateDeviceInfo()
################################################################################
# plugin device wrapper for Network Service devices
class Service(DeviceWrapper):
#---------------------------------------------------------------------------
def __init__(self, device):
self.logger = logging.getLogger('Plugin.wrapper.Service')
address = device.pluginProps['address']
port = int(device.pluginProps['port'])
client = clients.ServiceClient(address, port)
self.device = device
self.client = client
#---------------------------------------------------------------------------
@staticmethod
def validateConfig(values, errors):
iplug.validateConfig_Hostname('address', values, errors, emptyOk=False)
iplug.validateConfig_Int('port', values, errors, min=1, max=65536)
################################################################################
# plugin device wrapper for Ping Status devices
class Ping(DeviceWrapper):
#---------------------------------------------------------------------------
def __init__(self, device):
self.logger = logging.getLogger('Plugin.wrapper.Ping')
address = device.pluginProps['address']
self.device = device
self.client = clients.PingClient(address)
#---------------------------------------------------------------------------
@staticmethod
def validateConfig(values, errors):
iplug.validateConfig_Hostname('address', values, errors, emptyOk=False)
################################################################################
# plugin device wrapper for External IP devices
class ExternalIP(DeviceWrapper):
#---------------------------------------------------------------------------
def __init__(self, device):
self.logger = logging.getLogger('Plugin.wrapper.ExternalIP')
addressType = device.pluginProps['addressType']
self.device = device
if addressType == 'ipv4':
self.client = clients.IPv4AddressClient()
elif addressType == 'ipv6':
self.client = clients.IPv6AddressClient()
#---------------------------------------------------------------------------
def updateDeviceInfo(self):
# the worker (client) keeps a record of the current address...
addr = self.client.current_address
device = self.device
props = device.pluginProps
props['address'] = addr
device.replacePluginPropsOnServer(props)
if addr is not None:
device.updateStateOnServer('lastKnownAddress', addr)
################################################################################
# plugin device wrapper for HTTP Status devices
class HTTP(DeviceWrapper):
#---------------------------------------------------------------------------
def __init__(self, device):
self.logger = logging.getLogger('Plugin.wrapper.HTTP')
url = device.pluginProps['url']
self.device = device
self.client = clients.HttpClient(url)
#---------------------------------------------------------------------------
@staticmethod
def validateConfig(values, errors):
iplug.validateConfig_URL('url', values, errors, emptyOk=False)
# update 'address' for proper display
url = values['url']
req = urllib2.Request(url)
values['address'] = req.get_host()
################################################################################
# plugin device wrapper for Local Device types
class Local(DeviceWrapper):
#---------------------------------------------------------------------------
def __init__(self, device, arpTable):
self.logger = logging.getLogger('Plugin.wrapper.Local')
address = device.pluginProps['address']
self.device = device
self.client = clients.ArpClient(address, arpTable)
#---------------------------------------------------------------------------
@staticmethod
def validateConfig(values, errors):
iplug.validateConfig_MAC('address', values, errors, emptyOk=False)
################################################################################
# plugin device wrapper for SSH Device types
class SSH(RelayDeviceWrapper):
#---------------------------------------------------------------------------
def __init__(self, device):
self.logger = logging.getLogger('Plugin.wrapper.SSH')
address = device.pluginProps['address']
port = int(device.pluginProps['port'])
uname = device.pluginProps['username']
client = clients.SSHClient(address, port=port, username=uname)
client.commands['status'] = device.pluginProps['cmd_status']
client.commands['shutdown'] = device.pluginProps['cmd_shutdown']
self.client = client
self.device = device
#---------------------------------------------------------------------------
@staticmethod
def validateConfig(values, errors):
iplug.validateConfig_Hostname('address', values, errors, emptyOk=False)
iplug.validateConfig_Int('port', values, errors, min=1, max=65536)
iplug.validateConfig_String('username', values, errors, emptyOk=False)
#iplug.validateConfig_String('password', values, errors, emptyOk=True)
iplug.validateConfig_String('cmd_status', values, errors, emptyOk=False)
iplug.validateConfig_String('cmd_shutdown', values, errors, emptyOk=False)
#---------------------------------------------------------------------------
def turnOff(self):
device = self.device
self.logger.info(u'Shutting down %s', device.name)
if not self.client.turnOff():
self.logger.error(u'Could not turn off remote server: %s', device.name)
################################################################################
# plugin device wrapper for macOS Device types
class macOS(RelayDeviceWrapper):
# XXX could we use remote management instead of SSH?
#---------------------------------------------------------------------------
def __init__(self, device):
self.logger = logging.getLogger('Plugin.wrapper.macOS')
address = device.pluginProps['address']
uname = device.pluginProps.get('username', None)
passwd = device.pluginProps.get('password', None)
client = clients.SSHClient(address, username=uname, password=passwd)
# macOS commands are known and cannot be changed by the user
client.commands['status'] = '/usr/bin/true'
client.commands['shutdown'] = '/sbin/shutdown -h now'
self.client = client
self.device = device
#---------------------------------------------------------------------------
@staticmethod
def validateConfig(values, errors):
iplug.validateConfig_Hostname('address', values, errors, emptyOk=False)
iplug.validateConfig_String('username', values, errors, emptyOk=False)
#iplug.validateConfig_String('password', values, errors, emptyOk=True)
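
# --- Illustrative sketch (not part of this plugin) ---
# The wrappers above only need a `device` exposing updateStateOnServer() and
# a `client` exposing isAvailable(); everything else comes from the base
# class. The fakes below are hypothetical test doubles, not Indigo objects.
class _FakeClient(object):
    def isAvailable(self):
        return True

class _FakeDevice(object):
    name = 'example-device'
    def __init__(self):
        self.states = {}
    def updateStateOnServer(self, key, value):
        self.states[key] = value

class _ExampleWrapper(DeviceWrapper):
    def __init__(self, device):
        self.logger = logging.getLogger('Plugin.wrapper.Example')
        self.device = device
        self.client = _FakeClient()

# _ExampleWrapper(_FakeDevice()).updateStatus() would mark the fake device
# 'Active' and stamp 'lastActiveAt' using the base-class logic above.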
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
import multiprocessing
import pytest
from requests.exceptions import Timeout
from calvin.requests.request_handler import RequestHandler, RT
from calvin.utilities.nodecontrol import dispatch_node, dispatch_storage_node
from calvin.utilities.security import Security
from calvin.utilities.attribute_resolver import format_index_string
import os
import json
import copy
from calvin.utilities import calvinlogger
from calvin.utilities import calvinconfig
_log = calvinlogger.get_logger(__name__)
_conf = calvinconfig.get()
request_handler = RequestHandler()
try:
ip_addr = os.environ["CALVIN_TEST_LOCALHOST"]
except:
import socket
ip_addr = socket.gethostbyname(socket.gethostname())
rt1 = None
rt2 = None
rt3 = None
rt4 = None
security_test_dir = None
@pytest.mark.slow
class TestSecurity(unittest.TestCase):
@pytest.fixture(autouse=True, scope="class")
def setup(self, request):
from calvin.Tools.csruntime import csruntime
from conftest import _config_pytest
global rt1
global rt2
global rt3
global rt4
global security_test_dir
security_test_dir = os.path.join(os.path.dirname(__file__), "security_test")
# Runtime 1: local authentication, signature verification, no authorization (no policy check).
rt1_conf = copy.deepcopy(_conf)
rt1_conf.set("security", "security_conf", {
"comment": "Local authentication, no authorization",
"signature_trust_store": os.path.join(security_test_dir, "trustStore"),
"authentication": {
"procedure": "local",
"local_users": {"user1": "pass1", "user2": "pass2", "user3": "pass3"}
}
})
rt1_conf.set('global', 'actor_paths', [os.path.join(security_test_dir, "store")])
rt1_conf.set('security', 'certificate_domain', "testdomain")
rt1_conf.set('security', 'certificate_conf', os.path.join(security_test_dir, "testdomain", "openssl.conf"))
rt1_conf.save("/tmp/calvin5001.conf")
try:
logfile = _config_pytest.getoption("logfile")+"5001"
outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
if outfile == logfile:
outfile = None
except:
logfile = None
outfile = None
csruntime(ip_addr, port=5001, controlport=5021, attr={'indexed_public':
{'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
'node_name': {'organization': 'org.testexample', 'name': 'testNode1'},
'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
configfile="/tmp/calvin5001.conf")
rt1 = RT("http://%s:5021" % ip_addr)
# Runtime 2: local authentication, signature verification, local authorization.
# Can also act as authorization server for other runtimes.
rt2_conf = copy.deepcopy(_conf)
rt2_conf.set("security", "security_conf", {
"comment": "Local authentication, local authorization",
"signature_trust_store": os.path.join(security_test_dir, "trustStore"),
"authentication": {
"procedure": "local",
"local_users": {"user1": "pass1", "user2": "pass2", "user3": "pass3"}
},
"authorization": {
"procedure": "local",
"policy_storage_path": os.path.join(security_test_dir, "policies")
}
})
rt2_conf.set('global', 'actor_paths', [os.path.join(security_test_dir, "store")])
rt2_conf.set('security', 'certificate_domain', "testdomain")
rt2_conf.set('security', 'certificate_conf', os.path.join(security_test_dir, "testdomain", "openssl.conf"))
rt2_conf.save("/tmp/calvin5002.conf")
try:
logfile = _config_pytest.getoption("logfile")+"5002"
outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
if outfile == logfile:
outfile = None
except:
logfile = None
outfile = None
csruntime(ip_addr, port=5002, controlport=5022, attr={'indexed_public':
{'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
'node_name': {'organization': 'org.testexample', 'name': 'testNode2'},
'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
configfile="/tmp/calvin5002.conf", authz_server=True)
rt2 = RT("http://%s:5022" % ip_addr)
# Runtime 3: external authentication (RADIUS), signature verification, local authorization.
rt3_conf = copy.deepcopy(_conf)
rt3_conf.set("security", "security_conf", {
"comment": "RADIUS authentication, local authorization",
"signature_trust_store": os.path.join(security_test_dir, "trustStore"),
"authentication": {
"procedure": "radius",
"server_ip": "localhost",
"secret": "elxghyc5lz1_passwd"
},
"authorization": {
"procedure": "local",
"policy_storage_path": os.path.join(security_test_dir, "policies")
}
})
rt3_conf.set('global', 'actor_paths', [os.path.join(security_test_dir, "store")])
rt3_conf.set('security', 'certificate_domain', "testdomain")
rt3_conf.set('security', 'certificate_conf', os.path.join(security_test_dir, "testdomain", "openssl.conf"))
rt3_conf.save("/tmp/calvin5003.conf")
try:
logfile = _config_pytest.getoption("logfile")+"5003"
outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
if outfile == logfile:
outfile = None
except:
logfile = None
outfile = None
csruntime(ip_addr, port=5003, controlport=5023, attr={'indexed_public':
{'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
'node_name': {'organization': 'org.testexample', 'name': 'testNode3'},
'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
configfile="/tmp/calvin5003.conf")
rt3 = RT("http://%s:5023" % ip_addr)
# Runtime 4: local authentication, signature verification, external authorization (runtime 2).
rt4_conf = copy.deepcopy(_conf)
rt4_conf.set("security", "security_conf", {
"comment": "Local authentication, external authorization",
"signature_trust_store": os.path.join(security_test_dir, "trustStore"),
"authentication": {
"procedure": "local",
"local_users": {"user1": "pass1", "user2": "pass2", "user3": "pass3"}
},
"authorization": {
"procedure": "external",
"server_ip": ip_addr,
"server_port": 5022
}
})
rt4_conf.set('global', 'actor_paths', [os.path.join(security_test_dir, "store")])
rt4_conf.set('security', 'certificate_domain', "testdomain")
rt4_conf.set('security', 'certificate_conf', os.path.join(security_test_dir, "testdomain", "openssl.conf"))
rt4_conf.save("/tmp/calvin5004.conf")
try:
logfile = _config_pytest.getoption("logfile")+"5004"
outfile = os.path.join(os.path.dirname(logfile), os.path.basename(logfile).replace("log", "out"))
if outfile == logfile:
outfile = None
except:
logfile = None
outfile = None
csruntime(ip_addr, port=5004, controlport=5024, attr={'indexed_public':
{'owner':{'organization': 'org.testexample', 'personOrGroup': 'testOwner1'},
'node_name': {'organization': 'org.testexample', 'name': 'testNode4'},
'address': {'country': 'SE', 'locality': 'testCity', 'street': 'testStreet', 'streetNumber': 1}}},
loglevel=_config_pytest.getoption("loglevel"), logfile=logfile, outfile=outfile,
configfile="/tmp/calvin5004.conf")
rt4 = RT("http://%s:5024" % ip_addr)
request.addfinalizer(self.teardown)
def teardown(self):
global rt1
global rt2
global rt3
global rt4
request_handler.quit(rt1)
request_handler.quit(rt2)
request_handler.quit(rt3)
request_handler.quit(rt4)
time.sleep(0.2)
for p in multiprocessing.active_children():
p.terminate()
# They will die eventually (about 5 seconds) in most cases, but this makes sure without wasting time
os.system("pkill -9 -f 'csruntime -n %s -p 5001'" % (ip_addr,))
os.system("pkill -9 -f 'csruntime -n %s -p 5002'" % (ip_addr,))
os.system("pkill -9 -f 'csruntime -n %s -p 5003'" % (ip_addr,))
os.system("pkill -9 -f 'csruntime -n %s -p 5004'" % (ip_addr,))
time.sleep(0.2)
def verify_storage(self):
global rt1
global rt2
global rt3
global rt4
rt1_id = None
rt2_id = None
rt3_id = None
rt4_id = None
failed = True
# Try 30 times waiting for control API to be up and running
for i in range(30):
try:
rt1_id = rt1_id or request_handler.get_node_id(rt1)
rt2_id = rt2_id or request_handler.get_node_id(rt2)
rt3_id = rt3_id or request_handler.get_node_id(rt3)
rt4_id = rt4_id or request_handler.get_node_id(rt4)
failed = False
break
except:
time.sleep(0.1)
assert not failed
assert rt1_id
assert rt2_id
assert rt3_id
assert rt4_id
print "RUNTIMES:", rt1_id, rt2_id, rt3_id, rt4_id
_log.analyze("TESTRUN", "+ IDS", {'waited': 0.1*i})
failed = True
# Try 30 times waiting for storage to be connected
caps1 = []
caps2 = []
caps3 = []
caps4 = []
rt_ids = set([rt1_id, rt2_id, rt3_id, rt4_id])
for i in range(30):
try:
if not (rt1_id in caps1 and rt2_id in caps1 and rt3_id in caps1 and rt4_id in caps1):
caps1 = request_handler.get_index(rt1, "node/capabilities/calvinsys.native.python-json")['result']
if not (rt1_id in caps2 and rt2_id in caps2 and rt3_id in caps2 and rt4_id in caps2):
caps2 = request_handler.get_index(rt2, "node/capabilities/calvinsys.native.python-json")['result']
if not (rt1_id in caps3 and rt2_id in caps3 and rt3_id in caps3 and rt4_id in caps3):
caps3 = request_handler.get_index(rt3, "node/capabilities/calvinsys.native.python-json")['result']
if not (rt1_id in caps4 and rt2_id in caps4 and rt3_id in caps4 and rt4_id in caps4):
caps4 = request_handler.get_index(rt4, "node/capabilities/calvinsys.native.python-json")['result']
if rt_ids <= set(caps1) and rt_ids <= set(caps2) and rt_ids <= set(caps3) and rt_ids <= set(caps4):
failed = False
break
else:
time.sleep(0.1)
except:
time.sleep(0.1)
assert not failed
_log.analyze("TESTRUN", "+ STORAGE", {'waited': 0.1*i})
assert request_handler.get_index(rt1, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode1'}]))
_log.analyze("TESTRUN", "+ RT1 INDEX", {})
assert request_handler.get_index(rt2, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode2'}]))
_log.analyze("TESTRUN", "+ RT2 INDEX", {})
assert request_handler.get_index(rt3, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode3'}]))
_log.analyze("TESTRUN", "+ RT3 INDEX", {})
assert request_handler.get_index(rt4, format_index_string(['node_name', {'organization': 'org.testexample', 'name': 'testNode4'}]))
_log.analyze("TESTRUN", "+ RT4 INDEX", {})
###################################
# Signature related tests
###################################
@pytest.mark.slow
def testSecurity_POSITIVE_CorrectlySignedApp_CorrectlySignedActors(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_correctly_signed.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt1, "test_security1_correctly_signed", content['file'],
credentials={"user": ["user1"], "password": ["pass1"]}, content=content,
check=True)
except Exception as e:
if e.message.startswith("401"):
raise Exception("Failed security verification of app test_security1_correctly_signed")
_log.exception("Test deploy failed")
raise Exception("Failed deployment of app test_security1_correctly_signed, no use to verify if requirements fulfilled")
time.sleep(2)
# Verify that actors exist like this
actors = request_handler.get_actors(rt1)
assert result['actor_map']['test_security1_correctly_signed:src'] in actors
assert result['actor_map']['test_security1_correctly_signed:sum'] in actors
assert result['actor_map']['test_security1_correctly_signed:snk'] in actors
actual = request_handler.report(rt1, result['actor_map']['test_security1_correctly_signed:snk'])
assert len(actual) > 5
request_handler.delete_application(rt1, result['application_id'])
@pytest.mark.slow
def testSecurity_NEGATIVE_IncorrectlySignedApp(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_incorrectly_signed.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt1, "test_security1_incorrectly_signed", content['file'],
credentials={"user": ["user1"], "password": ["pass1"]}, content=content,
check=True)
except Exception as e:
if e.message.startswith("401"):
# We were blocked, as we should
return
_log.exception("Test deploy failed for non security reasons")
raise Exception("Deployment of app test_security1_correctly_signed, did not fail for security reasons")
@pytest.mark.slow
def testSecurity_NEGATIVE_CorrectlySignedApp_IncorrectlySignedActor(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_correctlySignedApp_incorrectlySignedActor.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt1, "test_security1_correctlySignedApp_incorrectlySignedActor", content['file'],
credentials={"user": ["user1"], "password": ["pass1"]}, content=content,
check=True)
except Exception as e:
_log.debug(str(e))
if e.message.startswith("401"):
raise Exception("Failed security verification of app test_security1_correctlySignedApp_incorrectlySignedActor")
_log.exception("Test deploy failed")
raise Exception("Failed deployment of app test_security1_correctlySignedApp_incorrectlySignedActor, no use to verify if requirements fulfilled")
time.sleep(2)
# Verify that actors exist like this
actors = request_handler.get_actors(rt1)
assert result['actor_map']['test_security1_correctlySignedApp_incorrectlySignedActor:src'] in actors
assert result['actor_map']['test_security1_correctlySignedApp_incorrectlySignedActor:sum'] in actors
assert result['actor_map']['test_security1_correctlySignedApp_incorrectlySignedActor:snk'] in actors
actual = request_handler.report(rt1, result['actor_map']['test_security1_correctlySignedApp_incorrectlySignedActor:snk'])
assert len(actual) == 0 # Means that the incorrectly signed actor was not accepted
request_handler.delete_application(rt1, result['application_id'])
###################################
# Policy related tests
###################################
@pytest.mark.slow
def testSecurity_POSITIVE_Permit_UnsignedApp_SignedActors(self):
_log.analyze("TESTRUN", "+", {})
global rt2
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_unsignedApp_signedActors.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt2, "test_security1_unsignedApp_signedActors", content['file'],
credentials={"user": ["user2"], "password": ["pass2"]}, content=content,
check=True)
except Exception as e:
if e.message.startswith("401"):
raise Exception("Failed security verification of app test_security1_unsignedApp_signedActors")
_log.exception("Test deploy failed")
raise Exception("Failed deployment of app test_security1_unsignedApp_signedActors, no use to verify if requirements fulfilled")
time.sleep(2)
# Verify that actors exist like this
actors = request_handler.get_actors(rt2)
assert result['actor_map']['test_security1_unsignedApp_signedActors:src'] in actors
assert result['actor_map']['test_security1_unsignedApp_signedActors:sum'] in actors
assert result['actor_map']['test_security1_unsignedApp_signedActors:snk'] in actors
actual = request_handler.report(rt2, result['actor_map']['test_security1_unsignedApp_signedActors:snk'])
assert len(actual) > 5
request_handler.delete_application(rt2, result['application_id'])
@pytest.mark.slow
def testSecurity_POSITIVE_Permit_UnsignedApp_Unsigned_Actor(self):
_log.analyze("TESTRUN", "+", {})
global rt2
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_unsignedApp_unsignedActors.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt2, "test_security1_unsignedApp_unsignedActors", content['file'],
credentials={"user": ["user3"], "password": ["pass3"]}, content=content,
check=True)
except Exception as e:
if e.message.startswith("401"):
raise Exception("Failed security verification of app test_security1_unsignedApp_unsignedActors")
_log.exception("Test deploy failed")
raise Exception("Failed deployment of app test_security1_unsignedApp_unsignedActors, no use to verify if requirements fulfilled")
time.sleep(2)
# Verify that actors exist like this
actors = request_handler.get_actors(rt2)
assert result['actor_map']['test_security1_unsignedApp_unsignedActors:src'] in actors
assert result['actor_map']['test_security1_unsignedApp_unsignedActors:sum'] in actors
assert result['actor_map']['test_security1_unsignedApp_unsignedActors:snk'] in actors
actual = request_handler.report(rt2, result['actor_map']['test_security1_unsignedApp_unsignedActors:snk'])
assert len(actual) > 5
request_handler.delete_application(rt2, result['application_id'])
@pytest.mark.slow
def testSecurity_NEGATIVE_Deny_SignedApp_SignedActor_UnallowedRequirement(self):
_log.analyze("TESTRUN", "+", {})
global rt2
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_correctly_signed.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt2, "test_security1_correctly_signed", content['file'],
credentials={"user": ["user1"], "password": ["pass1"]}, content=content,
check=True)
except Exception as e:
_log.debug(str(e))
if e.message.startswith("401"):
raise Exception("Failed security verification of app test_security1_correctly_signed")
_log.exception("Test deploy failed")
raise Exception("Failed deployment of app test_security1_correctly_signed, no use to verify if requirements fulfilled")
time.sleep(2)
# Verify that actors exist like this
actors = request_handler.get_actors(rt2)
assert result['actor_map']['test_security1_correctly_signed:src'] in actors
assert result['actor_map']['test_security1_correctly_signed:sum'] in actors
assert result['actor_map']['test_security1_correctly_signed:snk'] in actors
actual = request_handler.report(rt2, result['actor_map']['test_security1_correctly_signed:snk'])
assert len(actual) == 0 # Means that the actor with unallowed requirements was not accepted
request_handler.delete_application(rt2, result['application_id'])
@pytest.mark.slow
def testSecurity_POSITIVE_External_Authorization(self):
_log.analyze("TESTRUN", "+", {})
global rt4
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_unsignedApp_signedActors.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt4, "test_security1_unsignedApp_signedActors", content['file'],
credentials={"user": ["user2"], "password": ["pass2"]}, content=content,
check=True)
except Exception as e:
if e.message.startswith("401"):
raise Exception("Failed security verification of app test_security1_unsignedApp_signedActors")
_log.exception("Test deploy failed")
raise Exception("Failed deployment of app test_security1_unsignedApp_signedActors, no use to verify if requirements fulfilled")
time.sleep(2)
# Verify that actors exist like this
actors = request_handler.get_actors(rt4)
assert result['actor_map']['test_security1_unsignedApp_signedActors:src'] in actors
assert result['actor_map']['test_security1_unsignedApp_signedActors:sum'] in actors
assert result['actor_map']['test_security1_unsignedApp_signedActors:snk'] in actors
actual = request_handler.report(rt4, result['actor_map']['test_security1_unsignedApp_signedActors:snk'])
assert len(actual) > 5
request_handler.delete_application(rt4, result['application_id'])
###################################
# Authentication related tests
###################################
@pytest.mark.slow
def testSecurity_NEGATIVE_UnallowedUser(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_correctly_signed.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt1, "test_security1_correctly_signed", content['file'],
credentials={"user": ["user_not_allowed"], "password": ["pass1"]}, content=content,
check=True)
except Exception as e:
if e.message.startswith("401"):
# We were blocked, as we should
return
_log.exception("Test deploy failed for non security reasons")
raise Exception("Deployment of app test_security1_correctly_signed did not fail for security reasons")
@pytest.mark.slow
def testSecurity_NEGATIVE_IncorrectPassword(self):
_log.analyze("TESTRUN", "+", {})
global rt1
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_correctly_signed.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt1, "test_security1_correctly_signed", content['file'],
credentials={"user": ["user1"], "password": ["incorrect_password"]}, content=content,
check=True)
except Exception as e:
if e.message.startswith("401"):
# We were blocked, as we should
return
_log.exception("Test deploy failed for non security reasons")
raise Exception("Deployment of app test_security1_correctly_signed, did not fail for security reasons")
@pytest.mark.slow
def testSecurity_POSITIVE_RADIUS_Authentication(self):
_log.analyze("TESTRUN", "+", {})
global rt3
global security_test_dir
self.verify_storage()
result = {}
try:
content = Security.verify_signature_get_files(os.path.join(security_test_dir, "scripts", "test_security1_correctly_signed.calvin"))
if not content:
raise Exception("Failed finding script, signature and cert, stopping here")
result = request_handler.deploy_application(rt3, "test_security1_correctly_signed", content['file'],
credentials={"user": ["radius_user1"], "password": ["radius_passwd1"]}, content=content,
check=True)
except Exception as e:
if isinstance(e, Timeout):
raise Exception("Can't connect to RADIUS server. Have you started a RADIUS server?")
elif e.message.startswith("401"):
raise Exception("Failed security verification of app test_security1_correctly_signed")
_log.exception("Test deploy failed")
raise Exception("Failed deployment of app test_security1_correctly_signed, no use to verify if requirements fulfilled")
time.sleep(2)
# Verify that actors exist like this
actors = request_handler.get_actors(rt3)
assert result['actor_map']['test_security1_correctly_signed:src'] in actors
assert result['actor_map']['test_security1_correctly_signed:sum'] in actors
assert result['actor_map']['test_security1_correctly_signed:snk'] in actors
actual = request_handler.report(rt3, result['actor_map']['test_security1_correctly_signed:snk'])
assert len(actual) > 5
request_handler.delete_application(rt3, result['application_id'])
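
# --- Illustrative sketch (not part of these tests) ---
# verify_storage() above retries the control API a fixed number of times
# before asserting. The same pattern, factored into a small reusable helper
# (hypothetical, not a Calvin utility):
def _wait_until(condition, retries=30, delay=0.1):
    """Call `condition` until it returns a truthy value or retries run out."""
    for _ in range(retries):
        try:
            result = condition()
            if result:
                return result
        except Exception:
            pass
        time.sleep(delay)
    return None

# e.g. rt1_id = _wait_until(lambda: request_handler.get_node_id(rt1))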
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Erik Zaadi <erikz@il.ibm.com>
# Avishay Traeger <avishay@il.ibm.com>
import mox
from oslo.config import cfg
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers import xiv_ds8k
FAKE = "fake"
VOLUME = {'size': 16,
'name': FAKE,
'id': 1}
CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
CONF = cfg.CONF
class XIVDS8KFakeProxyDriver(object):
"""Fake IBM XIV and DS8K Proxy Driver."""
def __init__(self, xiv_ds8k_info, logger, expt, driver=None):
"""Initialize Proxy."""
self.xiv_ds8k_info = xiv_ds8k_info
self.logger = logger
self.exception = expt
self.xiv_ds8k_portal = \
self.xiv_ds8k_iqn = FAKE
self.volumes = {}
self.driver = driver
def setup(self, context):
if self.xiv_ds8k_info['xiv_ds8k_user'] != self.driver\
.configuration.san_login:
raise self.exception.NotAuthorized()
if self.xiv_ds8k_info['xiv_ds8k_address'] != self.driver\
.configuration.san_ip:
raise self.exception.HostNotFound(host='fake')
def create_volume(self, volume):
if volume['size'] > 100:
raise self.exception.VolumeBackendAPIException(data='blah')
self.volumes[volume['name']] = volume
def volume_exists(self, volume):
return self.volumes.get(volume['name'], None) is not None
def delete_volume(self, volume):
if self.volumes.get(volume['name'], None) is not None:
del self.volumes[volume['name']]
def initialize_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
lun_id = volume['id']
self.volumes[volume['name']]['attached'] = connector
return {'driver_volume_type': 'iscsi',
'data': {'target_discovered': True,
'target_portal': self.xiv_ds8k_portal,
'target_iqn': self.xiv_ds8k_iqn,
'target_lun': lun_id,
'volume_id': volume['id'],
'multipath': True,
'provider_location': "%s,1 %s %s" % (
self.xiv_ds8k_portal,
self.xiv_ds8k_iqn,
lun_id), },
}
def terminate_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
if not self.is_volume_attached(volume, connector):
raise self.exception.NotFound(_('Volume not found for '
'instance %(instance_id)s.')
% {'instance_id': 'fake'})
del self.volumes[volume['name']]['attached']
def is_volume_attached(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return (self.volumes[volume['name']].get('attached', None)
== connector)
class XIVDS8KVolumeDriverTest(test.TestCase):
"""Test IBM XIV and DS8K volume driver."""
def setUp(self):
"""Initialize IBM XIV and DS8K Driver."""
super(XIVDS8KVolumeDriverTest, self).setUp()
configuration = mox.MockObject(conf.Configuration)
configuration.san_is_local = False
configuration.xiv_ds8k_proxy = \
'cinder.tests.test_xiv_ds8k.XIVDS8KFakeProxyDriver'
configuration.xiv_ds8k_connection_type = 'iscsi'
configuration.san_ip = FAKE
configuration.san_login = FAKE
configuration.san_clustername = FAKE
configuration.san_password = FAKE
configuration.append_config_values(mox.IgnoreArg())
self.driver = xiv_ds8k.XIVDS8KDriver(configuration=configuration)
def test_initialized_should_set_xiv_ds8k_info(self):
"""Test that the san flags are passed to the IBM proxy."""
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'],
self.driver.configuration.san_login)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_pass'],
self.driver.configuration.san_password)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'],
self.driver.configuration.san_ip)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_vol_pool'],
self.driver.configuration.san_clustername)
def test_setup_should_fail_if_credentials_are_invalid(self):
"""Test that the xiv_ds8k_proxy validates credentials."""
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'] = 'invalid'
self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None)
def test_setup_should_fail_if_connection_is_invalid(self):
"""Test that the xiv_ds8k_proxy validates connection."""
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'] = \
'invalid'
self.assertRaises(exception.HostNotFound, self.driver.do_setup, None)
def test_create_volume(self):
"""Test creating a volume."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
self.assertTrue(has_volume)
self.driver.delete_volume(VOLUME)
def test_volume_exists(self):
"""Test the volume exist method with a volume that doesn't exist."""
self.driver.do_setup(None)
self.assertFalse(
self.driver.xiv_ds8k_proxy.volume_exists({'name': FAKE}))
def test_delete_volume(self):
"""Verify that a volume is deleted."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.delete_volume(VOLUME)
has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
self.assertFalse(has_volume)
def test_delete_volume_should_fail_for_not_existing_volume(self):
"""Verify that deleting a non-existing volume is OK."""
self.driver.do_setup(None)
self.driver.delete_volume(VOLUME)
def test_create_volume_should_fail_if_no_pool_space_left(self):
"""Vertify that the xiv_ds8k_proxy validates volume pool space."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
{'name': FAKE,
'id': 1,
'size': 12000})
def test_initialize_connection(self):
"""Test that inititialize connection attaches volume to host."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.assertTrue(
self.driver.xiv_ds8k_proxy.is_volume_attached(VOLUME, CONNECTOR))
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.driver.delete_volume(VOLUME)
def test_initialize_connection_should_fail_for_non_existing_volume(self):
"""Verify that initialize won't work for non-existing volume."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
self.driver.initialize_connection,
VOLUME,
CONNECTOR)
def test_terminate_connection(self):
"""Test terminating a connection."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.assertFalse(self.driver.xiv_ds8k_proxy.is_volume_attached(
VOLUME,
CONNECTOR))
self.driver.delete_volume(VOLUME)
def test_terminate_connection_should_fail_on_non_existing_volume(self):
"""Test that terminate won't work for non-existing volumes."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
self.driver.terminate_connection,
VOLUME,
CONNECTOR)
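
# --- Illustrative sketch (not part of the test suite) ---
# The fake proxy above can also be exercised on its own, since the driver
# merely relays calls to it. This throwaway helper shows the round trip of
# create / attach / detach / delete against the fakes defined in this file.
def _demo_fake_proxy():
    info = {'xiv_ds8k_user': FAKE, 'xiv_ds8k_address': FAKE}
    proxy = XIVDS8KFakeProxyDriver(info, logger=None, expt=exception)
    proxy.create_volume(VOLUME)
    conn = proxy.initialize_connection(VOLUME, CONNECTOR)
    assert conn['data']['target_lun'] == VOLUME['id']
    proxy.terminate_connection(VOLUME, CONNECTOR)
    proxy.delete_volume(VOLUME)
    return conn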
|
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for graphs."""
import hashlib
from flask import current_app
import networkx as nx
from timesketch.lib.datastores.opensearch import OpenSearchDataStore
GRAPH_TYPES = {
'Graph': nx.Graph,
'MultiGraph': nx.MultiGraph,
'DiGraph': nx.DiGraph,
'MultiDiGraph': nx.MultiDiGraph
}
MAX_EVENTS_PER_EDGE = 500
class Graph:
"""Graph object with helper methods.
Attributes:
nx_instance: Networkx graph object.
"""
def __init__(self, graph_type):
"""Initialize Graph object.
Args:
graph_type: (str) Name of graph type.
"""
_nx_graph_class = GRAPH_TYPES.get(graph_type)
self.nx_instance = _nx_graph_class()
self._nodes = {}
self._edges = {}
def add_node(self, label, attributes):
"""Add node to graph.
Args:
label: (str) Label for the node.
attributes: (dict) Attributes to add to node.
Returns:
Instance of Node object.
"""
if not attributes:
attributes = {}
node = Node(label, attributes)
node.set_attribute('id', node.id)
if node.id not in self._nodes:
self._nodes[node.id] = node
return node
def add_edge(self, source, target, label, event, attributes=None):
"""Add edge to graph.
Args:
source: (Node) Node to use as source.
target: (Node) Node to use as target.
label: (str) Label for the node.
event: (dict): OpenSearch event.
attributes: (dict) Attributes to add to node.
"""
if not attributes:
attributes = {}
attributes['id'] = ''.join([source.id, target.id, label]).lower()
edge = Edge(source, target, label, attributes)
if edge.node_counter < MAX_EVENTS_PER_EDGE:
index = event.get('_index')
doc_id = event.get('_id')
events = edge.attributes.get('events', {})
doc_ids = events.get(index, [])
doc_ids.append(doc_id)
edge.node_counter += 1
events[index] = doc_ids
edge.set_attribute('events', events)
self._edges[edge.id] = edge
def commit(self):
"""Commit all nodes and edges to the networkx graph object."""
for node_id, node in self._nodes.items():
self.nx_instance.add_node(
node_id, label=node.label, **node.attributes)
for _, edge in self._edges.items():
label = edge.label + f' ({edge.node_counter})'
self.nx_instance.add_edge(
edge.source.id, edge.target.id, label=label,
**edge.attributes)
def to_cytoscape(self):
"""Output graph in Cytoscape JSON format.
Returns:
Graph in Cytoscape JSON format.
"""
return nx.readwrite.json_graph.cytoscape_data(self.nx_instance)
class BaseGraphElement:
"""Base class for graph elements.
Attributes:
label (str): Node/Edge label to show in the UI.
attributes (dict): Attributes to add to the node/edge.
id (str): Unique value generated from the label.
"""
def __init__(self, label='', attributes=None):
"""Initialize the base element object.
Args:
label (str): Node/Edge label to show in the UI.
attributes (dict): Attributes to add to the node/edge.
"""
self.label = label
self.attributes = attributes or {}
self.id = self._generate_id()
def _generate_id(self):
"""Generate ID for node/edge.
Returns:
MD5 hash (str): MD5 hash of the provided label.
"""
id_string = self.attributes.get('id', self.label)
return hashlib.md5(id_string.encode('utf-8')).hexdigest()
def set_attribute(self, key, value):
"""Add or replace an attribute to the element.
Args:
key (str): Attribute key.
value (str): Attribute value.
"""
self.attributes[key] = value
class Node(BaseGraphElement):
"""Graph node object."""
# TODO: Add logic for Nodes when needed.
class Edge(BaseGraphElement):
"""Graph edge object.
Attributes:
source (Node): Node to add as source node.
target (Node): Node to add as target node.
node_counter (int): Counter for the number of events referenced by the edge.
"""
def __init__(self, source, target, label='', attributes=None):
"""Initialize the Edge object.
Args:
source (Node): Node to use as the edge source.
target (Node): Node to use as the edge target.
label (str): Node/Edge label to show in the UI.
attributes (dict): Attributes to add to the edge.
"""
self.source = source
self.target = target
self.node_counter = 0
super().__init__(label, attributes)
class BaseGraphPlugin:
"""Base class for a graph.
Attributes:
datastore (OpenSearchDataStore): OpenSearch datastore object.
graph (nx.Graph): NetworkX Graph object.
"""
# Name that the graph will be registered as.
NAME = 'name'
# Display name (used in the UI)
DISPLAY_NAME = 'display_name'
# Description of the plugin (used in the UI)
DESCRIPTION = 'description'
# Type of graph. There are four supported types: Undirected Graph,
# Undirected Multi Graph, Directed Graph, Directed Multi Graph.
# If you have multiple edges between nodes you need to use the multi graphs.
#
# See NetworkX documentation for details:
# https://networkx.org/documentation/stable/reference/classes/index.html
GRAPH_TYPE = 'MultiDiGraph'
def __init__(self, sketch=None, timeline_ids=None):
"""Initialize the graph object.
Args:
sketch (Sketch): Sketch object.
timeline_ids (List[int]): An optional list of timeline IDs.
Raises:
KeyError if graph type specified is not supported.
"""
self.datastore = OpenSearchDataStore(
host=current_app.config['OPENSEARCH_HOST'],
port=current_app.config['OPENSEARCH_PORT'])
if not GRAPH_TYPES.get(self.GRAPH_TYPE):
raise KeyError(f'Graph type {self.GRAPH_TYPE} is not supported')
self.graph = Graph(self.GRAPH_TYPE)
self.sketch = sketch
self.timeline_ids = timeline_ids
def _get_sketch_indices(self):
"""List all indices in the Sketch, or those that belong to a timeline.
Returns:
List of index names.
"""
active_timelines = self.sketch.active_timelines
if self.timeline_ids:
indices = [t.searchindex.index_name for t in active_timelines
if t.id in self.timeline_ids]
else:
indices = [t.searchindex.index_name for t in active_timelines]
return indices
# TODO: Refactor this to reuse across analyzers and graphs.
def event_stream(
self, query_string=None, query_filter=None, query_dsl=None,
indices=None, return_fields=None, scroll=True):
"""Search OpenSearch.
Args:
query_string: Query string.
query_filter: Dictionary containing filters to apply.
query_dsl: Dictionary containing OpenSearch DSL query.
indices: List of indices to query.
return_fields: List of fields to return.
scroll: Boolean determining whether we support scrolling searches
or not. Defaults to True.
Returns:
Generator of Event objects.
Raises:
ValueError: if neither query_string nor query_dsl is provided.
"""
if not (query_string or query_dsl):
raise ValueError('Both query_string and query_dsl are missing')
# Query all sketch indices if none are specified.
if not indices:
indices = self._get_sketch_indices()
if not query_filter:
query_filter = {}
return_fields = list(set(return_fields))
if self.timeline_ids:
timeline_ids = self.timeline_ids
else:
timeline_ids = None
event_generator = self.datastore.search_stream(
query_string=query_string,
query_filter=query_filter,
query_dsl=query_dsl,
indices=indices,
return_fields=return_fields,
timeline_ids=timeline_ids,
enable_scroll=scroll,
)
return event_generator
def generate(self):
"""Entry point for the graph."""
raise NotImplementedError
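
# --- Illustrative sketch (not part of the Timesketch API) ---
# Minimal use of the Graph/Node/Edge helpers above outside of any plugin:
# create two nodes, link them with a stand-in OpenSearch event, and export
# the result as Cytoscape JSON. The event dict is hypothetical.
def _demo_graph():
    graph = Graph('MultiDiGraph')
    user = graph.add_node('alice', {'type': 'user'})
    host = graph.add_node('workstation-1', {'type': 'host'})
    fake_event = {'_index': 'sketch-1', '_id': 'abc123'}
    graph.add_edge(user, host, 'logged_on', fake_event)
    graph.commit()
    return graph.to_cytoscape()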
|
|
#!/usr/bin/env python
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
flake_retries=0, timeout_retries=0):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['RELATIVE_COPY_PATH'] = 'test/distrib'
docker_args=[]
for k,v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
environ=docker_env,
shortname='distribtest.%s' % (name),
timeout_seconds=30*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name, cmdline, environ=None, shell=False,
flake_retries=0, timeout_retries=0):
"""Creates jobspec."""
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
shortname='distribtest.%s' % (name),
timeout_seconds=10*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpDistribTest(object):
"""Tests C# NuGet package"""
def __init__(self, platform, arch, docker_suffix=None, use_dotnet_cli=False):
self.name = 'csharp_nuget_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'csharp', platform, arch]
self.script_suffix = ''
if docker_suffix:
self.name += '_%s' % docker_suffix
self.labels.append(docker_suffix)
if use_dotnet_cli:
self.name += '_dotnetcli'
self.script_suffix = '_dotnetcli'
self.labels.append('dotnetcli')
else:
self.labels.append('olddotnet')
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/csharp_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix)
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
elif self.platform == 'windows':
if self.arch == 'x64':
environ={'MSBUILD_EXTRA_ARGS': '/p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
else:
environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
return create_jobspec(self.name,
['test\\distrib\\csharp\\run_distrib_test%s.bat' % self.script_suffix],
environ=environ)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class NodeDistribTest(object):
"""Tests Node package"""
def __init__(self, platform, arch, docker_suffix, node_version):
self.name = 'node_npm_%s_%s_%s' % (platform, arch, node_version)
self.platform = platform
self.arch = arch
self.node_version = node_version
self.labels = ['distribtest', 'node', platform, arch,
'node-%s' % node_version]
if docker_suffix is not None:
self.name += '_%s' % docker_suffix
self.docker_suffix = docker_suffix
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
linux32 = ''
if self.arch == 'x86':
linux32 = 'linux32'
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/node_%s_%s' % (
self.docker_suffix,
self.arch),
'%s test/distrib/node/run_distrib_test.sh %s' % (
linux32,
self.node_version))
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/node/run_distrib_test.sh',
str(self.node_version)],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class PythonDistribTest(object):
"""Tests Python package"""
def __init__(self, platform, arch, docker_suffix):
self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if not self.platform == 'linux':
raise Exception("Not supported yet.")
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/python_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/python/run_distrib_test.sh')
def __str__(self):
return self.name
class RubyDistribTest(object):
"""Tests Ruby package"""
def __init__(self, platform, arch, docker_suffix):
self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if not self.platform == 'linux':
raise Exception("Not supported yet.")
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/ruby_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/ruby/run_distrib_test.sh')
def __str__(self):
return self.name
class PHPDistribTest(object):
"""Tests PHP package"""
def __init__(self, platform, arch, docker_suffix=None):
self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/php_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/php/run_distrib_test.sh')
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/php/run_distrib_test.sh'],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class CppDistribTest(object):
"""Tests Cpp make intall by building examples."""
def __init__(self, platform, arch, docker_suffix=None):
self.name = 'cpp_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'cpp', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/cpp_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/cpp/run_distrib_test.sh')
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return [CppDistribTest('linux', 'x64', 'jessie'),
CSharpDistribTest('linux', 'x64', 'wheezy'),
CSharpDistribTest('linux', 'x64', 'jessie'),
CSharpDistribTest('linux', 'x86', 'jessie'),
CSharpDistribTest('linux', 'x64', 'centos7'),
CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
CSharpDistribTest('macos', 'x86'),
CSharpDistribTest('windows', 'x86'),
CSharpDistribTest('windows', 'x64'),
PythonDistribTest('linux', 'x64', 'wheezy'),
PythonDistribTest('linux', 'x64', 'jessie'),
PythonDistribTest('linux', 'x86', 'jessie'),
PythonDistribTest('linux', 'x64', 'centos6'),
PythonDistribTest('linux', 'x64', 'centos7'),
PythonDistribTest('linux', 'x64', 'fedora20'),
PythonDistribTest('linux', 'x64', 'fedora21'),
PythonDistribTest('linux', 'x64', 'fedora22'),
PythonDistribTest('linux', 'x64', 'fedora23'),
PythonDistribTest('linux', 'x64', 'opensuse'),
PythonDistribTest('linux', 'x64', 'arch'),
PythonDistribTest('linux', 'x64', 'ubuntu1204'),
PythonDistribTest('linux', 'x64', 'ubuntu1404'),
PythonDistribTest('linux', 'x64', 'ubuntu1504'),
PythonDistribTest('linux', 'x64', 'ubuntu1510'),
PythonDistribTest('linux', 'x64', 'ubuntu1604'),
RubyDistribTest('linux', 'x64', 'wheezy'),
RubyDistribTest('linux', 'x64', 'jessie'),
RubyDistribTest('linux', 'x86', 'jessie'),
RubyDistribTest('linux', 'x64', 'centos6'),
RubyDistribTest('linux', 'x64', 'centos7'),
RubyDistribTest('linux', 'x64', 'fedora20'),
RubyDistribTest('linux', 'x64', 'fedora21'),
RubyDistribTest('linux', 'x64', 'fedora22'),
RubyDistribTest('linux', 'x64', 'fedora23'),
RubyDistribTest('linux', 'x64', 'opensuse'),
RubyDistribTest('linux', 'x64', 'ubuntu1204'),
RubyDistribTest('linux', 'x64', 'ubuntu1404'),
RubyDistribTest('linux', 'x64', 'ubuntu1504'),
RubyDistribTest('linux', 'x64', 'ubuntu1510'),
RubyDistribTest('linux', 'x64', 'ubuntu1604'),
NodeDistribTest('macos', 'x64', None, '4'),
NodeDistribTest('macos', 'x64', None, '5'),
NodeDistribTest('linux', 'x86', 'jessie', '4'),
PHPDistribTest('linux', 'x64', 'jessie'),
PHPDistribTest('macos', 'x64'),
] + [
NodeDistribTest('linux', 'x64', os, version)
for os in ('wheezy', 'jessie', 'ubuntu1204', 'ubuntu1404',
'ubuntu1504', 'ubuntu1510', 'ubuntu1604')
for version in ('0.12', '3', '4', '5')
]
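# Illustrative driver sketch (not part of the original script): collect every
# jobspec for the Linux distribtest targets defined above. Filtering on the
# 'linux' label avoids the "Not supported yet." exceptions raised by the
# non-Linux build_jobspec() code paths; the helper name is hypothetical.
def _linux_distribtest_jobspecs():
  jobspecs = []
  for target in targets():
    if 'linux' not in target.labels:
      continue
    jobspecs.extend(target.pre_build_jobspecs())
    jobspecs.append(target.build_jobspec())
  return jobspecs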
|
|
from asyncio import Future, CancelledError, TimeoutError, sleep, gather
from .consts import MAX_ASYNC_WHILE
from .access import (
get_event_loop, LOGGER, ensure_future, create_future
)
def return_false():
return False
def chain_future(future, callback=None, errback=None, next=None):
'''Chain a :class:`~asyncio.Future` to an existing ``future``.
This function chains the ``next`` future to an existing ``future``.
When the input ``future`` receives a result, the optional
``callback`` is executed and its return value is set as the result of ``next``.
If an exception occurs, the optional ``errback`` is executed instead.
:param future: the original :class:`~asyncio.Future` (can be a coroutine)
:param callback: optional callback to execute on the result of ``future``
:param errback: optional callback to execute on the exception of ``future``
:param next: optional :class:`~asyncio.Future` to chain.
If not provided a new future is created
:return: the future ``next``
'''
loop = next._loop if next else None
future = ensure_future(future, loop=loop)
if next is None:
next = create_future(future._loop)
def _callback(fut):
try:
try:
result = future.result()
except Exception as exc:
if errback:
result = errback(exc)
exc = None
else:
raise
else:
if callback:
result = callback(result)
except Exception as exc:
next.set_exception(exc)
else:
if isinstance(result, Future):
chain_future(result, next=next)
else:
next.set_result(result)
future.add_done_callback(_callback)
return next
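# Minimal usage sketch (illustrative only, not part of the pulsar API); the
# helper names below are hypothetical. This must run while an event loop is
# active so that ensure_future can schedule the coroutine.
def _chain_future_example():
    async def _fetch_answer():
        return 21
    # Awaiting the returned future yields 42; had _fetch_answer raised, the
    # errback would have produced the result instead.
    return chain_future(_fetch_answer(),
                        callback=lambda result: result * 2,
                        errback=lambda exc: -1)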
def add_errback(future, callback, loop=None):
'''Add a ``callback`` to a ``future`` executed only if an exception
or cancellation has occurred.'''
def _error_back(fut):
if fut._exception:
callback(fut.exception())
elif fut.cancelled():
callback(CancelledError())
future = ensure_future(future, loop=loop)
future.add_done_callback(_error_back)
return future
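# Illustrative sketch (not part of the module): schedule a coroutine and log
# any exception or cancellation without awaiting it. The helper name is
# hypothetical; LOGGER and ensure_future are imported at the top of this file.
def _add_errback_example(coro):
    task = ensure_future(coro)
    return add_errback(task,
                       lambda exc: LOGGER.error('task failed: %s', exc))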
def maybe_async(value, *, loop=None):
'''Handle a possible asynchronous ``value``.
:parameter value: the value to convert to an asynchronous instance
if it needs to.
:parameter loop: optional :class:`.EventLoop`.
:return: a :class:`.Future` or a synchronous ``value``.
'''
try:
return ensure_future(value, loop=loop)
except TypeError:
return value
async def as_coroutine(value):
try:
value = await value
except TypeError:
pass
return value
def as_gather(*args):
"""Same as :func:`~.asyncio.gather` but allows sync values
"""
return gather(*[as_coroutine(arg) for arg in args])
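# Illustrative sketch (hypothetical helper, not part of the module):
# as_gather accepts a mix of awaitables and plain values, because synchronous
# inputs pass through as_coroutine unchanged.
async def _as_gather_example():
    results = await as_gather(sleep(0.01), 'plain value', 3)
    # results == [None, 'plain value', 3]
    return results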
async def async_while(timeout, while_clause, *args):
'''The asynchronous equivalent of ``while while_clause(*args):``
Use this function within a :ref:`coroutine <coroutine>` when you need
to wait for ``while_clause`` to be satisfied.
:parameter timeout: a timeout in seconds after which this function stops.
:parameter while_clause: while clause callable.
:parameter args: optional arguments to pass to the ``while_clause``
callable.
:return: A :class:`.Future`.
'''
loop = get_event_loop()
start = loop.time()
di = 0.1
interval = 0
result = while_clause(*args)
while result:
interval = min(interval+di, MAX_ASYNC_WHILE)
try:
await sleep(interval, loop=loop)
except TimeoutError:
pass
if timeout and loop.time() - start >= timeout:
break
result = while_clause(*args)
return result
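# Illustrative sketch (hypothetical names, not part of the module): poll a
# server object until its ``started`` flag flips, giving up after 5 seconds.
async def _wait_for_server_example(server):
    still_not_started = await async_while(5, lambda: not server.started)
    if still_not_started:
        raise RuntimeError('server did not start within 5 seconds')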
# ############################################################## Bench
class Bench:
'''Execute a given number of asynchronous requests and wait for results.
'''
start = None
'''The :meth:`~asyncio.BaseEventLoop.time` when the execution starts'''
finish = None
'''The :meth:`~asyncio.BaseEventLoop.time` when the execution finishes'''
result = ()
'''Tuple of results'''
def __init__(self, times, loop=None):
self._loop = loop or get_event_loop()
self.times = times
@property
def taken(self):
'''The total time taken for execution
'''
if self.finish:
return self.finish - self.start
def __call__(self, func, *args, **kwargs):
self.start = self._loop.time()
data = [func(*args, **kwargs) for _ in range(self.times)]
self.result = gather(*data, loop=self._loop)
return chain_future(self.result, callback=self._done)
def _done(self, result):
self.finish = self._loop.time()
self.result = result
return self
# ############################################################## AsyncObject
class AsyncObject:
'''Interface for :ref:`async objects <async-object>`
.. attribute:: _loop
The :ref:`event loop <asyncio-event-loop>` associated with this object
.. attribute:: _logger
Optional logger instance, used by the :attr:`logger` attribute
'''
_logger = None
_loop = None
@property
def logger(self):
'''The logger for this object.
It is either the :attr:`_logger` or the logger of the :attr:`_loop`
'''
return self._logger or getattr(self._loop, 'logger', LOGGER)
@property
def debug(self):
'''True when in debug mode
'''
return getattr(self._loop, 'get_debug', return_false)()
def timeit(self, method, times, *args, **kwargs):
'''Useful utility for benchmarking an asynchronous ``method``.
:param method: the name of the ``method`` to execute
:param times: number of times to execute the ``method``
:param args: positional arguments to pass to the ``method``
:param kwargs: key-valued arguments to pass to the ``method``
:return: a :class:`~asyncio.Future` which results in a :class:`Bench`
object if successful
The usage is simple::
>>> b = self.timeit('asyncmethod', 100)
'''
bench = Bench(times, loop=self._loop)
return bench(getattr(self, method), *args, **kwargs)
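# Hedged end-to-end sketch (not part of pulsar): benchmark an asynchronous
# method of an AsyncObject subclass with ``timeit``. Awaiting the returned
# future yields a Bench instance whose ``taken`` property reports the elapsed
# loop time; the class and values below are purely illustrative.
class _EchoExample(AsyncObject):
    async def echo(self, value):
        await sleep(0)
        return value
# Inside a running coroutine one could do:
#     bench = await _EchoExample().timeit('echo', 100, 'ping')
#     print(bench.taken, bench.result)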
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains Network, a composition of layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils as layers_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class InputLayer(base.Layer):
"""Layer to be used as an entry point into a Network (a graph of layers).
It can either wrap an existing tensor (pass an `input_tensor` argument)
or create a placeholder tensor (pass the `input_shape`
and `dtype` arguments).
It is generally recommended to use the functional layer API via `Input`
(which creates an `InputLayer`) rather than using `InputLayer` directly.
Arguments:
input_shape: Shape tuple (not including the batch axis), or `TensorShape`
instance (not including the batch axis).
batch_size: Optional input batch size (integer or None).
dtype: Datatype of the input.
input_tensor: Optional tensor to use as layer input
instead of creating a placeholder.
sparse: Boolean, whether the placeholder created
is meant to be sparse.
name: Name of the layer (string).
Raises:
RuntimeError: If created in Eager mode.
"""
def __init__(self,
input_shape=None,
batch_size=None,
dtype=dtypes.float32,
input_tensor=None,
sparse=False,
name=None):
super(InputLayer, self).__init__(dtype=dtype, name=name)
self.built = True
self.sparse = sparse
self.batch_size = batch_size
if isinstance(input_shape, tensor_shape.TensorShape):
input_shape = tuple(input_shape.as_list())
if input_tensor is None:
if input_shape is not None:
batch_input_shape = (batch_size,) + tuple(input_shape)
else:
batch_input_shape = None
if context.in_eager_mode():
# In eager mode, create a temporary placeholder to call the layer on.
input_tensor = base._DeferredTensor( # pylint: disable=protected-access
shape=batch_input_shape,
dtype=dtype,
name=self.name)
else:
# In graph mode, create a graph placeholder to call the layer on.
if sparse:
input_tensor = array_ops.sparse_placeholder(
shape=batch_input_shape,
dtype=dtype,
name=self.name)
else:
input_tensor = array_ops.placeholder(
shape=batch_input_shape,
dtype=dtype,
name=self.name)
# For compatibility with Keras API.
self.is_placeholder = True
self._batch_input_shape = batch_input_shape
else:
# For compatibility with Keras API.
self.is_placeholder = False
self._batch_input_shape = tuple(input_tensor.get_shape().as_list())
# Create an input node to add to self.outbound_node
# and set output_tensors' _keras_history.
input_tensor._keras_history = (self, 0, 0) # pylint: disable=protected-access
base.Node(
self,
inbound_layers=[],
node_indices=[],
tensor_indices=[],
input_tensors=[input_tensor],
output_tensors=[input_tensor])
@tf_export('layers.Input')
def Input( # pylint: disable=invalid-name
shape=None,
batch_size=None,
name=None,
dtype=dtypes.float32,
sparse=False,
tensor=None):
"""`Input()` is used to instantiate an input tensor for use with a `Network`.
For instance, if a, b and c are tensors created via `Input`,
it becomes possible to do:
`network = Network(inputs=[a, b], outputs=c)`
Example:
```python
# This is a logistic regression
x = tf.layers.Input(shape=(32,))
y = tf.layers.Dense(16, activation='softmax')(x)
network = tf.layers.Network(x, y)
```
Arguments:
shape: A shape tuple (integer), not including the batch size.
For instance, `shape=(32,)` indicates that the expected input
will be batches of 32-dimensional vectors.
batch_size: Optional input batch size (integer or None).
name: An optional name string for the layer.
Should be unique in a model (do not reuse the same name twice).
It will be autogenerated if it isn't provided.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
sparse: A boolean specifying whether the placeholder
to be created is sparse.
tensor: Optional existing tensor to wrap into the `Input` layer.
If set, the layer will not create a placeholder tensor.
Returns:
A tensor: either a new placeholder (with history metadata) or
`tensor` (if passed), with added history metadata.
Raises:
RuntimeError: If called in Eager mode.
"""
input_layer = InputLayer(
input_shape=shape,
batch_size=batch_size,
name=name,
dtype=dtype,
sparse=sparse,
input_tensor=tensor)
# Return tensor including `_keras_history` metadata.
# Note that in this case train_output and test_output are the same pointer.
outputs = input_layer._inbound_nodes[0].output_tensors # pylint: disable=protected-access
if len(outputs) == 1:
return outputs[0]
else:
return outputs
class GraphNetwork(base.Layer):
"""A GraphNetwork is a directed acyclic graph of layers.
It is the topological form of a `tf.keras.models.Model`. A `Model` is simply a
`GraphNetwork` with added training/evaluation routines.
A `GraphNetwork` instance implements the full `Layer` API. In particular, a
`GraphNetwork` can be called on new inputs.
Example:
```python
# This is a logistic regression
x = tf.layers.Input(shape=(32,))
y = tf.layers.Dense(16, activation='softmax')(x)
network = tf.layers.GraphNetwork(x, y)
# It is then possible to call the network on compatible inputs:
z = tf.layers.Input(shape=(32,))
w = network(z)
# It is possible to retrieve the same properties as a layer:
weights = network.trainable_weights
```
Arguments:
inputs: Input tensor or list of input tensors.
Must come from `tf.layers.Input`.
outputs: Output tensor or list of output tensors. Must come from
tf.layers Layers or Keras layers.
name: Optional name of the model (string).
Attributes:
GraphNetwork has the same attributes as Layer. On top of it, it also has:
- layers: a list of the children layers of the network,
a list of layer instances, ordered from "earlier in the graph"
to "later in the graph".
Methods:
GraphNetwork has the same methods as Layer. On top of it, it also has:
- get_layer: retrieves a child layer by name or index in the graph.
Raises:
TypeError: If created when eager execution is enabled, with inputs that
don't come from a call to `Input` or outputs that don't come from layers.
"""
def __init__(self, inputs, outputs, name=None): # pylint: disable=super-init-not-called
if isinstance(inputs, (list, tuple)):
self.inputs = list(inputs) # Tensor or list of tensors.
else:
self.inputs = [inputs]
if isinstance(outputs, (list, tuple)):
self.outputs = list(outputs)
else:
self.outputs = [outputs]
if context.in_eager_mode():
# Check that all inputs/outputs are DeferredTensors.
for tensor in self.inputs:
if not isinstance(tensor, base._DeferredTensor): # pylint: disable=protected-access
raise TypeError('When eager execution is enabled, '
'inputs must come from a call to '
'`tf.keras.Input` (called after '
'tfe.enable_eager_execution()). '
'Received invalid input: ' + str(tensor))
for tensor in self.outputs:
if not isinstance(tensor, base._DeferredTensor): # pylint: disable=protected-access
raise TypeError('When eager execution is enabled, '
'outputs must come from a call to '
'a layer (called after '
'tfe.enable_eager_execution()). '
'Received invalid output: ' + str(tensor))
self._init_set_name(name)
self._activity_regularizer = None
with vs.variable_scope(
None, default_name=self._base_name) as captured_scope:
self._scope = captured_scope
call_fn_args = estimator_util.fn_args(self.call)
self._compute_previous_mask = ('mask' in call_fn_args or
hasattr(self, 'compute_mask'))
self._call_has_scope_arg = 'scope' in call_fn_args
# This acts just like the `trainable` attribute of any layer instance.
# It does not affect users of the underlying layers, only users of the
# GraphNetwork instance.
self.trainable = True
# A GraphNetwork does not create weights of its own, thus it is already
# built.
self.built = True
# A GraphNetwork does not create weights of its own, thus has no dtype.
self._dtype = None
self._is_graph_network = True
# The following are implemented as property functions:
# self.trainable_weights
# self.non_trainable_weights
# self.input_spec
# Private attributes to implement compatibility with Layer.
self._updates = []
self._losses = []
self._scope = None
self._reuse = None
self._graph = ops.get_default_graph()
# All layers in order of horizontal graph traversal.
# Entries are unique. Includes input and output layers.
self._layers = []
# Check for redundancy in inputs.
if len(set(self.inputs)) != len(self.inputs):
raise ValueError('The list of inputs passed to the model '
'is redundant. '
'All inputs should only appear once.'
' Found: ' + str(self.inputs))
# # List of initial layers (1 to 1 mapping with self.inputs,
# # hence the same layer might appear twice)
# self._input_layers = []
# self._input_layers_node_indices = []
# self._input_layers_tensor_indices = []
# # list of layers (1 to 1 mapping with self.inputs,
# # hence the same layer might appear twice)
# self._output_layers = []
# self._output_layers_node_indices = []
# self._output_layers_tensor_indices = []
self._input_layers = []
self._output_layers = []
self._input_coordinates = []
self._output_coordinates = []
# This is for performance optimization when calling the GraphNetwork on new
inputs. Every time the GraphNetwork is called on a set of input tensors,
# we compute the output tensors, output masks and output shapes in one pass,
# then cache them here. When any of these outputs is queried later, we
# retrieve it from there instead of recomputing it.
self._output_mask_cache = {}
self._output_tensor_cache = {}
self._output_shape_cache = {}
# User-provided arguments validation.
for x in self.inputs:
# Check that x has appropriate `_keras_history` metadata.
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Input tensors to a ' + cls_name + ' ' +
'must come from `tf.layers.Input`. '
'Received: ' + str(x) +
' (missing previous layer metadata).')
# Check that x is an input tensor.
# pylint: disable=protected-access
layer, node_index, tensor_index = x._keras_history
if len(layer._inbound_nodes) > 1 or (
layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):
cls_name = self.__class__.__name__
logging.warning(cls_name + ' inputs must come from '
'`tf.layers.Input` (thus holding past layer metadata), '
'they cannot be the output of '
'a previous non-Input layer. '
'Here, a tensor specified as '
'input to "' + self.name + '" was not an Input tensor, '
'it was generated by layer ' + layer.name + '.\n'
'Note that input tensors are '
'instantiated via `tensor = tf.layers.Input(shape)`.\n'
'The tensor that caused the issue was: ' + str(x.name))
# pylint: enable=protected-access
for x in self.outputs:
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Output tensors to a ' + cls_name + ' must be '
'the output of a TensorFlow `Layer` '
'(thus holding past layer metadata). Found: ' + str(x))
# Build self._output_layers:
for x in self.outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
self._output_layers.append(layer)
self._output_coordinates.append((layer, node_index, tensor_index))
# Build self._input_layers:
for x in self.inputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
# It's supposed to be an input layer, so only one node
# and one tensor output.
assert node_index == 0
assert tensor_index == 0
self._input_layers.append(layer)
self._input_coordinates.append((layer, node_index, tensor_index))
# Network_nodes: set of nodes included in the graph
# (not all nodes included in the layers
# are relevant to the current graph).
network_nodes = set() # ids of all nodes relevant to the GraphNetwork
nodes_depths = {} # dict {node: depth value}
layers_depths = {} # dict {layer: depth value}
layer_indices = {} # dict {layer: index in traversal}
nodes_in_decreasing_depth = []
def build_map_of_graph(tensor,
finished_nodes,
nodes_in_progress,
layer,
node_index,
tensor_index):
"""Builds a map of the graph of layers.
This recursively updates the map `layer_indices`,
the list `nodes_in_decreasing_depth` and the set `network_nodes`.
Arguments:
tensor: Some tensor in a graph.
finished_nodes: Set of nodes whose subgraphs have been traversed
completely. Useful to prevent duplicated work.
nodes_in_progress: Set of nodes that are currently active on the
recursion stack. Useful to detect cycles.
layer: Layer that `tensor` comes from. If not provided,
will be obtained from `tensor._keras_history`.
node_index: Node index that `tensor` comes from.
tensor_index: Tensor index that `tensor` comes from.
Raises:
ValueError: if a cycle is detected.
"""
node = layer._inbound_nodes[node_index] # pylint: disable=protected-access
# Prevent cycles.
if node in nodes_in_progress:
raise ValueError('The tensor ' + str(tensor) + ' at layer "' +
layer.name + '" is part of a cycle.')
# Don't repeat work for shared subgraphs
if node in finished_nodes:
return
node_key = _make_node_key(layer.name, node_index)
# Update network_nodes.
network_nodes.add(node_key)
# Store the traversal order for layer sorting.
if layer not in layer_indices:
layer_indices[layer] = len(layer_indices)
nodes_in_progress.add(node)
# Propagate to all previous tensors connected to this node.
for i in range(len(node.inbound_layers)):
x = node.input_tensors[i]
layer = node.inbound_layers[i]
node_index = node.node_indices[i]
tensor_index = node.tensor_indices[i]
build_map_of_graph(x, finished_nodes, nodes_in_progress, layer,
node_index, tensor_index)
finished_nodes.add(node)
nodes_in_progress.remove(node)
nodes_in_decreasing_depth.append(node)
finished_nodes = set()
nodes_in_progress = set()
for x in self.outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
build_map_of_graph(x, finished_nodes, nodes_in_progress,
layer=layer,
node_index=node_index,
tensor_index=tensor_index)
for node in reversed(nodes_in_decreasing_depth):
# If the depth is not set, the node has no outbound nodes (depth 0).
depth = nodes_depths.setdefault(node, 0)
# Update the depth of the corresponding layer
previous_depth = layers_depths.get(node.outbound_layer, 0)
# If we've seen this layer before at a higher depth,
# we should use that depth instead of the node depth.
# This is necessary for shared layers that have inputs at different
# depth levels in the graph.
depth = max(depth, previous_depth)
layers_depths[node.outbound_layer] = depth
nodes_depths[node] = depth
# Update the depth of inbound nodes.
# The "depth" of a node is the max of the depths
# of all layers it is connected to.
for i in range(len(node.inbound_layers)):
inbound_layer = node.inbound_layers[i]
node_index = node.node_indices[i]
inbound_node = inbound_layer._inbound_nodes[node_index] # pylint: disable=protected-access
previous_depth = nodes_depths.get(inbound_node, 0)
nodes_depths[inbound_node] = max(depth + 1, previous_depth)
# Build a dict {depth: list of nodes with this depth}
nodes_by_depth = {}
for node, depth in nodes_depths.items():
if depth not in nodes_by_depth:
nodes_by_depth[depth] = []
nodes_by_depth[depth].append(node)
# Build a dict {depth: list of layers with this depth}
layers_by_depth = {}
for layer, depth in layers_depths.items():
if depth not in layers_by_depth:
layers_by_depth[depth] = []
layers_by_depth[depth].append(layer)
# Get sorted list of layer depths.
depth_keys = list(layers_by_depth.keys())
depth_keys.sort(reverse=True)
# Set self.layers and self._layers_by_depth.
layers = []
for depth in depth_keys:
layers_for_depth = layers_by_depth[depth]
# GraphNetwork.layers needs to have a deterministic order:
# here we order them by traversal order.
layers_for_depth.sort(key=lambda x: layer_indices[x])
layers.extend(layers_for_depth)
self._layers = layers
self._layers_by_depth = layers_by_depth
# Get sorted list of node depths.
depth_keys = list(nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Check that all tensors required are computable.
# computable_tensors: all tensors in the graph
# that can be computed from the inputs provided.
computable_tensors = []
for x in self.inputs:
computable_tensors.append(x)
layers_with_complete_input = [] # To provide a better error msg.
for depth in depth_keys:
for node in nodes_by_depth[depth]:
layer = node.outbound_layer
if layer:
for x in node.input_tensors:
if x not in computable_tensors:
raise ValueError('Graph disconnected: '
'cannot obtain value for tensor ' + str(x) +
' at layer "' + layer.name + '". '
'The following previous layers '
'were accessed without issue: ' +
str(layers_with_complete_input))
for x in node.output_tensors:
computable_tensors.append(x)
layers_with_complete_input.append(layer.name)
# Keep track of the network's nodes.
self._network_nodes = network_nodes
self._nodes_by_depth = nodes_by_depth
# Ensure name unicity, which will be crucial for serialization
# (since serialized nodes refer to layers by their name).
all_names = [layer.name for layer in self.layers]
for name in all_names:
if all_names.count(name) != 1:
raise ValueError('The name "' + name + '" is used ' +
str(all_names.count(name)) + ' times in the model. '
'All layer names should be unique.')
# Layer parameters.
# The new network starts with a single inbound node
# for its inputs, and no outbound nodes.
self._outbound_nodes = [] # Will be appended to by future calls to __call__
self._inbound_nodes = [
] # Will be appended to below, and by future calls to __call__
# Create the node linking internal inputs to internal outputs.
base.Node(
outbound_layer=self,
inbound_layers=[],
node_indices=[],
tensor_indices=[],
input_tensors=self.inputs,
output_tensors=self.outputs)
@property
def layers(self):
return self._layers
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
Indices are based on order of horizontal graph traversal (bottom-up).
Arguments:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
Raises:
ValueError: In case of invalid layer name or index.
"""
# TODO(fchollet): We could build a dictionary based on layer names
# since they are constant, but we have not done that yet.
if index is not None:
if len(self.layers) <= index:
raise ValueError('Was asked to retrieve layer at index ' + str(index) +
' but model only has ' + str(len(self.layers)) +
' layers.')
else:
return self.layers[index]
else:
if not name:
raise ValueError('Provide either a layer name or layer index.')
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError('No such layer: ' + name)
@property
def stateful(self):
return any([(hasattr(layer, 'stateful') and layer.stateful)
for layer in self.layers])
@property
def updates(self):
"""Retrieve the network's updates.
Will only include updates that are either
unconditional, or conditional on inputs to this model
(e.g. will not include updates that were created by layers of this model
outside of the model).
Effectively, `network.updates` behaves like `layer.updates`.
Concrete example:
```python
bn = keras.layers.BatchNormalization()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1) # This creates 2 updates.
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2) # This creates 2 more updates.
# The BN layer has now 4 updates.
self.assertEqual(len(bn.updates), 4)
# Let's create a model from x2 to y2.
model = keras.models.Model(x2, y2)
# The model does not list all updates from its underlying layers,
# but only the updates that are relevant to it. Updates created by layers
# outside of the model are discarded.
self.assertEqual(len(model.updates), 2)
# If you keep calling the model, you append to its updates, just like
# what happens for a layer.
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
self.assertEqual(len(model.updates), 4)
# But if you call the inner BN layer independently, you don't affect
# the model's updates.
x4 = keras.layers.Input(shape=(10,))
_ = bn(x4)
self.assertEqual(len(model.updates), 4)
```
Returns:
A list of update ops.
"""
if context.in_eager_mode():
return []
if not self.trainable and not self.stateful:
return []
updates = []
for layer in self.layers:
updates += layer.updates
# `updates` might contain irrelevant updates, so it needs to be filtered
# with respect to inputs the model has been called on.
relevant_inputs = self.inputs or []
for i in range(1, len(self._inbound_nodes)):
inputs = self.get_input_at(i)
if isinstance(inputs, list):
relevant_inputs += inputs
else:
relevant_inputs.append(inputs)
reachable = layers_util.get_reachable_from_inputs(relevant_inputs, updates)
relevant_conditional_updates = [x for x in updates if x in reachable]
unconditional_updates = [
x for x in updates if x._unconditional_update] # pylint: disable=protected-access
# A layer could be used multiple times in a nested structure,
# so the updates list must be de-duped.
return list(set(
relevant_conditional_updates + unconditional_updates + self._updates))
@property
def losses(self):
"""Retrieve the network's losses.
Will only include losses that are either
unconditional, or conditional on inputs to this model
(e.g. will not include losses that depend on tensors
that aren't inputs to this model).
Returns:
A list of loss tensors.
"""
losses = []
for layer in self.layers:
losses += layer.losses
if context.in_eager_mode():
return losses
relevant_inputs = self.inputs or []
for i in range(1, len(self._inbound_nodes)):
inputs = self.get_input_at(i)
if isinstance(inputs, list):
relevant_inputs += inputs
else:
relevant_inputs.append(inputs)
reachable = layers_util.get_reachable_from_inputs(relevant_inputs, losses)
relevant_conditional_losses = [x for x in losses if x in reachable]
unconditional_losses = [
x for x in losses if x._unconditional_loss] # pylint: disable=protected-access
return list(set(
relevant_conditional_losses + unconditional_losses + self._losses))
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = []
for layer in self.layers:
weights += layer.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for layer in self.layers:
weights += layer.non_trainable_weights
if not self.trainable:
trainable_weights = []
for layer in self.layers:
trainable_weights += layer.trainable_weights
return trainable_weights + weights
return weights
@property
def input_spec(self):
"""Gets the network's input specs.
Returns:
A list of `InputSpec` instances (one per input to the model)
or a single instance if the model has only one input.
"""
# If not a graph network, can't assume anything.
if not self._is_graph_network:
return None
specs = []
for layer in self._input_layers:
if layer.input_spec is None:
specs.append(None)
else:
if not isinstance(layer.input_spec, list):
raise TypeError('Layer ' + layer.name +
' has an input_spec attribute that '
'is not a list. We expect a list. '
'Found input_spec = ' + str(layer.input_spec))
specs += layer.input_spec
if len(specs) == 1:
return specs[0]
return specs
def call(self, inputs, mask=None):
"""Call the model on new inputs.
In this case `call` just reapplies
all ops in the graph to the new inputs
(i.e. it builds a new computational graph from the provided inputs).
Arguments:
inputs: A tensor or list of tensors.
mask: A mask or list of masks. A mask can be
either a tensor or None (no mask).
Returns:
A tensor if there is a single output, or
a list of tensors if there is more than one output.
"""
inputs = nest.flatten(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = nest.flatten(mask)
if context.in_graph_mode():
# Try to retrieve cached outputs if the layer has already been called
# on these exact inputs.
cache_key = (layers_util.object_list_uid(inputs)
+ '_' + layers_util.object_list_uid(masks))
if cache_key in self._output_tensor_cache:
# Cache hit.
return self._output_tensor_cache[cache_key]
# Actually apply the network graph to the new inputs.
outputs, _ = self._run_internal_graph(inputs, masks)
return outputs
def compute_output_shape(self, input_shape):
if not self._is_graph_network:
raise NotImplementedError
if isinstance(input_shape, list):
input_shapes = []
for shape in input_shape:
if shape is not None:
input_shapes.append(tuple(tensor_shape.TensorShape(shape).as_list()))
else:
input_shapes.append(None)
else:
if input_shape is not None:
input_shapes = [tuple(tensor_shape.TensorShape(input_shape).as_list())]
else:
input_shapes = [None]
if len(input_shapes) != len(self._input_layers):
raise ValueError('Invalid input_shape argument ' + str(input_shape) +
': model has ' + str(len(self._input_layers)) +
' tensor inputs.')
cache_key = layers_util.object_list_uid(input_shapes)
if cache_key not in self._output_shape_cache:
# Cache miss. We have to run the network graph manually (recursive calls
# to `compute_output_shape`).
layers_to_output_shapes = {}
for i in range(len(input_shapes)):
layer = self._input_layers[i]
input_shape = input_shapes[i]
# It's an input layer: then `compute_output_shape` is identity,
# and there is only one node and one tensor output.
shape_key = layer.name + '_0_0'
layers_to_output_shapes[shape_key] = input_shape
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Iterate over nodes, by depth level.
if len(depth_keys) > 1:
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
# This is always a single layer, never a list.
layer = node.outbound_layer
if layer in self._input_layers:
# We've already covered the input layers
# a few lines above.
continue
# Potentially redundant list,
# same size as node.input_tensors.
input_shapes = []
for j in range(len(node.inbound_layers)):
inbound_layer = node.inbound_layers[j]
node_index = node.node_indices[j]
tensor_index = node.tensor_indices[j]
shape_key = inbound_layer.name + '_%s_%s' % (node_index,
tensor_index)
input_shape = layers_to_output_shapes[shape_key]
input_shapes.append(input_shape)
if len(input_shapes) == 1:
output_shape = layer.compute_output_shape(input_shapes[0])
else:
output_shape = layer.compute_output_shape(input_shapes)
if isinstance(output_shape, list):
output_shapes = [
tuple(tensor_shape.TensorShape(shape).as_list())
for shape in output_shape
]
else:
output_shapes = [
tuple(tensor_shape.TensorShape(output_shape).as_list())
]
node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access
for j in range(len(output_shapes)):
shape_key = layer.name + '_%s_%s' % (node_index, j)
layers_to_output_shapes[shape_key] = output_shapes[j]
# Read final output shapes from layers_to_output_shapes.
output_shapes = []
for i in range(len(self._output_layers)):
layer, node_index, tensor_index = self._output_coordinates[i]
shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
output_shapes.append(layers_to_output_shapes[shape_key])
# Store in cache.
self._output_shape_cache[cache_key] = output_shapes
else:
# Cache hit.
output_shapes = self._output_shape_cache[cache_key]
if isinstance(output_shapes, list):
if len(output_shapes) == 1:
return tensor_shape.TensorShape(output_shapes[0])
else:
return [tensor_shape.TensorShape(shape) for shape in output_shapes]
else:
return tensor_shape.TensorShape(output_shapes)
def _run_internal_graph(self, inputs, masks=None):
"""Computes output tensors for new inputs.
# Note:
- Expects `inputs` to be a list (potentially with 1 element).
- Can be run on non-Keras tensors.
Arguments:
inputs: List of tensors
masks: List of masks (tensors or None).
Returns:
Two lists: output_tensors, output_masks (output shapes are computed
internally only for caching).
"""
# Note: masking support is relevant mainly for Keras.
# It cannot be factored out without having to fully reimplement the network
# calling logic on the Keras side. We choose to incorporate it in
# GraphNetwork because 1) it may be useful to fully support in tf.layers in
# the future and 2) Keras is a major user of GraphNetwork. If you don't
# use masking, it does not interfere with regular behavior at all and you
# can ignore it.
if masks is None:
masks = [None for _ in range(len(inputs))]
# Dictionary mapping reference tensors to tuples
# (computed tensor, computed mask)
# we assume a 1:1 mapping from tensor to mask
# TODO(fchollet): raise exception when a `.compute_mask()` call
# does not return a list the same size as `call`
tensor_map = {}
for x, y, mask in zip(self.inputs, inputs, masks):
tensor_map[str(id(x))] = (y, mask)
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
# This is always a single layer, never a list.
layer = node.outbound_layer
reference_input_tensors = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_data = [] # List of tuples (input, mask).
for x in reference_input_tensors:
if str(id(x)) in tensor_map:
computed_data.append(tensor_map[str(id(x))])
if len(computed_data) == len(reference_input_tensors):
# Call layer (reapplying ops to new inputs).
with ops.name_scope(layer.name):
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_data) == 1:
computed_tensor, computed_mask = computed_data[0]
# Ensure mask propagation if applicable.
if 'mask' in estimator_util.fn_args(layer.call):
if 'mask' not in kwargs:
kwargs['mask'] = computed_mask
output_tensors = nest.flatten(
layer.call(computed_tensor, **kwargs))
if hasattr(layer, 'compute_mask'):
output_masks = nest.flatten(
layer.compute_mask(computed_tensor, computed_mask))
else:
output_masks = [None for _ in range(len(output_tensors))]
computed_tensors = [computed_tensor]
computed_masks = [computed_mask]
else:
computed_tensors = [x[0] for x in computed_data]
computed_masks = [x[1] for x in computed_data]
if 'mask' in estimator_util.fn_args(layer.call):
if 'mask' not in kwargs:
kwargs['mask'] = computed_masks
output_tensors = nest.flatten(
layer.call(computed_tensors, **kwargs))
if hasattr(layer, 'compute_mask'):
output_masks = nest.flatten(
layer.compute_mask(computed_tensors, computed_masks))
else:
output_masks = [None for _ in range(len(output_tensors))]
if context.in_graph_mode():
if layer.activity_regularizer is not None:
regularization_losses = [
layer.activity_regularizer(x) for x in output_tensors
]
# Apply activity regularizer if any:
layer.add_loss(regularization_losses, computed_tensors)
# Update tensor_map.
for x, y, mask in zip(reference_output_tensors, output_tensors,
output_masks):
tensor_map[str(id(x))] = (y, mask)
output_tensors = []
output_masks = []
output_shapes = []
for x in self.outputs:
assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x)
tensor, mask = tensor_map[str(id(x))]
output_shapes.append(layers_util.static_shape(x))
output_tensors.append(tensor)
output_masks.append(mask)
if len(output_tensors) == 1:
output_tensors = output_tensors[0]
if output_shapes is not None:
output_shapes = output_shapes[0]
if output_masks is not None:
output_masks = output_masks[0]
if context.in_graph_mode():
# Update cache;
# keys are based on ids on input tensors and inputs masks.
cache_key = (layers_util.object_list_uid(inputs)
+ '_' + layers_util.object_list_uid(masks))
self._output_tensor_cache[cache_key] = output_tensors
self._output_mask_cache[cache_key] = output_masks
if output_shapes is not None:
input_shapes = [layers_util.static_shape(x) for x in inputs]
cache_key = layers_util.object_list_uid(input_shapes)
self._output_shape_cache[cache_key] = output_shapes
return output_tensors, output_masks
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
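# Illustrative sketch (not part of this module): assemble a two-layer
# GraphNetwork through the functional entry point above and re-apply it to a
# fresh input, reusing the underlying layer weights. ``core_layers.Dense`` is
# the tf.layers Dense implementation; the variable names are arbitrary.
#
# from tensorflow.python.layers import core as core_layers
#
# x = Input(shape=(32,))
# y = core_layers.Dense(16)(core_layers.Dense(64)(x))
# network = GraphNetwork(x, y, name='example_network')
# z = Input(shape=(32,))
# w = network(z)  # same weights applied to a new computational subgraph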
|
|
#
# Copyright (c) 2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Sooraj Puthoor
#
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import send_evicts
from topologies.Cluster import Cluster
class CntrlBase:
_seqs = 0
@classmethod
def seqCount(cls):
# Store the count on CntrlBase, not the subclass, since we need a global count
CntrlBase._seqs += 1
return CntrlBase._seqs - 1
_cntrls = 0
@classmethod
def cntrlCount(cls):
# Store the count on CntrlBase, not the subclass, since we need a global count
CntrlBase._cntrls += 1
return CntrlBase._cntrls - 1
_version = 0
@classmethod
def versionCount(cls):
cls._version += 1 # Use count for this particular type
return cls._version - 1
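# Illustrative note (not from the original config): seqCount() and
# cntrlCount() bump counters stored on CntrlBase itself, so all controller
# types share one global numbering, while versionCount() writes to the
# calling class and therefore gives each controller type its own sequence.
#
# CPCntrl.versionCount()   # -> 0 (first CorePair controller version)
# TCPCntrl.versionCount()  # -> 0 (separate per-class counter)
# CntrlBase.seqCount()     # -> next id in the single global sequencer count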
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
resourceStalls = False
dataArrayBanks = 2
tagArrayBanks = 2
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class L2Cache(RubyCache):
resourceStalls = False
assoc = 16
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class CPCntrl(CorePair_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1Icache = L1Cache()
self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
self.L1D0cache = L1Cache()
self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
self.L1D1cache = L1Cache()
self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
self.L2cache = L2Cache()
self.L2cache.create(options.l2_size, options.l2_assoc, options)
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1Icache
self.sequencer.dcache = self.L1D0cache
self.sequencer.ruby_system = ruby_system
self.sequencer.coreid = 0
self.sequencer.is_cpu_sequencer = True
self.sequencer1 = RubySequencer()
self.sequencer1.version = self.seqCount()
self.sequencer1.icache = self.L1Icache
self.sequencer1.dcache = self.L1D1cache
self.sequencer1.ruby_system = ruby_system
self.sequencer1.coreid = 1
self.sequencer1.is_cpu_sequencer = True
self.issue_latency = 1
self.send_evictions = send_evicts(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCPCache(RubyCache):
size = "16kB"
assoc = 16
dataArrayBanks = 16
tagArrayBanks = 16
dataAccessLatency = 4
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.tcp_size)
self.dataArrayBanks = 16
self.tagArrayBanks = 16
self.dataAccessLatency = 4
self.tagAccessLatency = 1
self.resourceStalls = options.no_tcc_resource_stalls
self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
class TCPCntrl(TCP_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(dataAccessLatency = options.TCP_latency)
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class SQCCache(RubyCache):
dataArrayBanks = 8
tagArrayBanks = 8
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.sqc_size)
self.assoc = options.sqc_assoc
self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
class SQCCntrl(SQC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = SQCCache()
self.L1cache.create(options)
self.L1cache.resourceStalls = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.support_data_reqs = False
self.sequencer.is_cpu_sequencer = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCC(RubyCache):
size = MemorySize("256kB")
assoc = 16
dataAccessLatency = 8
tagAccessLatency = 2
resourceStalls = False
def create(self, options):
self.assoc = options.tcc_assoc
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
s = options.num_compute_units
tcc_size = s * 128
tcc_size = str(tcc_size)+'kB'
self.size = MemorySize(tcc_size)
self.dataArrayBanks = 64
self.tagArrayBanks = 64
else:
self.size = MemorySize(options.tcc_size)
self.dataArrayBanks = 256 / options.num_tccs #number of data banks
self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
self.size.value = self.size.value / options.num_tccs
if ((self.size.value / long(self.assoc)) < 128):
self.size.value = long(128 * self.assoc)
self.start_index_bit = math.log(options.cacheline_size, 2) + \
math.log(options.num_tccs, 2)
self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
class TCCCntrl(TCC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L2cache = TCC()
self.L2cache.create(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class L3Cache(RubyCache):
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, options, ruby_system, system):
self.size = MemorySize(options.l3_size)
self.size.value /= options.num_dirs
self.assoc = options.l3_assoc
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataAccessLatency = options.l3_data_latency
self.tagAccessLatency = options.l3_tag_latency
self.resourceStalls = False
self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
class L3Cntrl(L3Cache_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L3cache = L3Cache()
self.L3cache.create(options, ruby_system, system)
self.l3_response_latency = \
max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
# Directory memory: Directory memory of infinite size which is
# used by directory controller to store the "states" of the
# state machine. The state machine is implemented per cache block
class DirMem(RubyDirectoryMemory, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
phys_mem_size = AddrRange(options.mem_size).size()
mem_module_size = phys_mem_size / options.num_dirs
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
self.size = dir_size
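# Worked example (illustrative, not from the original script): with the usual
# gem5 options --mem-size=4GB and --num-dirs=2, phys_mem_size is 4 GiB, so
# each DirMem instance is sized to 2 GiB and tracks only the blocks mapped to
# its directory controller.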
# Directory controller: Contains directory memory, L3 cache and associated state
# machine which is used to accurately redirect a data request to L3 cache or to
# memory. Permission requests do not come to this directory for region-based
# protocols, as they are handled exclusively by the region directory.
# However, region directory controller uses this directory controller for
# sending probe requests and receiving probe responses.
class DirCntrl(Directory_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.response_latency = 25
self.response_latency_regionDir = 1
self.directory = DirMem()
self.directory.create(options, ruby_system, system)
self.L3CacheMemory = L3Cache()
self.L3CacheMemory.create(options, ruby_system, system)
self.l3_hit_latency = \
max(self.L3CacheMemory.dataAccessLatency,
self.L3CacheMemory.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
# Region directory: stores region permissions
class RegionDir(RubyCache):
def create(self, options, ruby_system, system):
self.block_size = "%dB" % (64 * options.blocks_per_region)
self.size = options.region_dir_entries * \
self.block_size * options.num_compute_units
self.assoc = 8
self.tagArrayBanks = 8
self.tagAccessLatency = options.dir_tag_latency
self.dataAccessLatency = 1
self.resourceStalls = options.no_resource_stalls
self.start_index_bit = 6 + int(math.log(options.blocks_per_region, 2))
self.replacement_policy = PseudoLRUReplacementPolicy(assoc = self.assoc)
# Region directory controller : Contains region directory and associated state
# machine for dealing with region coherence requests.
class RegionCntrl(RegionDir_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.cacheMemory = RegionDir()
self.cacheMemory.create(options, ruby_system, system)
self.blocksPerRegion = options.blocks_per_region
self.toDirLatency = \
max(self.cacheMemory.dataAccessLatency,
self.cacheMemory.tagAccessLatency)
self.ruby_system = ruby_system
self.always_migrate = options.always_migrate
self.sym_migrate = options.symmetric_migrate
self.asym_migrate = options.asymmetric_migrate
if self.always_migrate:
assert(not self.asym_migrate and not self.sym_migrate)
if self.sym_migrate:
assert(not self.always_migrate and not self.asym_migrate)
if self.asym_migrate:
assert(not self.always_migrate and not self.sym_migrate)
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
# Region Buffer: A region directory cache which avoids some potential
# long latency lookup of region directory for getting region permissions
class RegionBuffer(RubyCache):
assoc = 4
dataArrayBanks = 256
tagArrayBanks = 256
dataAccessLatency = 1
tagAccessLatency = 1
resourceStalls = True
class RBCntrl(RegionBuffer_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.cacheMemory = RegionBuffer()
self.cacheMemory.resourceStalls = options.no_tcc_resource_stalls
self.cacheMemory.dataArrayBanks = 64
self.cacheMemory.tagArrayBanks = 64
self.blocksPerRegion = options.blocks_per_region
self.cacheMemory.block_size = "%dB" % (64 * self.blocksPerRegion)
self.cacheMemory.start_index_bit = \
6 + int(math.log(self.blocksPerRegion, 2))
self.cacheMemory.size = options.region_buffer_entries * \
self.cacheMemory.block_size * options.num_compute_units
self.toDirLatency = options.gpu_to_dir_latency
self.toRegionDirLatency = options.cpu_to_dir_latency
self.noTCCdir = True
TCC_bits = int(math.log(options.num_tccs, 2))
self.TCC_select_num_bits = TCC_bits
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
self.cacheMemory.replacement_policy = \
PseudoLRUReplacementPolicy(assoc = self.cacheMemory.assoc)
def define_options(parser):
parser.add_option("--num-subcaches", type="int", default=4)
parser.add_option("--l3-data-latency", type="int", default=20)
parser.add_option("--l3-tag-latency", type="int", default=15)
parser.add_option("--cpu-to-dir-latency", type="int", default=120)
parser.add_option("--gpu-to-dir-latency", type="int", default=60)
parser.add_option("--no-resource-stalls", action="store_false",
default=True)
parser.add_option("--no-tcc-resource-stalls", action="store_false",
default=True)
parser.add_option("--num-tbes", type="int", default=32)
parser.add_option("--l2-latency", type="int", default=50) # load to use
parser.add_option("--num-tccs", type="int", default=1,
help="number of TCC banks in the GPU")
parser.add_option("--sqc-size", type='string', default='32kB',
help="SQC cache size")
parser.add_option("--sqc-assoc", type='int', default=8,
help="SQC cache assoc")
parser.add_option("--WB_L1", action="store_true",
default=False, help="L2 Writeback Cache")
parser.add_option("--WB_L2", action="store_true",
default=False, help="L2 Writeback Cache")
parser.add_option("--TCP_latency",
type="int", default=4, help="TCP latency")
parser.add_option("--TCC_latency",
type="int", default=16, help="TCC latency")
parser.add_option("--tcc-size", type='string', default='2MB',
help="agregate tcc size")
parser.add_option("--tcc-assoc", type='int', default=16,
help="tcc assoc")
parser.add_option("--tcp-size", type='string', default='16kB',
help="tcp size")
parser.add_option("--dir-tag-latency", type="int", default=4)
parser.add_option("--dir-tag-banks", type="int", default=4)
parser.add_option("--blocks-per-region", type="int", default=16)
parser.add_option("--dir-entries", type="int", default=8192)
# The region buffer is a cache of the region directory. Hence the region
# directory is inclusive with respect to the region buffer.
# However, the region directory is non-inclusive with respect to
# the caches in the system.
parser.add_option("--region-dir-entries", type="int", default=1024)
parser.add_option("--region-buffer-entries", type="int", default=512)
parser.add_option("--always-migrate",
action="store_true", default=False)
parser.add_option("--symmetric-migrate",
action="store_true", default=False)
parser.add_option("--asymmetric-migrate",
action="store_true", default=False)
parser.add_option("--use-L3-on-WT", action="store_true", default=False)
def create_system(options, full_system, system, dma_devices, ruby_system):
if buildEnv['PROTOCOL'] != 'GPU_VIPER_Region':
panic("This script requires the GPU_VIPER_Region protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
dir_cntrl_nodes = []
TCC_bits = int(math.log(options.num_tccs, 2))
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
# For an odd number of CPUs, still create the right number of controllers
crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
cpuCluster = Cluster(extBW = (crossbar_bw), intBW=crossbar_bw)
for i in xrange((options.num_cpus + 1) / 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
rb_cntrl = RBCntrl()
rb_cntrl.create(options, ruby_system, system)
rb_cntrl.number_of_TBEs = 256
rb_cntrl.isOnCPU = True
cp_cntrl.regionBufferNum = rb_cntrl.version
exec("system.cp_cntrl%d = cp_cntrl" % i)
exec("system.rb_cntrl%d = rb_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
# Connect the CP controllers and the network
cp_cntrl.requestFromCore = MessageBuffer()
cp_cntrl.requestFromCore.master = ruby_system.network.slave
cp_cntrl.responseFromCore = MessageBuffer()
cp_cntrl.responseFromCore.master = ruby_system.network.slave
cp_cntrl.unblockFromCore = MessageBuffer()
cp_cntrl.unblockFromCore.master = ruby_system.network.slave
cp_cntrl.probeToCore = MessageBuffer()
cp_cntrl.probeToCore.slave = ruby_system.network.master
cp_cntrl.responseToCore = MessageBuffer()
cp_cntrl.responseToCore.slave = ruby_system.network.master
cp_cntrl.mandatoryQueue = MessageBuffer()
cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
# Connect the RB controllers to the ruby network
rb_cntrl.requestFromCore = MessageBuffer(ordered = True)
rb_cntrl.requestFromCore.slave = ruby_system.network.master
rb_cntrl.responseFromCore = MessageBuffer()
rb_cntrl.responseFromCore.slave = ruby_system.network.master
rb_cntrl.requestToNetwork = MessageBuffer()
rb_cntrl.requestToNetwork.master = ruby_system.network.slave
rb_cntrl.notifyFromRegionDir = MessageBuffer()
rb_cntrl.notifyFromRegionDir.slave = ruby_system.network.master
rb_cntrl.probeFromRegionDir = MessageBuffer()
rb_cntrl.probeFromRegionDir.slave = ruby_system.network.master
rb_cntrl.unblockFromDir = MessageBuffer()
rb_cntrl.unblockFromDir.slave = ruby_system.network.master
rb_cntrl.responseToRegDir = MessageBuffer()
rb_cntrl.responseToRegDir.master = ruby_system.network.slave
rb_cntrl.triggerQueue = MessageBuffer(ordered = True)
cpuCluster.add(cp_cntrl)
cpuCluster.add(rb_cntrl)
gpuCluster = Cluster(extBW = (crossbar_bw), intBW = crossbar_bw)
for i in xrange(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.create(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = False
exec("system.tcp_cntrl%d = tcp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.coalescer)
# Connect the CP (TCP) controllers to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer()
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("system.sqc_cntrl%d = sqc_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# Connect the SQC controller to the ruby network
sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
sqc_cntrl.probeToSQC.slave = ruby_system.network.master
sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseToSQC.slave = ruby_system.network.master
sqc_cntrl.mandatoryQueue = MessageBuffer()
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
numa_bit = 6
for i in xrange(options.num_tccs):
tcc_cntrl = TCCCntrl()
tcc_cntrl.create(options, ruby_system, system)
tcc_cntrl.l2_request_latency = 1
tcc_cntrl.l2_response_latency = options.TCC_latency
tcc_cntrl.WB = options.WB_L2
tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
# Connect the TCC controllers to the ruby network
tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
tcc_cntrl.responseToCore.master = ruby_system.network.slave
tcc_cntrl.probeFromNB = MessageBuffer()
tcc_cntrl.probeFromNB.slave = ruby_system.network.master
tcc_cntrl.responseFromNB = MessageBuffer()
tcc_cntrl.responseFromNB.slave = ruby_system.network.master
tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
tcc_cntrl.requestToNB.master = ruby_system.network.slave
tcc_cntrl.responseToNB = MessageBuffer()
tcc_cntrl.responseToNB.master = ruby_system.network.slave
tcc_cntrl.unblockToNB = MessageBuffer()
tcc_cntrl.unblockToNB.master = ruby_system.network.slave
tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
rb_cntrl = RBCntrl()
rb_cntrl.create(options, ruby_system, system)
rb_cntrl.number_of_TBEs = 2560 * options.num_compute_units
rb_cntrl.isOnCPU = False
# Connect the RB controllers to the ruby network
rb_cntrl.requestFromCore = MessageBuffer(ordered = True)
rb_cntrl.requestFromCore.slave = ruby_system.network.master
rb_cntrl.responseFromCore = MessageBuffer()
rb_cntrl.responseFromCore.slave = ruby_system.network.master
rb_cntrl.requestToNetwork = MessageBuffer()
rb_cntrl.requestToNetwork.master = ruby_system.network.slave
rb_cntrl.notifyFromRegionDir = MessageBuffer()
rb_cntrl.notifyFromRegionDir.slave = ruby_system.network.master
rb_cntrl.probeFromRegionDir = MessageBuffer()
rb_cntrl.probeFromRegionDir.slave = ruby_system.network.master
rb_cntrl.unblockFromDir = MessageBuffer()
rb_cntrl.unblockFromDir.slave = ruby_system.network.master
rb_cntrl.responseToRegDir = MessageBuffer()
rb_cntrl.responseToRegDir.master = ruby_system.network.slave
rb_cntrl.triggerQueue = MessageBuffer(ordered = True)
tcc_cntrl.regionBufferNum = rb_cntrl.version
exec("system.tcc_cntrl%d = tcc_cntrl" % i)
exec("system.tcc_rb_cntrl%d = rb_cntrl" % i)
# TCC cntrls added to the GPU cluster
gpuCluster.add(tcc_cntrl)
gpuCluster.add(rb_cntrl)
# Because of wire buffers, num_l3caches must equal num_dirs
# Region coherence only works with 1 dir
assert(options.num_l3caches == options.num_dirs == 1)
# This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
# Clusters
mainCluster = Cluster(intBW = crossbar_bw)
dir_cntrl = DirCntrl()
dir_cntrl.create(options, ruby_system, system)
dir_cntrl.number_of_TBEs = 2560 * options.num_compute_units
dir_cntrl.useL3OnWT = options.use_L3_on_WT
# Connect the Directory controller to the ruby network
dir_cntrl.requestFromCores = MessageBuffer()
dir_cntrl.requestFromCores.slave = ruby_system.network.master
dir_cntrl.responseFromCores = MessageBuffer()
dir_cntrl.responseFromCores.slave = ruby_system.network.master
dir_cntrl.unblockFromCores = MessageBuffer()
dir_cntrl.unblockFromCores.slave = ruby_system.network.master
dir_cntrl.probeToCore = MessageBuffer()
dir_cntrl.probeToCore.master = ruby_system.network.slave
dir_cntrl.responseToCore = MessageBuffer()
dir_cntrl.responseToCore.master = ruby_system.network.slave
dir_cntrl.reqFromRegBuf = MessageBuffer()
dir_cntrl.reqFromRegBuf.slave = ruby_system.network.master
dir_cntrl.reqToRegDir = MessageBuffer(ordered = True)
dir_cntrl.reqToRegDir.master = ruby_system.network.slave
dir_cntrl.reqFromRegDir = MessageBuffer(ordered = True)
dir_cntrl.reqFromRegDir.slave = ruby_system.network.master
dir_cntrl.unblockToRegDir = MessageBuffer()
dir_cntrl.unblockToRegDir.master = ruby_system.network.slave
dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.responseFromMemory = MessageBuffer()
exec("system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
mainCluster.add(dir_cntrl)
reg_cntrl = RegionCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits)
reg_cntrl.create(options, ruby_system, system)
reg_cntrl.number_of_TBEs = options.num_tbes
reg_cntrl.cpuRegionBufferNum = system.rb_cntrl0.version
reg_cntrl.gpuRegionBufferNum = system.tcc_rb_cntrl0.version
# Connect the Region Dir controllers to the ruby network
reg_cntrl.requestToDir = MessageBuffer(ordered = True)
reg_cntrl.requestToDir.master = ruby_system.network.slave
reg_cntrl.notifyToRBuffer = MessageBuffer()
reg_cntrl.notifyToRBuffer.master = ruby_system.network.slave
reg_cntrl.probeToRBuffer = MessageBuffer()
reg_cntrl.probeToRBuffer.master = ruby_system.network.slave
reg_cntrl.responseFromRBuffer = MessageBuffer()
reg_cntrl.responseFromRBuffer.slave = ruby_system.network.master
reg_cntrl.requestFromRegBuf = MessageBuffer()
reg_cntrl.requestFromRegBuf.slave = ruby_system.network.master
reg_cntrl.triggerQueue = MessageBuffer(ordered = True)
exec("system.reg_cntrl%d = reg_cntrl" % i)
mainCluster.add(reg_cntrl)
# Assuming no DMA devices
assert(len(dma_devices) == 0)
# Add cpu/gpu clusters to main cluster
mainCluster.add(cpuCluster)
mainCluster.add(gpuCluster)
ruby_system.network.number_of_virtual_networks = 10
return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
|
|
#!flask/bin/python
import json
import os
import sys
import requests
from hashlib import md5
from logbook import Logger
from catalyst.utils.remote_utils import BACKTEST_PATH, STATUS_PATH, POST, \
GET, EXCEPTION_LOG, convert_date, prepare_args, handle_status, \
is_valid_uuid
from catalyst.exchange.utils.exchange_utils import get_remote_auth,\
get_remote_folder
from catalyst.exchange.exchange_errors import RemoteAuthEmpty
log = Logger('remote')
# AUTH_SERVER = 'http://localhost:5000'
AUTH_SERVER = "https://sandbox2.enigma.co"
BACKTEST = 'backtest'
STATUS = 'status'
def handle_response(response, mode):
"""
Handles the response given by the server according to its status code.
:param response: the response object returned from the request
:param mode: BACKTEST or STATUS
:return: DataFrame or str
"""
if response.status_code == 500:
raise Exception("issues with cloud connections,\n"
"unable to run catalyst on the cloud,\n"
"try running again and if you get the same response,\n"
+ EXCEPTION_LOG
)
elif response.status_code == 502:
raise Exception("The server is down at the moment,\n" + EXCEPTION_LOG)
elif response.status_code == 400 or response.status_code == 401:
raise Exception("There is a connection but it was aborted due "
"to wrong arguments given to the server.\n" +
response.content.decode('utf-8') + '\n' +
EXCEPTION_LOG)
elif response.status_code == 202:
raise Exception("The server is under maintenance. "
"please try again in a few minutes")
else: # if the run was successful
if mode == BACKTEST:
algo_id = response.json()['algo_id']
log.info('In order to follow your algo run use the following id: '
+ algo_id)
return algo_id
elif mode == STATUS:
return handle_status(response.json())
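# Illustrative sketch (not part of the original module): how handle_response()
# is typically driven by a successful server reply. _FakeResponse is a
# hypothetical stand-in for the requests.Response object returned by the
# auth server; the algo id below is made up.
class _FakeResponse(object):
    status_code = 200

    @staticmethod
    def json():
        return {'algo_id': '123e4567-e89b-12d3-a456-426614174000'}


def _example_handle_backtest_response():
    # Logs the algo id and returns it to the caller.
    return handle_response(_FakeResponse(), BACKTEST)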
def remote_backtest(
initialize,
handle_data,
before_trading_start,
analyze,
algofile,
algotext,
defines,
data_frequency,
capital_base,
data,
bundle,
bundle_timestamp,
start,
end,
output,
print_algo,
local_namespace,
environ,
live,
exchange,
algo_namespace,
quote_currency,
live_graph,
analyze_live,
simulate_orders,
auth_aliases,
stats_output,
mail,
):
if algotext or algofile:
# argument preparation - encode the file for transfer
algofile, algotext = prepare_args(algofile, algotext)
json_file = {'arguments': {
'initialize': initialize,
'handle_data': handle_data,
'before_trading_start': before_trading_start,
'analyze': analyze,
'algotext': algotext,
'defines': defines,
'data_frequency': data_frequency,
'capital_base': capital_base,
'data': data,
'bundle': bundle,
'bundle_timestamp': bundle_timestamp,
'start': start,
'end': end,
'local_namespace': local_namespace,
'environ': None,
'analyze_live': analyze_live,
'stats_output': stats_output,
'algofile': algofile,
'output': output,
'print_algo': print_algo,
'live': live,
'exchange': exchange,
'algo_namespace': algo_namespace,
'quote_currency': quote_currency,
'live_graph': live_graph,
'simulate_orders': simulate_orders,
'auth_aliases': auth_aliases,
'mail': mail,
'py_version': sys.version_info[0], # the python version running on
# the client's side. 2 or 3
}}
response = send_digest_request(
json_file=json_file, path=BACKTEST_PATH, method=POST
)
return handle_response(response, BACKTEST)
def get_remote_status(algo_id):
if not is_valid_uuid(algo_id):
raise Exception("the id you entered is invalid! "
"please enter a valid id.")
json_file = {'algo_id': algo_id}
response = send_digest_request(
json_file=json_file, path=STATUS_PATH, method=GET
)
return handle_response(response, STATUS)
def send_digest_request(json_file, path, method):
try:
key, secret = retrieve_remote_auth()
except json.JSONDecodeError as e:
log.error("your key and secret aren't stored properly\n{}".
format(e.msg))
raise
json_file['key'] = key
session = requests.Session()
if method == POST:
response = session.post('{}{}'.format(AUTH_SERVER, path),
headers={
'Authorization':
'Digest username="{0}",'
'password="{1}"'.format(key, secret)
},
)
else: # method == GET:
response = session.get('{}{}'.format(AUTH_SERVER, path),
headers={'Authorization':
'Digest username="{0}", '
'password="{1}"'.
format(key, secret)},
)
header = response.headers.get('WWW-Authenticate')
auth_type, auth_info = header.split(None, 1)
d = requests.utils.parse_dict_header(auth_info)
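# Build the HTTP Digest response per RFC 2617:
#   HA1 = MD5(username:realm:password)
#   HA2 = MD5(method:uri)
#   response = MD5(HA1:nonce:HA2)
# The server omits qop here, so this simplified (RFC 2069 style)
# computation is sufficient.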
a1 = key + ":" + d['realm'] + ":" + secret
ha1 = md5(a1.encode('utf-8')).hexdigest()
a2 = "{}:{}".format(method, path)
ha2 = md5(a2.encode('utf-8')).hexdigest()
a3 = ha1 + ":" + d['nonce'] + ":" + ha2
result = md5(a3.encode('utf-8')).hexdigest()
if method == POST:
return session.post('{}{}'.format(AUTH_SERVER, path),
json=json.dumps(json_file, default=convert_date),
headers={
'Authorization': 'Digest username="{0}",'
'realm="{1}",nonce="{2}",'
'uri="{3}",'
'response="{4}",'
'opaque="{5}"'.
format(key, d['realm'], d['nonce'], path,
result, d['opaque'])})
else: # method == GET
return session.get('{}{}'.format(AUTH_SERVER, path),
json=json.dumps(json_file, default=convert_date),
headers={'Authorization':
'Digest username="{0}", realm="{1}",'
'nonce="{2}",uri="{3}", '
'response="{4}",opaque="{5}"'.
format(key, d['realm'], d['nonce'],
path, result, d['opaque'])})
def retrieve_remote_auth():
remote_auth_dict = get_remote_auth()
has_auth = (remote_auth_dict['key'] != '' and
remote_auth_dict['secret'] != '')
if not has_auth:
raise RemoteAuthEmpty(
filename=os.path.join(get_remote_folder(), 'remote_auth.json')
)
else:
return remote_auth_dict['key'], remote_auth_dict['secret']
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An extremely simple WSGI web application framework.
This module exports three primary classes: Request, Response, and
RequestHandler. You implement a web application by subclassing RequestHandler.
As WSGI requests come in, they are passed to instances of your RequestHandlers.
The RequestHandler class provides access to the easy-to-use Request and
Response objects so you can interpret the request and write the response with
no knowledge of the esoteric WSGI semantics. Here is a simple example:
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class MainPage(webapp.RequestHandler):
def get(self):
self.response.out.write(
'<html><body><form action="/hello" method="post">'
'Name: <input name="name" type="text" size="20"> '
'<input type="submit" value="Say Hello"></form></body></html>')
class HelloPage(webapp.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('Hello, %s' % self.request.get('name'))
application = webapp.WSGIApplication([
('/', MainPage),
('/hello', HelloPage)
], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
The WSGIApplication class maps URI regular expressions to your RequestHandler
classes. It is a WSGI-compatible application object, so you can use it in
conjunction with wsgiref to make your web application into, e.g., a CGI
script or a simple HTTP server, as in the example above.
The framework does not support streaming output. All output from a response
is stored in memory before it is written.
"""
import cgi
import logging
import re
import StringIO
import sys
import traceback
import urlparse
import webob
import wsgiref.handlers
import wsgiref.headers
import wsgiref.util
wsgiref.handlers.BaseHandler.os_environ = {}
RE_FIND_GROUPS = re.compile('\(.*?\)')
_CHARSET_RE = re.compile(r';\s*charset=([^;\s]*)', re.I)
class Error(Exception):
"""Base of all exceptions in the webapp module."""
pass
class CannotReversePattern(Error):
"""Thrown when a url_pattern cannot be reversed."""
pass
class NoUrlFoundError(Error):
"""Thrown when RequestHandler.get_url() fails."""
pass
class Request(webob.Request):
"""Abstraction for an HTTP request.
Properties:
uri: the complete URI requested by the user
scheme: 'http' or 'https'
host: the host, including the port
path: the path up to the ';' or '?' in the URL
parameters: the part of the URL between the ';' and the '?', if any
query: the part of the URL after the '?'
You can access parsed query and POST values with the get() method; do not
parse the query string yourself.
"""
request_body_tempfile_limit = 0
uri = property(lambda self: self.url)
query = property(lambda self: self.query_string)
def __init__(self, environ):
"""Constructs a Request object from a WSGI environment.
If the charset isn't specified in the Content-Type header, defaults
to UTF-8.
Args:
environ: A WSGI-compliant environment dictionary.
"""
match = _CHARSET_RE.search(environ.get('CONTENT_TYPE', ''))
if match:
charset = match.group(1).lower()
else:
charset = 'utf-8'
webob.Request.__init__(self, environ, charset=charset,
unicode_errors='ignore', decode_param_names=True)
def get(self, argument_name, default_value='', allow_multiple=False):
"""Returns the query or POST argument with the given name.
We parse the query string and POST payload lazily, so this will be a
slower operation on the first call.
Args:
argument_name: the name of the query or POST argument
default_value: the value to return if the given argument is not present
allow_multiple: return a list of values with the given name (deprecated)
Returns:
If allow_multiple is False (which it is by default), we return the first
value with the given name given in the request. If it is True, we always
return a list.
"""
param_value = self.get_all(argument_name)
if allow_multiple:
logging.warning('allow_multiple is a deprecated param, please use the '
'Request.get_all() method instead.')
if len(param_value) > 0:
if allow_multiple:
return param_value
return param_value[0]
else:
if allow_multiple and not default_value:
return []
return default_value
def get_all(self, argument_name, default_value=None):
"""Returns a list of query or POST arguments with the given name.
We parse the query string and POST payload lazily, so this will be a
slower operation on the first call.
Args:
argument_name: the name of the query or POST argument
default_value: the value to return if the given argument is not present,
None may not be used as a default, if it is then an empty list will be
returned instead.
Returns:
A (possibly empty) list of values.
"""
if self.charset:
argument_name = argument_name.encode(self.charset)
if default_value is None:
default_value = []
param_value = self.params.getall(argument_name)
if param_value is None or len(param_value) == 0:
return default_value
for i in xrange(len(param_value)):
if isinstance(param_value[i], cgi.FieldStorage):
param_value[i] = param_value[i].value
return param_value
def arguments(self):
"""Returns a list of the arguments provided in the query and/or POST.
The return value is a list of strings.
"""
return list(set(self.params.keys()))
def get_range(self, name, min_value=None, max_value=None, default=0):
"""Parses the given int argument, limiting it to the given range.
Args:
name: the name of the argument
min_value: the minimum int value of the argument (if any)
max_value: the maximum int value of the argument (if any)
default: the default value of the argument if it is not given
Returns:
An int within the given range for the argument
"""
value = self.get(name, default)
if value is None:
return value
try:
value = int(value)
except ValueError:
value = default
if value is not None:
if max_value is not None:
value = min(value, max_value)
if min_value is not None:
value = max(value, min_value)
return value
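# Illustrative sketch (not part of the original framework): constructing a
# Request from a minimal, made-up WSGI environ and reading query arguments
# with get() and get_range().
def _example_request_usage():
    environ = {
        'REQUEST_METHOD': 'GET',
        'PATH_INFO': '/items',
        'QUERY_STRING': 'name=foo&page=12',
        'SERVER_NAME': 'localhost',
        'SERVER_PORT': '80',
        'wsgi.url_scheme': 'http',
    }
    request = Request(environ)
    name = request.get('name', default_value='anonymous')        # 'foo'
    page = request.get_range('page', min_value=1, max_value=10)  # clamped to 10
    return name, page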
class Response(object):
"""Abstraction for an HTTP response.
Properties:
out: file pointer for the output stream
headers: wsgiref.headers.Headers instance representing the output headers
"""
def __init__(self):
"""Constructs a response with the default settings."""
self.out = StringIO.StringIO()
self.__wsgi_headers = []
self.headers = wsgiref.headers.Headers(self.__wsgi_headers)
self.headers['Content-Type'] = 'text/html; charset=utf-8'
self.headers['Cache-Control'] = 'no-cache'
self.set_status(200)
@property
def status(self):
"""Returns current request status code."""
return self.__status[0]
@property
def status_message(self):
"""Returns current request status message."""
return self.__status[1]
def set_status(self, code, message=None):
"""Sets the HTTP status code of this response.
Args:
code: the HTTP status code to set
message: the HTTP status string to use
If no status string is given, we use the default from the HTTP/1.1
specification.
"""
if not message:
message = Response.http_status_message(code)
self.__status = (code, message)
def has_error(self):
"""Indicates whether the response was an error response."""
return self.__status[0] >= 400
def clear(self):
"""Clears all data written to the output stream so that it is empty."""
self.out.seek(0)
self.out.truncate(0)
def wsgi_write(self, start_response):
"""Writes this response using WSGI semantics with the given WSGI function.
Args:
start_response: the WSGI-compatible start_response function
"""
body = self.out.getvalue()
if isinstance(body, unicode):
body = body.encode('utf-8')
elif self.headers.get('Content-Type', '').endswith('; charset=utf-8'):
try:
body.decode('utf-8')
except UnicodeError, e:
logging.warning('Response written is not UTF-8: %s', e)
if (self.headers.get('Cache-Control') == 'no-cache' and
not self.headers.get('Expires')):
self.headers['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
self.headers['Content-Length'] = str(len(body))
new_headers = []
for header, value in self.__wsgi_headers:
if not isinstance(value, basestring):
value = unicode(value)
if ('\n' in header or '\r' in header or
'\n' in value or '\r' in value):
logging.warning('Replacing newline in header: %s', repr((header,value)))
value = value.replace('\n','').replace('\r','')
header = header.replace('\n','').replace('\r','')
new_headers.append((header, value))
self.__wsgi_headers = new_headers
write = start_response('%d %s' % self.__status, self.__wsgi_headers)
write(body)
self.out.close()
def http_status_message(code):
"""Returns the default HTTP status message for the given code.
Args:
code: the HTTP code for which we want a message
"""
if not Response.__HTTP_STATUS_MESSAGES.has_key(code):
raise Error('Invalid HTTP status code: %d' % code)
return Response.__HTTP_STATUS_MESSAGES[code]
http_status_message = staticmethod(http_status_message)
__HTTP_STATUS_MESSAGES = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Moved Temporarily',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: 'Unused',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Time-out',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Large',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Time-out',
505: 'HTTP Version not supported'
}
class RequestHandler(object):
"""Our base HTTP request handler. Clients should subclass this class.
Subclasses should override get(), post(), head(), options(), etc to handle
different HTTP methods.
"""
def initialize(self, request, response):
"""Initializes this request handler with the given Request and Response."""
self.request = request
self.response = response
def get(self, *args):
"""Handler method for GET requests."""
self.error(405)
def post(self, *args):
"""Handler method for POST requests."""
self.error(405)
def head(self, *args):
"""Handler method for HEAD requests."""
self.error(405)
def options(self, *args):
"""Handler method for OPTIONS requests."""
self.error(405)
def put(self, *args):
"""Handler method for PUT requests."""
self.error(405)
def delete(self, *args):
"""Handler method for DELETE requests."""
self.error(405)
def trace(self, *args):
"""Handler method for TRACE requests."""
self.error(405)
def error(self, code):
"""Clears the response output stream and sets the given HTTP error code.
Args:
code: the HTTP status error code (e.g., 501)
"""
self.response.set_status(code)
self.response.clear()
def redirect(self, uri, permanent=False):
"""Issues an HTTP redirect to the given relative URL.
Args:
uri: a relative or absolute URI (e.g., '../flowers.html')
permanent: if true, we use a 301 redirect instead of a 302 redirect
"""
if permanent:
self.response.set_status(301)
else:
self.response.set_status(302)
absolute_url = urlparse.urljoin(self.request.uri, uri)
self.response.headers['Location'] = str(absolute_url)
self.response.clear()
def handle_exception(self, exception, debug_mode):
"""Called if this handler throws an exception during execution.
The default behavior is to call self.error(500) and print a stack trace
if debug_mode is True.
Args:
exception: the exception that was thrown
debug_mode: True if the web application is running in debug mode
"""
self.error(500)
logging.exception(exception)
if debug_mode:
lines = ''.join(traceback.format_exception(*sys.exc_info()))
self.response.clear()
self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, quote=True)))
@classmethod
def new_factory(cls, *args, **kwargs):
"""Create new request handler factory.
Use factory method to create reusable request handlers that just
require a few configuration parameters to construct. Also useful
for injecting shared state between multiple request handler
instances without relying on global variables. For example, to
create a set of post handlers that will do simple text transformations
you can write:
class ChangeTextHandler(webapp.RequestHandler):
def __init__(self, transform):
self.transform = transform
def post(self):
response_text = self.transform(
self.request.body_file.getvalue())
self.response.out.write(response_text)
application = webapp.WSGIApplication(
[('/to_lower', ChangeTextHandler.new_factory(str.lower)),
('/to_upper', ChangeTextHandler.new_factory(str.upper)),
],
debug=True)
Text POSTed to /to_lower will be lower cased.
Text POSTed to /to_upper will be upper cased.
"""
def new_instance():
return cls(*args, **kwargs)
new_instance.__name__ = cls.__name__ + 'Factory'
return new_instance
@classmethod
def get_url(cls, *args, **kargs):
"""Returns the url for the given handler.
The default implementation uses the patterns passed to the active
WSGIApplication to create a url. However, it is different from Django's
urlresolvers.reverse() in the following ways:
- It does not try to resolve handlers via module loading
- It does not support named arguments
- It performs some post-processing on the url to remove some regex
operators.
- It will try to fill in the left-most missing arguments with the args
used in the active request.
Args:
args: Parameters for the url pattern's groups.
kargs: Optionally contains 'implicit_args' that can either be a boolean
or a tuple. When it is True, it will use the arguments to the
active request as implicit arguments. When it is False (default),
it will not use any implicit arguments. When it is a tuple, it
will use the tuple as the implicit arguments. Implicit arguments
fill in the left-most args if some are missing from args.
Returns:
The url for this handler/args combination.
Raises:
NoUrlFoundError: No url pattern for this handler has the same
number of args that were passed in.
"""
app = WSGIApplication.active_instance
pattern_map = app._pattern_map
implicit_args = kargs.get('implicit_args', ())
if implicit_args == True:
implicit_args = app.current_request_args
min_params = len(args)
for pattern_tuple in pattern_map.get(cls, ()):
num_params_in_pattern = pattern_tuple[1]
if num_params_in_pattern < min_params:
continue
try:
num_implicit_args = max(0, num_params_in_pattern - len(args))
merged_args = implicit_args[:num_implicit_args] + args
url = _reverse_url_pattern(pattern_tuple[0], *merged_args)
url = url.replace('\\', '')
url = url.replace('?', '')
return url
except CannotReversePattern:
continue
logging.warning('get_url failed for Handler name: %r, Args: %r',
cls.__name__, args)
raise NoUrlFoundError
def _reverse_url_pattern(url_pattern, *args):
"""Turns a regex that matches a url back into a url by replacing
the url pattern's groups with the given args. Removes '^' and '$'
from the result.
Args:
url_pattern: A pattern used to match a URL.
args: list of values corresponding to groups in url_pattern.
Returns:
A string with url_pattern's groups filled in values from args.
Raises:
CannotReversePattern if either there aren't enough args to fill
url_pattern's groups, or if any arg isn't matched by the regular
expression fragment in the corresponding group.
"""
group_index = [0]
def expand_group(match):
group = match.group(1)
try:
value = str(args[group_index[0]])
group_index[0] += 1
except IndexError:
raise CannotReversePattern('Not enough arguments in url tag')
if not re.match(group + '$', value):
raise CannotReversePattern("Value %r doesn't match (%r)" % (value, group))
return value
result = re.sub(r'\(([^)]+)\)', expand_group, url_pattern.pattern)
result = result.replace('^', '')
result = result.replace('$', '')
return result
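# Illustrative sketch (not part of the original framework): reversing a
# compiled URL pattern back into a concrete path. The pattern below is
# hypothetical.
def _example_reverse_url_pattern():
    pattern = re.compile(r'^/user/(\d+)/post/(\d+)$')
    return _reverse_url_pattern(pattern, 42, 7)  # -> '/user/42/post/7'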
class RedirectHandler(RequestHandler):
"""Simple redirection handler.
Easily configure URLs to redirect to alternate targets. For example,
to configure a web application so that the root URL is always redirected
to the /home path, do:
application = webapp.WSGIApplication(
[('/', webapp.RedirectHandler.new_factory('/home', permanent=True)),
('/home', HomeHandler),
],
debug=True)
Handler also useful for setting up obsolete URLs to redirect to new paths.
"""
def __init__(self, path, permanent=False):
"""Constructor.
Do not use directly. Configure using new_factory method.
Args:
path: Path to redirect to.
permanent: if true, we use a 301 redirect instead of a 302 redirect.
"""
self.path = path
self.permanent = permanent
def get(self):
self.redirect(self.path, permanent=self.permanent)
class WSGIApplication(object):
"""Wraps a set of webapp RequestHandlers in a WSGI-compatible application.
To use this class, pass a list of (URI regular expression, RequestHandler)
pairs to the constructor, and pass the class instance to a WSGI handler.
See the example in the module comments for details.
The URL mapping is first-match based on the list ordering.
"""
REQUEST_CLASS = Request
RESPONSE_CLASS = Response
def __init__(self, url_mapping, debug=False):
"""Initializes this application with the given URL mapping.
Args:
url_mapping: list of (URI regular expression, RequestHandler) pairs
(e.g., [('/', ReqHan)])
debug: if true, we send Python stack traces to the browser on errors
"""
self._init_url_mappings(url_mapping)
self.__debug = debug
WSGIApplication.active_instance = self
self.current_request_args = ()
def __call__(self, environ, start_response):
"""Called by WSGI when a request comes in."""
request = self.REQUEST_CLASS(environ)
response = self.RESPONSE_CLASS()
WSGIApplication.active_instance = self
handler = None
groups = ()
for regexp, handler_class in self._url_mapping:
match = regexp.match(request.path)
if match:
handler = handler_class()
handler.initialize(request, response)
groups = match.groups()
break
self.current_request_args = groups
if handler:
try:
method = environ['REQUEST_METHOD']
if method == 'GET':
handler.get(*groups)
elif method == 'POST':
handler.post(*groups)
elif method == 'HEAD':
handler.head(*groups)
elif method == 'OPTIONS':
handler.options(*groups)
elif method == 'PUT':
handler.put(*groups)
elif method == 'DELETE':
handler.delete(*groups)
elif method == 'TRACE':
handler.trace(*groups)
else:
handler.error(501)
except Exception, e:
handler.handle_exception(e, self.__debug)
else:
response.set_status(404)
response.wsgi_write(start_response)
return ['']
def _init_url_mappings(self, handler_tuples):
"""Initializes the maps needed for mapping urls to handlers and handlers
to urls.
Args:
handler_tuples: list of (URI, RequestHandler) pairs.
"""
handler_map = {}
pattern_map = {}
url_mapping = []
for regexp, handler in handler_tuples:
try:
handler_name = handler.__name__
except AttributeError:
pass
else:
handler_map[handler_name] = handler
if not regexp.startswith('^'):
regexp = '^' + regexp
if not regexp.endswith('$'):
regexp += '$'
if regexp == '^/form$':
logging.warning('The URL "/form" is reserved and will not be matched.')
compiled = re.compile(regexp)
url_mapping.append((compiled, handler))
num_groups = len(RE_FIND_GROUPS.findall(regexp))
handler_patterns = pattern_map.setdefault(handler, [])
handler_patterns.append((compiled, num_groups))
self._handler_map = handler_map
self._pattern_map = pattern_map
self._url_mapping = url_mapping
def get_registered_handler_by_name(self, handler_name):
"""Returns the handler given the handler's name.
This uses the application's url mapping.
Args:
handler_name: The __name__ of a handler to return.
Returns:
The handler with the given name.
Raises:
KeyError: If the handler name is not found in the parent application.
"""
try:
return self._handler_map[handler_name]
except:
logging.error('Handler does not map to any urls: %s', handler_name)
raise
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the caller of
create_connection(). This is essentially a wrapper around
Connection that supports 'with'. It can also return a new
Connection, or one from a pool. The class also catches the
deletion of an instance, so that Connections are returned to
the pool on exceptions and so forth without making the caller
responsible for catching them. If possible, the connection is
returned to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool"""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self"""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance"""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
try:
msg = {'result': reply, 'failure': failure}
except TypeError:
msg = {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
'failure': failure}
if ending:
msg['ending'] = True
conn.direct_send(msg_id, msg)
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call"""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
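# Illustrative sketch (not part of the original module): a caller context
# round-trips through pack_context()/unpack_context(). The conf and context
# arguments are assumed to come from the normal RPC call path.
def _example_context_round_trip(conf, context):
    msg = {'method': 'echo', 'args': {'value': 42}}
    pack_context(msg, context)        # adds '_context_<key>' entries to msg
    return unpack_context(conf, msg)  # rebuilds an RpcContext from those keys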
class ProxyCallback(object):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
self.proxy = proxy
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version', None)
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method, args)
def _process_data(self, ctxt, version, method, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, **args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
LOG.exception(_('Exception during message handling'))
ctxt.reply(None, sys.exc_info(),
connection_pool=self.connection_pool)
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
self._connection = connection
self._iterator = connection.iterconsume(timeout=timeout or
conf.rpc_response_timeout)
self._result = None
self._done = False
self._got_ending = False
self._conf = conf
def done(self):
if self._done:
return
self._done = True
self._iterator.close()
self._iterator = None
self._connection.close()
def __call__(self, data):
"""The consume() callback will call this. Store the result."""
if data['failure']:
failure = data['failure']
self._result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
self._result = data['result']
def __iter__(self):
"""Return a result until we get a 'None' response from consumer"""
if self._done:
raise StopIteration
while True:
try:
self._iterator.next()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
result = self._result
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection"""
return ConnectionContext(conf, connection_pool, pooled=not new)
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
# Can't use 'with' for multicall, as it returns an iterator
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
LOG.debug(_('Making asynchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
pack_context(msg, context)
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
conn.topic_send(topic, msg)
return wait_msg
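# Illustrative sketch (not part of the original module): consuming the
# iterator returned by multicall(). The topic and message below are made up;
# conf, context and connection_pool come from the normal RPC setup.
def _example_multicall(conf, context, connection_pool):
    msg = {'method': 'list_instances', 'args': {}}
    results = []
    for reply in multicall(conf, context, 'compute', msg,
                           timeout=None, connection_pool=connection_pool):
        results.append(reply)
    return results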
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, msg)
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, msg)
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, msg)
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, msg)
def notify(conf, context, topic, msg, connection_pool):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
try:
return conf.control_exchange
except cfg.NoSuchOptError:
return 'openstack'
|
|
"""
Test refinement of beam, detector and crystal orientation parameters using
generated reflection positions from ideal geometry, repeating tests with both a
single panel detector, and a geometrically identical 3x3 panel detector,
ensuring the results are the same.
"""
from __future__ import annotations
from collections import namedtuple
from math import pi
import pytest
from cctbx.sgtbx import space_group, space_group_symbols
from cctbx.uctbx import unit_cell
from dxtbx.model import Detector, Panel, ScanFactory
from dxtbx.model.experiment_list import Experiment, ExperimentList
from libtbx.phil import parse
from libtbx.test_utils import approx_equal
from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge
from scitbx import matrix
import dials.tests.algorithms.refinement.setup_geometry as setup_geometry
import dials.tests.algorithms.refinement.setup_minimiser as setup_minimiser
from dials.algorithms.refinement.parameterisation.beam_parameters import (
BeamParameterisation,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationParameterisation,
CrystalUnitCellParameterisation,
)
from dials.algorithms.refinement.parameterisation.detector_parameters import (
DetectorParameterisationMultiPanel,
DetectorParameterisationSinglePanel,
PyDetectorParameterisationMultiPanel,
)
from dials.algorithms.refinement.parameterisation.prediction_parameters import (
XYPhiPredictionParameterisation,
)
from dials.algorithms.refinement.prediction.managed_predictors import (
ScansExperimentsPredictor,
ScansRayPredictor,
)
from dials.algorithms.refinement.reflection_manager import ReflectionManager
from dials.algorithms.refinement.target import (
LeastSquaresPositionalResidualWithRmsdCutoff,
)
from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection
from dials.array_family import flex
def make_panel_in_array(array_elt, reference_panel):
"""Helper function to make a panel in a coplanar array with each panel size
1/3 that of a reference panel"""
px_size = tuple((e / 3.0) for e in reference_panel.get_pixel_size())
ref_panel_size = reference_panel.get_image_size_mm()
x_shift = array_elt[0] * ref_panel_size[0] / 3.0
y_shift = array_elt[1] * ref_panel_size[1] / 3.0
origin = (
matrix.col(reference_panel.get_origin())
+ x_shift * matrix.col(reference_panel.get_fast_axis())
+ y_shift * matrix.col(reference_panel.get_slow_axis())
)
return Panel(
type="PAD",
name="Panel",
fast_axis=reference_panel.get_fast_axis(),
slow_axis=reference_panel.get_slow_axis(),
origin=origin,
pixel_size=px_size,
image_size=reference_panel.get_image_size(),
trusted_range=(0, 1.0e6),
thickness=0.0,
material="",
)
# Setup experimental models
master_phil = parse(
"""
include scope dials.tests.algorithms.refinement.geometry_phil
include scope dials.tests.algorithms.refinement.minimiser_phil
""",
process_includes=True,
)
@pytest.fixture(scope="session")
def init_test():
models = setup_geometry.Extract(master_phil)
single_panel_detector = models.detector
gonio = models.goniometer
crystal = models.crystal
beam = models.beam
# Make a 3x3 multi panel detector filling the same space as the existing
# single panel detector. Each panel of the multi-panel detector has pixels
# with 1/3 the length dimensions of the single panel.
multi_panel_detector = Detector()
for x in range(3):
for y in range(3):
new_panel = make_panel_in_array((x, y), single_panel_detector[0])
multi_panel_detector.add_panel(new_panel)
# Build a mock scan for a 180 degree sequence
sf = ScanFactory()
scan = sf.make_scan(
image_range=(1, 1800),
exposure_times=0.1,
oscillation=(0, 0.1),
epochs=list(range(1800)),
deg=True,
)
sequence_range = scan.get_oscillation_range(deg=False)
im_width = scan.get_oscillation(deg=False)[1]
assert sequence_range == (0.0, pi)
assert approx_equal(im_width, 0.1 * pi / 180.0)
# Build ExperimentLists
experiments_single_panel = ExperimentList()
experiments_multi_panel = ExperimentList()
experiments_single_panel.append(
Experiment(
beam=beam,
detector=single_panel_detector,
goniometer=gonio,
scan=scan,
crystal=crystal,
imageset=None,
)
)
experiments_multi_panel.append(
Experiment(
beam=beam,
detector=multi_panel_detector,
goniometer=gonio,
scan=scan,
crystal=crystal,
imageset=None,
)
)
# Generate some reflections
# All indices in a 2.0 Angstrom sphere
resolution = 2.0
index_generator = IndexGenerator(
crystal.get_unit_cell(),
space_group(space_group_symbols(1).hall()).type(),
resolution,
)
indices = index_generator.to_array()
# for the reflection predictor, it doesn't matter which experiment list is
# passed, as the detector is not used
ref_predictor = ScansRayPredictor(
experiments_single_panel, scan.get_oscillation_range(deg=False)
)
# get two sets of identical reflections
obs_refs_single = ref_predictor(indices)
obs_refs_multi = ref_predictor(indices)
for r1, r2 in zip(obs_refs_single.rows(), obs_refs_multi.rows()):
assert r1["s1"] == r2["s1"]
# get the panel intersections
sel = ray_intersection(single_panel_detector, obs_refs_single)
obs_refs_single = obs_refs_single.select(sel)
sel = ray_intersection(multi_panel_detector, obs_refs_multi)
obs_refs_multi = obs_refs_multi.select(sel)
assert len(obs_refs_single) == len(obs_refs_multi)
# Set 'observed' centroids from the predicted ones
obs_refs_single["xyzobs.mm.value"] = obs_refs_single["xyzcal.mm"]
obs_refs_multi["xyzobs.mm.value"] = obs_refs_multi["xyzcal.mm"]
# Invent some variances for the centroid positions of the simulated data
im_width = 0.1 * pi / 180.0
px_size = single_panel_detector[0].get_pixel_size()
var_x = flex.double(len(obs_refs_single), (px_size[0] / 2.0) ** 2)
var_y = flex.double(len(obs_refs_single), (px_size[1] / 2.0) ** 2)
var_phi = flex.double(len(obs_refs_single), (im_width / 2.0) ** 2)
# set the variances and frame numbers
obs_refs_single["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)
obs_refs_multi["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)
# Add in flags and ID columns by copying into standard reflection tables
tmp = flex.reflection_table.empty_standard(len(obs_refs_single))
tmp.update(obs_refs_single)
obs_refs_single = tmp
tmp = flex.reflection_table.empty_standard(len(obs_refs_multi))
tmp.update(obs_refs_multi)
obs_refs_multi = tmp
test_data = namedtuple(
"test_data",
[
"experiments_single_panel",
"experiments_multi_panel",
"observations_single_panel",
"observations_multi_panel",
],
)
return test_data(
experiments_single_panel,
experiments_multi_panel,
obs_refs_single,
obs_refs_multi,
)
def test(init_test):
single_panel_detector = init_test.experiments_single_panel.detectors()[0]
multi_panel_detector = init_test.experiments_multi_panel.detectors()[0]
beam = init_test.experiments_single_panel.beams()[0]
gonio = init_test.experiments_single_panel.goniometers()[0]
crystal = init_test.experiments_single_panel.crystals()[0]
# Parameterise the models
det_param = DetectorParameterisationSinglePanel(single_panel_detector)
s0_param = BeamParameterisation(beam, gonio)
xlo_param = CrystalOrientationParameterisation(crystal)
xluc_param = CrystalUnitCellParameterisation(crystal)
multi_det_param = DetectorParameterisationMultiPanel(multi_panel_detector, beam)
# Fix beam to the X-Z plane (imgCIF geometry), fix wavelength
s0_param.set_fixed([True, False, True])
# Link model parameterisations together into a parameterisation of the
# prediction equation, first for the single panel detector
pred_param = XYPhiPredictionParameterisation(
init_test.experiments_single_panel,
[det_param],
[s0_param],
[xlo_param],
[xluc_param],
)
# ... and now for the multi-panel detector
pred_param2 = XYPhiPredictionParameterisation(
init_test.experiments_multi_panel,
[multi_det_param],
[s0_param],
[xlo_param],
[xluc_param],
)
################################
# Apply known parameter shifts #
################################
# shift detectors by 1.0 mm each translation and 2 mrad each rotation
det_p_vals = det_param.get_param_vals()
p_vals = [a + b for a, b in zip(det_p_vals, [1.0, 1.0, 1.0, 2.0, 2.0, 2.0])]
det_param.set_param_vals(p_vals)
multi_det_p_vals = multi_det_param.get_param_vals()
p_vals = [a + b for a, b in zip(multi_det_p_vals, [1.0, 1.0, 1.0, 2.0, 2.0, 2.0])]
multi_det_param.set_param_vals(p_vals)
# shift beam by 2 mrad in free axis
s0_p_vals = s0_param.get_param_vals()
p_vals = list(s0_p_vals)
p_vals[0] += 2.0
s0_param.set_param_vals(p_vals)
# rotate crystal a bit (=2 mrad each rotation)
xlo_p_vals = xlo_param.get_param_vals()
p_vals = [a + b for a, b in zip(xlo_p_vals, [2.0, 2.0, 2.0])]
xlo_param.set_param_vals(p_vals)
# change unit cell a bit (=0.1 Angstrom length offsets, 0.1 degree of
# gamma angle)
xluc_p_vals = xluc_param.get_param_vals()
cell_params = crystal.get_unit_cell().parameters()
cell_params = [a + b for a, b in zip(cell_params, [0.1, 0.1, 0.1, 0.0, 0.0, 0.1])]
new_uc = unit_cell(cell_params)
newB = matrix.sqr(new_uc.fractionalization_matrix()).transpose()
S = symmetrize_reduce_enlarge(crystal.get_space_group())
S.set_orientation(orientation=newB)
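# the unit cell parameterisation's free parameters are the independent
# elements returned by symmetrize_reduce_enlarge, scaled by 1e5 (hence the
# factor below)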
X = tuple([e * 1.0e5 for e in S.forward_independent_parameters()])
xluc_param.set_param_vals(X)
###############################
# Undo known parameter shifts #
###############################
s0_param.set_param_vals(s0_p_vals)
det_param.set_param_vals(det_p_vals)
multi_det_param.set_param_vals(det_p_vals)
xlo_param.set_param_vals(xlo_p_vals)
xluc_param.set_param_vals(xluc_p_vals)
#####################################
# Select reflections for refinement #
#####################################
refman = ReflectionManager(
init_test.observations_single_panel, init_test.experiments_single_panel
)
refman2 = ReflectionManager(
init_test.observations_multi_panel, init_test.experiments_multi_panel
)
###############################
# Set up the target functions #
###############################
target = LeastSquaresPositionalResidualWithRmsdCutoff(
init_test.experiments_single_panel,
ScansExperimentsPredictor(init_test.experiments_single_panel),
refman,
pred_param,
restraints_parameterisation=None,
)
target2 = LeastSquaresPositionalResidualWithRmsdCutoff(
init_test.experiments_multi_panel,
ScansExperimentsPredictor(init_test.experiments_multi_panel),
refman2,
pred_param2,
restraints_parameterisation=None,
)
#################################
# Set up the refinement engines #
#################################
refiner = setup_minimiser.Extract(master_phil, target, pred_param).refiner
refiner2 = setup_minimiser.Extract(master_phil, target2, pred_param2).refiner
refiner.run()
# reset parameters and run refinement with the multi panel detector
s0_param.set_param_vals(s0_p_vals)
multi_det_param.set_param_vals(det_p_vals)
xlo_param.set_param_vals(xlo_p_vals)
xluc_param.set_param_vals(xluc_p_vals)
refiner2.run()
# same number of steps
assert refiner.get_num_steps() == refiner2.get_num_steps()
# same rmsds
for rmsd, rmsd2 in zip(refiner.history["rmsd"], refiner2.history["rmsd"]):
assert approx_equal(rmsd, rmsd2)
# same parameter values each step
for params, params2 in zip(
refiner.history["parameter_vector"], refiner2.history["parameter_vector"]
):
assert approx_equal(params, params2)
def test_equivalence_of_python_and_cpp_multipanel_algorithms(init_test):
multi_panel_detector = init_test.experiments_multi_panel.detectors()[0]
beam = init_test.experiments_single_panel.beams()[0]
# Parameterise the models
det_param1 = DetectorParameterisationMultiPanel(multi_panel_detector, beam)
det_param2 = PyDetectorParameterisationMultiPanel(multi_panel_detector, beam)
# shift detectors by 1.0 mm each translation and 2 mrad each rotation
for dp in [det_param1, det_param2]:
p_vals = dp.get_param_vals()
p_vals = [a + b for a, b in zip(p_vals, [1.0, 1.0, 1.0, 2.0, 2.0, 2.0])]
dp.set_param_vals(p_vals)
dp.compose()
for pnl in range(3):
derivatives1 = det_param1.get_ds_dp(multi_state_elt=pnl)
derivatives2 = det_param2.get_ds_dp(multi_state_elt=pnl)
for a, b in zip(derivatives1, derivatives2):
for i, j in zip(a, b):
assert i == pytest.approx(j)
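# A small self-contained sketch (illustrative only, not part of the DIALS test
# suite) of the element-wise comparison idiom used in the loop above: two
# nested sequences of derivative matrices are treated as equivalent when every
# corresponding element agrees within pytest.approx tolerances.
def _nested_derivatives_close(derivs_a, derivs_b):
    """Return True if all paired elements agree approximately."""
    import pytest  # local import keeps the helper self-contained

    for mat_a, mat_b in zip(derivs_a, derivs_b):
        for elem_a, elem_b in zip(mat_a, mat_b):
            if elem_a != pytest.approx(elem_b):
                return False
    return True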
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""IRModule that holds the functions and type definitions."""
from tvm._ffi.base import string_types
import tvm._ffi
from .base import Node
from . import expr as _expr
from . import type as _ty
from . import _ffi_api
@tvm._ffi.register_object("IRModule")
class IRModule(Node):
"""IRModule that holds functions and type definitions.
IRModule is the basic unit for all IR transformations across the stack.
Parameters
----------
functions : Optional[dict]
Map of global var to BaseFunc
type_definitions : Optional[dict]
Map of global type var to ADT type definitions
"""
def __init__(self, functions=None, type_definitions=None):
if functions is None:
functions = {}
elif isinstance(functions, dict):
mapped_funcs = {}
for k, v in functions.items():
if isinstance(k, string_types):
k = _expr.GlobalVar(k)
if not isinstance(k, _expr.GlobalVar):
raise TypeError("Expect functions to be Dict[GlobalVar, Function]")
mapped_funcs[k] = v
functions = mapped_funcs
if type_definitions is None:
type_definitions = {}
elif isinstance(type_definitions, dict):
mapped_type_defs = {}
for k, v in type_definitions.items():
if isinstance(k, string_types):
k = _ty.GlobalTypeVar(k)
if not isinstance(k, _ty.GlobalTypeVar):
raise TypeError("Expect type_definitions to be Dict[GlobalTypeVar, Type]")
mapped_type_defs[k] = v
type_definitions = mapped_type_defs
self.__init_handle_by_constructor__(_ffi_api.IRModule, functions, type_definitions)
def __setitem__(self, var, val):
"""Add a mapping to the module.
Parameters
----------
var: GlobalVar
The global variable.
val: Union[Function, Type]
The value.
"""
return self._add(var, val, True)
def _add(self, var, val, update=True):
if isinstance(val, _expr.RelayExpr):
if isinstance(var, string_types):
if _ffi_api.Module_ContainGlobalVar(self, var):
var = _ffi_api.Module_GetGlobalVar(self, var)
else:
var = _expr.GlobalVar(var)
_ffi_api.Module_Add(self, var, val, update)
else:
assert isinstance(val, _ty.Type)
if isinstance(var, string_types):
var = _ty.GlobalTypeVar(var)
_ffi_api.Module_AddDef(self, var, val, update)
def __getitem__(self, var):
"""Lookup a global definition by name or by variable.
Parameters
----------
var: Union[String, GlobalVar, GlobalTypeVar]
The name or global variable.
Returns
-------
val: Union[Function, Type]
The definition referenced by :code:`var` (either a function or type).
"""
if isinstance(var, string_types):
return _ffi_api.Module_Lookup_str(self, var)
if isinstance(var, _expr.GlobalVar):
return _ffi_api.Module_Lookup(self, var)
return _ffi_api.Module_LookupDef(self, var)
def update(self, other):
"""Insert functions in another Module to current one.
Parameters
----------
other: IRModule
The module to merge into the current Module.
"""
if isinstance(other, dict):
other = IRModule(other)
return _ffi_api.Module_Update(self, other)
def update_func(self, var, func):
"""Update the function corresponding to a global variable in the
module.
Parameters
----------
var: GlobalVar
The global variable.
func: tvm.relay.Function
The function to be inserted.
"""
return _ffi_api.Module_UpdateFunction(self, var, func)
def get_global_var(self, name):
"""Get a global variable in the function by name.
Parameters
----------
name: str
The name of the global variable.
Returns
-------
global_var: GlobalVar
The global variable mapped to :code:`name`.
Raises
------
tvm.error.TVMError if we cannot find corresponding global var.
"""
return _ffi_api.Module_GetGlobalVar(self, name)
def get_global_vars(self):
"""Collect all global vars defined in this module.
Returns
-------
global_vars: Array[GlobalVar]
An array of global vars.
"""
return _ffi_api.Module_GetGlobalVars(self)
def get_global_type_vars(self):
"""Collect all global type vars defined in this module.
Returns
-------
global_type_vars: Array[GlobalTypeVar]
An array of global type vars.
"""
return _ffi_api.Module_GetGlobalTypeVars(self)
def get_global_type_var(self, name):
"""Get a global type variable in the function by name.
Parameters
----------
name: str
The name of the global type variable.
Returns
-------
global_type_var: GlobalTypeVar
The global variable mapped to :code:`name`.
Raises
------
tvm.error.TVMError if we cannot find corresponding global type var.
"""
return _ffi_api.Module_GetGlobalTypeVar(self, name)
def get_constructor(self, tag):
"""Look up an ADT constructor by tag.
Parameters
----------
tag: int
The tag for a constructor.
Returns
-------
constructor: Constructor
The constructor associated with the given tag.
Raises
------
tvm.error.TVMError if the corresponding constructor cannot be found.
"""
return _ffi_api.Module_LookupTag(self, tag)
def get_type(self, name):
ty_var = self.get_global_type_var(name)
ty_data = self.type_definitions[ty_var]
return tuple([ty_var] + list(ty_data.constructors))
@staticmethod
def from_expr(expr, functions=None, type_defs=None):
"""Construct a module from a standalone expression.
Parameters
----------
expr: RelayExpr
The starting expression
functions: Optional[dict]
Map of global vars to function definitions
type_defs: Optional[dict]
Map of global type vars to type definitions
Returns
-------
mod: Module
A module containing the passed definitions,
where expr is set as the entry point
(wrapped in a function if necessary)
"""
funcs = functions if functions is not None else {}
defs = type_defs if type_defs is not None else {}
return _ffi_api.Module_FromExpr(expr, funcs, defs)
def _import(self, file_to_import):
return _ffi_api.Module_Import(self, file_to_import)
def import_from_std(self, file_to_import):
# TODO(@jroesch): clean up prelude
_ffi_api.Module_ImportFromStd(self, file_to_import)
return tvm.relay.transform.InferType()(self)
def __str__(self):
return _ffi_api.PrettyPrint(self)
def __repr__(self):
return self.astext()
def script(self, tir_prefix: str = "T", show_meta: bool = False) -> str:
"""Print IRModule into TVMScript
Parameters
----------
tir_prefix : str
The tir namespace prefix
show_meta : bool
Whether to show meta information
Returns
-------
script : str
The TVM Script of the IRModule
"""
return tvm._ffi.get_global_func("script.AsTVMScript")(
self, tir_prefix, show_meta
) # type: ignore
def get_attr(self, attr_key):
"""Get the IRModule attribute.
Parameters
----------
attr_key : str
The attribute key.
Returns
-------
attr_value : Any
Attribute value
"""
return _ffi_api.Module_GetAttr(self, attr_key)
def with_attr(self, attr_key, attr_value):
"""Copy the IRModule and add an attribute to it.
Parameters
----------
attr_key : str
The attribute key.
attr_value : Object
The new attribute value.
Returns
-------
mod : IRModule
A new copy of the IRModule with the attribute
"""
return _ffi_api.Module_WithAttr(self, attr_key, attr_value)
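# A short usage sketch (illustrative, not part of this module; assumes a TVM
# build with Relay available). It exercises the dict-style API defined above:
# a string key is promoted to a GlobalVar, and lookups work by name.
if __name__ == "__main__":
    from tvm import relay

    x = relay.var("x", shape=(2, 2))
    double = relay.Function([x], x + x)

    mod = IRModule({"double": double})   # string key becomes a GlobalVar
    print(mod.get_global_vars())         # -> [GlobalVar(double)]
    print(mod["double"])                 # lookup by name via __getitem__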
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains classes and functions to standardize access to
configuration files for Astropy and affiliated packages.
.. note::
The configuration system makes use of the 'configobj' package, which stores
configuration in a text format like that used in the standard library
`ConfigParser`. More information and documentation for configobj can be
found at https://configobj.readthedocs.io .
"""
import io
import pkgutil
import warnings
import importlib
import contextlib
import os
from os import path
from textwrap import TextWrapper
from warnings import warn
from contextlib import contextmanager, nullcontext
from astropy.extern.configobj import configobj, validate
from astropy.utils import find_current_module, silence
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.introspection import resolve_name
from .paths import get_config_dir
__all__ = ('InvalidConfigurationItemWarning', 'ConfigurationMissingWarning',
'get_config', 'reload_config', 'ConfigNamespace', 'ConfigItem',
'generate_config', 'create_config_file')
class InvalidConfigurationItemWarning(AstropyWarning):
""" A Warning that is issued when the configuration value specified in the
astropy configuration file does not match the type expected for that
configuration value.
"""
# This was raised with Astropy < 4.3 when the configuration file was not found.
# It is kept for compatibility and should be removed at some point.
@deprecated('5.0')
class ConfigurationMissingWarning(AstropyWarning):
""" A Warning that is issued when the configuration directory cannot be
accessed (usually due to a permissions problem). If this warning appears,
configuration items will be set to their defaults rather than read from the
configuration file, and no configuration will persist across sessions.
"""
# these are not in __all__ because it's not intended that a user ever see them
class ConfigurationDefaultMissingError(ValueError):
""" An exception that is raised when the configuration defaults (which
should be generated at build-time) are missing.
"""
# this is used in astropy/__init__.py
class ConfigurationDefaultMissingWarning(AstropyWarning):
""" A warning that is issued when the configuration defaults (which
should be generated at build-time) are missing.
"""
class ConfigurationChangedWarning(AstropyWarning):
"""
A warning that the configuration options have changed.
"""
class _ConfigNamespaceMeta(type):
def __init__(cls, name, bases, dict):
if cls.__bases__[0] is object:
return
for key, val in dict.items():
if isinstance(val, ConfigItem):
val.name = key
class ConfigNamespace(metaclass=_ConfigNamespaceMeta):
"""
A namespace of configuration items. Each subpackage with
configuration items should define a subclass of this class,
containing `ConfigItem` instances as members.
For example::
class Conf(_config.ConfigNamespace):
unicode_output = _config.ConfigItem(
False,
'Use Unicode characters when outputting values, ...')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when ...',
aliases=['astropy.utils.console.USE_COLOR'])
conf = Conf()
"""
def __iter__(self):
for key, val in self.__class__.__dict__.items():
if isinstance(val, ConfigItem):
yield key
keys = __iter__
"""Iterate over configuration item names."""
def values(self):
"""Iterate over configuration item values."""
for val in self.__class__.__dict__.values():
if isinstance(val, ConfigItem):
yield val
def items(self):
"""Iterate over configuration item ``(name, value)`` pairs."""
for key, val in self.__class__.__dict__.items():
if isinstance(val, ConfigItem):
yield key, val
def set_temp(self, attr, value):
"""
Temporarily set a configuration value.
Parameters
----------
attr : str
Configuration item name
value : object
The value to set temporarily.
Examples
--------
>>> import astropy
>>> with astropy.conf.set_temp('use_color', False):
... pass
... # console output will not contain color
>>> # console output contains color again...
"""
if hasattr(self, attr):
return self.__class__.__dict__[attr].set_temp(value)
raise AttributeError(f"No configuration parameter '{attr}'")
def reload(self, attr=None):
"""
Reload a configuration item from the configuration file.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reload all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
return self.__class__.__dict__[attr].reload()
raise AttributeError(f"No configuration parameter '{attr}'")
for item in self.values():
item.reload()
def reset(self, attr=None):
"""
Reset a configuration item to its default.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reset all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
prop = self.__class__.__dict__[attr]
prop.set(prop.defaultvalue)
return
raise AttributeError(f"No configuration parameter '{attr}'")
for item in self.values():
item.set(item.defaultvalue)
class ConfigItem:
"""
A setting and associated value stored in a configuration file.
These objects should be created as members of
`ConfigNamespace` subclasses, for example::
class _Conf(config.ConfigNamespace):
unicode_output = config.ConfigItem(
False,
'Use Unicode characters when outputting values, and writing widgets '
'to the console.')
conf = _Conf()
Parameters
----------
defaultvalue : object, optional
The default value for this item. If this is a list of strings, this
item will be interpreted as an 'options' value - this item must be one
of those values, and the first in the list will be taken as the default
value.
description : str or None, optional
A description of this item (will be shown as a comment in the
configuration file)
cfgtype : str or None, optional
A type specifier like those used as the *values* of a particular key
in a ``configspec`` file of ``configobj``. If None, the type will be
inferred from the default value.
module : str or None, optional
The full module name that this item is associated with. The first
element (e.g. 'astropy' if this is 'astropy.config.configuration')
will be used to determine the name of the configuration file, while
the remaining items determine the section. If None, the package will be
inferred from the package within which this object's initializer is
called.
aliases : str, or list of str, optional
The deprecated location(s) of this configuration item. If the
config item is not found at the new location, it will be
searched for at all of the old locations.
Raises
------
RuntimeError
If ``module`` is `None`, but the module this item is created from
cannot be determined.
"""
# this is used to make validation faster so a Validator object doesn't
# have to be created every time
_validator = validate.Validator()
cfgtype = None
"""
A type specifier like those used as the *values* of a particular key in a
``configspec`` file of ``configobj``.
"""
rootname = 'astropy'
"""
Rootname sets the base path for all config files.
"""
def __init__(self, defaultvalue='', description=None, cfgtype=None,
module=None, aliases=None):
from astropy.utils import isiterable
if module is None:
module = find_current_module(2)
if module is None:
msg1 = 'Cannot automatically determine get_config module, '
msg2 = 'because it is not called from inside a valid module'
raise RuntimeError(msg1 + msg2)
else:
module = module.__name__
self.module = module
self.description = description
self.__doc__ = description
# now determine cfgtype if it is not given
if cfgtype is None:
if (isiterable(defaultvalue) and not
isinstance(defaultvalue, str)):
# it is an options list
dvstr = [str(v) for v in defaultvalue]
cfgtype = 'option(' + ', '.join(dvstr) + ')'
defaultvalue = dvstr[0]
elif isinstance(defaultvalue, bool):
cfgtype = 'boolean'
elif isinstance(defaultvalue, int):
cfgtype = 'integer'
elif isinstance(defaultvalue, float):
cfgtype = 'float'
elif isinstance(defaultvalue, str):
cfgtype = 'string'
defaultvalue = str(defaultvalue)
self.cfgtype = cfgtype
self._validate_val(defaultvalue)
self.defaultvalue = defaultvalue
if aliases is None:
self.aliases = []
elif isinstance(aliases, str):
self.aliases = [aliases]
else:
self.aliases = aliases
def __set__(self, obj, value):
return self.set(value)
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self()
def set(self, value):
"""
Sets the current value of this ``ConfigItem``.
This also updates the comments that give the description and type
information.
Parameters
----------
value
The value this item should be set to.
Raises
------
TypeError
If the provided ``value`` is not valid for this ``ConfigItem``.
"""
try:
value = self._validate_val(value)
except validate.ValidateError as e:
msg = 'Provided value for configuration item {0} not valid: {1}'
raise TypeError(msg.format(self.name, e.args[0]))
sec = get_config(self.module, rootname=self.rootname)
sec[self.name] = value
@contextmanager
def set_temp(self, value):
"""
Sets this item to a specified value only inside a with block.
Use as::
ITEM = ConfigItem('ITEM', 'default', 'description')
with ITEM.set_temp('newval'):
#... do something that wants ITEM's value to be 'newval' ...
print(ITEM)
# ITEM is now 'default' after the with block
Parameters
----------
value
The value to set this item to inside the with block.
"""
initval = self()
self.set(value)
try:
yield
finally:
self.set(initval)
def reload(self):
""" Reloads the value of this ``ConfigItem`` from the relevant
configuration file.
Returns
-------
val : object
The new value loaded from the configuration file.
"""
self.set(self.defaultvalue)
baseobj = get_config(self.module, True, rootname=self.rootname)
secname = baseobj.name
cobj = baseobj
# a ConfigObj's parent is itself, so walk up until we reach the root object
while cobj.parent is not cobj:
cobj = cobj.parent
newobj = configobj.ConfigObj(cobj.filename, interpolation=False)
if secname is not None:
if secname not in newobj:
return baseobj.get(self.name)
newobj = newobj[secname]
if self.name in newobj:
baseobj[self.name] = newobj[self.name]
return baseobj.get(self.name)
def __repr__(self):
out = '<{}: name={!r} value={!r} at 0x{:x}>'.format(
self.__class__.__name__, self.name, self(), id(self))
return out
def __str__(self):
out = '\n'.join(('{0}: {1}',
' cfgtype={2!r}',
' defaultvalue={3!r}',
' description={4!r}',
' module={5}',
' value={6!r}'))
out = out.format(self.__class__.__name__, self.name, self.cfgtype,
self.defaultvalue, self.description, self.module,
self())
return out
def __call__(self):
""" Returns the value of this ``ConfigItem``
Returns
-------
val : object
This item's value, with a type determined by the ``cfgtype``
attribute.
Raises
------
TypeError
If the configuration value as stored is not this item's type.
"""
def section_name(section):
if section == '':
return 'at the top-level'
else:
return f'in section [{section}]'
options = []
sec = get_config(self.module, rootname=self.rootname)
if self.name in sec:
options.append((sec[self.name], self.module, self.name))
for alias in self.aliases:
module, name = alias.rsplit('.', 1)
sec = get_config(module, rootname=self.rootname)
if '.' in module:
filename, module = module.split('.', 1)
else:
filename = module
module = ''
if name in sec:
if '.' in self.module:
new_module = self.module.split('.', 1)[1]
else:
new_module = ''
warn(
"Config parameter '{}' {} of the file '{}' "
"is deprecated. Use '{}' {} instead.".format(
name, section_name(module), get_config_filename(filename,
rootname=self.rootname),
self.name, section_name(new_module)),
AstropyDeprecationWarning)
options.append((sec[name], module, name))
if len(options) == 0:
self.set(self.defaultvalue)
options.append((self.defaultvalue, None, None))
if len(options) > 1:
filename, sec = self.module.split('.', 1)
warn(
"Config parameter '{}' {} of the file '{}' is "
"given by more than one alias ({}). Using the first.".format(
self.name, section_name(sec), get_config_filename(filename,
rootname=self.rootname),
', '.join([
'.'.join(x[1:3]) for x in options if x[1] is not None])),
AstropyDeprecationWarning)
val = options[0][0]
try:
return self._validate_val(val)
except validate.ValidateError as e:
raise TypeError('Configuration value not valid: ' + e.args[0])
def _validate_val(self, val):
""" Validates the provided value based on cfgtype and returns the
type-cast value
throws the underlying configobj exception if it fails
"""
# note that this will normally use the *class* attribute `_validator`,
# but if some arcane reason is needed for making a special one for an
# instance or sub-class, it will be used
return self._validator.check(self.cfgtype, val)
# this dictionary stores the primary copy of the ConfigObj's for each
# root package
_cfgobjs = {}
def get_config_filename(packageormod=None, rootname=None):
"""
Get the filename of the config file associated with the given
package or module.
"""
cfg = get_config(packageormod, rootname=rootname)
while cfg.parent is not cfg:
cfg = cfg.parent
return cfg.filename
# This is used by testing to override the config file, so we can test
# with various config files that exercise different features of the
# config system.
_override_config_file = None
def get_config(packageormod=None, reload=False, rootname=None):
""" Gets the configuration object or section associated with a particular
package or module.
Parameters
----------
packageormod : str or None
The package for which to retrieve the configuration object. If a
string, it must be a valid package name, or if ``None``, the package from
which this function is called will be used.
reload : bool, optional
Reload the file, even if we have it cached.
rootname : str or None
Name of the root configuration directory. If ``None`` and
``packageormod`` is ``None``, this defaults to be the name of
the package from which this function is called. If ``None`` and
``packageormod`` is not ``None``, this defaults to ``astropy``.
Returns
-------
cfgobj : ``configobj.ConfigObj`` or ``configobj.Section``
If the requested package is a base package, this will be the
``configobj.ConfigObj`` for that package, or if it is a subpackage or
module, it will return the relevant ``configobj.Section`` object.
Raises
------
RuntimeError
If ``packageormod`` is `None`, but the package this item is created
from cannot be determined.
"""
if packageormod is None:
packageormod = find_current_module(2)
if packageormod is None:
msg1 = 'Cannot automatically determine get_config module, '
msg2 = 'because it is not called from inside a valid module'
raise RuntimeError(msg1 + msg2)
else:
packageormod = packageormod.__name__
_autopkg = True
else:
_autopkg = False
packageormodspl = packageormod.split('.')
pkgname = packageormodspl[0]
secname = '.'.join(packageormodspl[1:])
if rootname is None:
if _autopkg:
rootname = pkgname
else:
rootname = 'astropy' # so we don't break affiliated packages
cobj = _cfgobjs.get(pkgname, None)
if cobj is None or reload:
cfgfn = None
try:
# This feature is intended only for use by the unit tests
if _override_config_file is not None:
cfgfn = _override_config_file
else:
cfgfn = path.join(get_config_dir(rootname=rootname), pkgname + '.cfg')
cobj = configobj.ConfigObj(cfgfn, interpolation=False)
except OSError:
# This can happen when HOME is not set
cobj = configobj.ConfigObj(interpolation=False)
# This caches the object, so if the file becomes accessible, this
# function won't see it unless the module is reloaded
_cfgobjs[pkgname] = cobj
if secname: # not the root package
if secname not in cobj:
cobj[secname] = {}
return cobj[secname]
else:
return cobj
def generate_config(pkgname='astropy', filename=None, verbose=False):
"""Generates a configuration file, from the list of `ConfigItem`
objects for each subpackage.
.. versionadded:: 4.1
Parameters
----------
pkgname : str or None
The package for which to retrieve the configuration object.
filename : str or file-like or None
If None, the default configuration path is taken from `get_config`.
verbose : bool
If True, do not silence informational output while importing the
subpackages (deprecation warnings are still filtered out).
"""
if verbose:
verbosity = nullcontext
filter_warnings = AstropyDeprecationWarning
else:
verbosity = silence
filter_warnings = Warning
package = importlib.import_module(pkgname)
with verbosity(), warnings.catch_warnings():
warnings.simplefilter('ignore', category=filter_warnings)
for mod in pkgutil.walk_packages(path=package.__path__,
prefix=package.__name__ + '.'):
if (mod.module_finder.path.endswith(('test', 'tests')) or
mod.name.endswith('setup_package')):
# Skip test and setup_package modules
continue
if mod.name.split('.')[-1].startswith('_'):
# Skip private modules
continue
with contextlib.suppress(ImportError):
importlib.import_module(mod.name)
wrapper = TextWrapper(initial_indent="## ", subsequent_indent='## ',
width=78)
if filename is None:
filename = get_config_filename(pkgname)
with contextlib.ExitStack() as stack:
if isinstance(filename, (str, os.PathLike)):
fp = stack.enter_context(open(filename, 'w'))
else:
# assume it's a file object, or io.StringIO
fp = filename
# Parse the subclasses, ordered by their module name
subclasses = ConfigNamespace.__subclasses__()
processed = set()
for conf in sorted(subclasses, key=lambda x: x.__module__):
mod = conf.__module__
# Skip modules for other packages, e.g. astropy modules that
# would be imported when running the function for astroquery.
if mod.split('.')[0] != pkgname:
continue
# Check that modules are not processed twice, which can happen
# when they are imported in another module.
if mod in processed:
continue
else:
processed.add(mod)
print_module = True
for item in conf().values():
if print_module:
# If this is the first item of the module, we print the
# module name, but not if this is the root package...
if item.module != pkgname:
modname = item.module.replace(f'{pkgname}.', '')
fp.write(f"[{modname}]\n\n")
print_module = False
fp.write(wrapper.fill(item.description) + '\n')
if isinstance(item.defaultvalue, (tuple, list)):
if len(item.defaultvalue) == 0:
fp.write(f'# {item.name} = ,\n\n')
elif len(item.defaultvalue) == 1:
fp.write(f'# {item.name} = {item.defaultvalue[0]},\n\n')
else:
fp.write(f'# {item.name} = {",".join(map(str, item.defaultvalue))}\n\n')
else:
fp.write(f'# {item.name} = {item.defaultvalue}\n\n')
def reload_config(packageormod=None, rootname=None):
""" Reloads configuration settings from a configuration file for the root
package of the requested package/module.
This overwrites any changes that may have been made in `ConfigItem`
objects. This applies for any items that are based on this file, which
is determined by the *root* package of ``packageormod``
(e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'``
module).
Parameters
----------
packageormod : str or None
The package or module name - see `get_config` for details.
rootname : str or None
Name of the root configuration directory - see `get_config`
for details.
"""
sec = get_config(packageormod, True, rootname=rootname)
# look for the section that is its own parent - that's the base object
while sec.parent is not sec:
sec = sec.parent
sec.reload()
def is_unedited_config_file(content, template_content=None):
"""
Determines if a config file can be safely replaced because it doesn't
actually contain any meaningful content, i.e. if it contains only comments
or is completely empty.
"""
buffer = io.StringIO(content)
raw_cfg = configobj.ConfigObj(buffer, interpolation=True)
# If any of the items is set, return False
return not any(len(v) > 0 for v in raw_cfg.values())
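# A quick illustrative check (not part of astropy) of the rule implemented
# above: content consisting only of comments or blank lines counts as
# unedited, while any explicitly set value marks the file as edited.
if __name__ == "__main__":
    assert is_unedited_config_file("# only comments here\n\n")
    assert not is_unedited_config_file("[utils.console]\nuse_color = False\n")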
# This function is no more used by astropy but it is kept for the other
# packages that may use it (e.g. astroquery). It should be removed at some
# point.
# this is not in __all__ because it's not intended that a user uses it
@deprecated('5.0')
def update_default_config(pkg, default_cfg_dir_or_fn, version=None, rootname='astropy'):
"""
Checks if the configuration file for the specified package exists,
and if not, copy over the default configuration. If the
configuration file looks like it has already been edited, we do
not write over it, but instead write a file alongside it named
``pkg.version.cfg`` as a "template" for the user.
Parameters
----------
pkg : str
The package to be updated.
default_cfg_dir_or_fn : str
The filename or directory name where the default configuration file is.
If a directory name, ``'pkg.cfg'`` will be used in that directory.
version : str, optional
The current version of the given package. If not provided, it will
be obtained from ``pkg.__version__``.
rootname : str
Name of the root configuration directory.
Returns
-------
updated : bool
If the profile was updated, `True`, otherwise `False`.
Raises
------
AttributeError
If the version number of the package could not determined.
"""
if path.isdir(default_cfg_dir_or_fn):
default_cfgfn = path.join(default_cfg_dir_or_fn, pkg + '.cfg')
else:
default_cfgfn = default_cfg_dir_or_fn
if not path.isfile(default_cfgfn):
# There is no template configuration file, which basically
# means the affiliated package is not using the configuration
# system, so just return.
return False
cfgfn = get_config(pkg, rootname=rootname).filename
with open(default_cfgfn, 'rt', encoding='latin-1') as fr:
template_content = fr.read()
doupdate = False
if cfgfn is not None:
if path.exists(cfgfn):
with open(cfgfn, 'rt', encoding='latin-1') as fd:
content = fd.read()
identical = (content == template_content)
if not identical:
doupdate = is_unedited_config_file(
content, template_content)
elif path.exists(path.dirname(cfgfn)):
doupdate = True
identical = False
if version is None:
version = resolve_name(pkg, '__version__')
# Don't install template files for dev versions, or we'll end up
# spamming `~/.astropy/config`.
if version and 'dev' not in version and cfgfn is not None:
template_path = path.join(
get_config_dir(rootname=rootname), f'{pkg}.{version}.cfg')
needs_template = not path.exists(template_path)
else:
needs_template = False
if doupdate or needs_template:
if needs_template:
with open(template_path, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
# If we just installed a new template file and we can't
# update the main configuration file because it has user
# changes, display a warning.
if not identical and not doupdate:
warn(
"The configuration options in {} {} may have changed, "
"your configuration file was not updated in order to "
"preserve local changes. A new configuration template "
"has been saved to '{}'.".format(
pkg, version, template_path),
ConfigurationChangedWarning)
if doupdate and not identical:
with open(cfgfn, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
return True
return False
def create_config_file(pkg, rootname='astropy', overwrite=False):
"""
Create the default configuration file for the specified package.
If the file already exists, it is updated only if it has not been
modified. Otherwise the ``overwrite`` flag is needed to overwrite it.
Parameters
----------
pkg : str
The package to be updated.
rootname : str
Name of the root configuration directory.
overwrite : bool
Force updating the file if it already exists.
Returns
-------
updated : bool
If the profile was updated, `True`, otherwise `False`.
"""
# local import to prevent using the logger before it is configured
from astropy.logger import log
cfgfn = get_config_filename(pkg, rootname=rootname)
# generate the default config template
template_content = io.StringIO()
generate_config(pkg, template_content)
template_content.seek(0)
template_content = template_content.read()
doupdate = True
# if the file already exists, check that it has not been modified
if cfgfn is not None and path.exists(cfgfn):
with open(cfgfn, 'rt', encoding='latin-1') as fd:
content = fd.read()
doupdate = is_unedited_config_file(content, template_content)
if doupdate or overwrite:
with open(cfgfn, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
log.info('The configuration file has been successfully written '
f'to {cfgfn}')
return True
elif not doupdate:
log.warning('The configuration file already exists and seems to '
'have been customized, so it has not been updated. '
'Use overwrite=True if you really want to update it.')
return False
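# A minimal end-to-end sketch (illustrative; ``demo_pkg`` and the item below
# are hypothetical and not part of astropy). Run as a script, it shows the
# descriptor behaviour of ConfigItem inside a ConfigNamespace and the
# set_temp() context manager. Note that it touches the real astropy
# configuration directory via get_config().
if __name__ == "__main__":
    class DemoConf(ConfigNamespace):
        greeting = ConfigItem(
            'hello', 'Greeting used by the demo.', module='demo_pkg.demo')

    demo_conf = DemoConf()
    print(demo_conf.greeting)               # -> 'hello' (the default)
    with demo_conf.set_temp('greeting', 'hi'):
        print(demo_conf.greeting)           # -> 'hi' inside the block
    print(demo_conf.greeting)               # -> 'hello' again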
|
|
# Dataviews.py
#
# Copyright 2019 OSIsoft, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib.parse import urlparse
import urllib.request, urllib.parse, urllib.error
import http.client as http
import json
from .SdsError import SdsError
from .Dataview.Dataview import Dataview
from .Dataview.Datagroup import Datagroup
from .BaseClient import BaseClient as BaseClient
import requests
class Dataviews(object):
"""Handles communication with Sds Service"""
def __init__(self, client):
self.__baseClient = client
self.__setPathAndQueryTemplates()
def postDataview(self, namespace_id, dataview):
"""Tells Sds Service to create a dataview based on local 'dataview' or get if existing dataview matches"""
if namespace_id is None:
raise TypeError
if dataview is None or not isinstance(dataview, Dataview):
raise TypeError
response = requests.post(
self.__baseClient.uri_API + self.__dataviewPath.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, dataview_id=dataview.Id),
data=dataview.toJson(),
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError(
"Failed to create dataview, {dataview_id}. {status}:{reason}".format(dataview_id=dataview.Id, status=response.status_code, reason=response.text))
dataview = Dataview.fromJson(json.loads(response.content))
response.close()
return dataview
def putDataview(self, namespace_id, dataview):
"""Tells Sds Service to update a dataview based on local 'dataview'"""
if namespace_id is None:
raise TypeError
if dataview is None or not isinstance(dataview, Dataview):
raise TypeError
response = requests.put(
self.__baseClient.uri_API + self.__dataviewPath.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, dataview_id=dataview.Id),
data=dataview.toJson(),
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError(
"Failed to update dataview, {dataview_id}. {status}:{reason}".format(dataview_id=dataview.Id, status=response.status_code, reason=response.text))
dataview = Dataview.fromJson(json.loads(response.content))
response.close()
return dataview
def deleteDataview(self, namespace_id, dataview_id):
"""Tells Sds Service to delete a dataview based on 'dataview_id'"""
if namespace_id is None:
raise TypeError
if dataview_id is None:
raise TypeError
response = requests.delete(
self.__baseClient.uri_API + self.__dataviewPath.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, dataview_id=dataview_id),
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError(
"Failed to delete dataview, {dataview_id}. {status}:{reason}".format(dataview_id=dataview_id, status=response.status_code, reason=response.text))
response.close()
return
def getDataview(self, namespace_id, dataview_id):
"""Retrieves the dataview specified by 'dataview_id' from Sds Service"""
if namespace_id is None:
raise TypeError
if dataview_id is None:
raise TypeError
response = requests.get(
self.__baseClient.uri_API + self.__dataviewPath.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, dataview_id=dataview_id),
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get dataview, {dataview_id}. {status}:{reason}".
format(dataview_id=dataview_id, status=response.status_code, reason=response.text))
dataview = Dataview.fromJson(json.loads(response.content))
response.close()
return dataview
def getDataviews(self, namespace_id, skip=0, count=100):
"""Retrieves all of the dataviews from Sds Service"""
if namespace_id is None:
raise TypeError
response = requests.get(
self.__baseClient.uri_API + self.__getDataviews.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, skip=skip, count=count),
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get dataviews. {status}:{reason}".
format( status=response.status_code, reason=response.text))
dataviews = json.loads(response.content)
results = []
for t in dataviews:
results.append(Dataview.fromJson(t))
response.close()
return results
def getDatagroups(self, namespace_id, dataview_id, skip=0, count=100):
"""Retrieves all of the datagroups from the specified dataview from Sds Service"""
if namespace_id is None:
raise TypeError
response = requests.get(
self.__baseClient.uri_API + self.__getDatagroups.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, dataview_id=dataview_id, skip=skip, count=count),
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get datagroups for dataview, . {status}:{reason}".
format(dataview_id=dataview_id, status=response.status_code, reason=response.text))
datagroups = json.loads(response.content)
results = {}
for key, value in datagroups.items():
innerobj = {}
for key2, value2 in value.items():
innerobj[key2] = Datagroup.fromJson(value2)
results[key] = innerobj
response.close()
return results
def getDatagroup(self, namespace_id, dataview_id, datagroup_id):
"""Retrieves the datagroup specified by 'datagroup_id' from the specified dataview from Sds Service"""
if namespace_id is None:
raise TypeError
response = requests.get(
self.__baseClient.uri_API + self.__getDatagroup.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, dataview_id=dataview_id, datagroup_id=datagroup_id),
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get datagroup for dataview, . {status}:{reason}".
format(dataview_id=dataview_id, status=response.status_code, reason=response.text))
datagroup = Datagroup.fromJson(json.loads(response.content))
response.close()
return datagroup
# TODO: support the remaining preview query parameters in a smarter way
def getDataviewPreview(self, namespace_id, dataview_id, startIndex=None, endIndex=None, interval=None, form=None, count=-1, value_class=None):
"""Retrieves the dataview preview of the dataview specified by 'dataview_id' from Sds Service"""
if namespace_id is None:
raise TypeError
if dataview_id is None:
raise TypeError
urlAdd = []
urlAddStr = ""
if startIndex is not None:
urlAdd.append("startIndex=" +startIndex)
if endIndex is not None:
urlAdd.append("endIndex=" +endIndex)
if interval is not None:
urlAdd.append("interval=" +interval)
if form is not None:
urlAdd.append("form=" +form)
if count != -1:
urlAdd.append("count=" + str(count))
if len(urlAdd) != 0:
urlAddStr = "?" + '&'.join(str(x) for x in urlAdd)
response = requests.get(
self.__baseClient.uri_API + self.__getDataviewPreview.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, dataview_id=dataview_id) + urlAddStr,
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get dataview preview for dataview {dataview_id}. {status}:{reason}".
format(dataview_id=dataview_id, status=response.status_code, reason=response.text))
content = json.loads(response.content)
response.close()
if value_class is None:
return (content)
return value_class.fromJson(content)
def getDataInterpolated(self, namespace_id, dataview_id, skip=-1, count=-1, form=None, sessionId=-1, value_class=None):
"""Retrieves interpolated data for the dataview specified by 'dataview_id' from Sds Service"""
if namespace_id is None:
raise TypeError
if dataview_id is None:
raise TypeError
urlAdd = []
urlAddStr = ""
if count != -1:
urlAdd.append("count=" + str(count))
if skip != -1:
urlAdd.append("skip=" + str(count))
if form is not None:
urlAdd.append("form=" +form)
if sessionId != -1:
urlAdd.append("sessionId=" + str(count))
if len(urlAdd) != 0:
urlAddStr = "?" + '&'.join(str(x) for x in urlAdd)
response = requests.get(
self.__baseClient.uri_API + self.__getDataInterpolated.format(tenant_id=self.__baseClient.tenant, namespace_id=namespace_id, dataview_id=dataview_id) + urlAddStr,
headers=self.__baseClient.sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get dataview data interpolated for dataview {dataview_id}. {status}:{reason}".
format(dataview_id=dataview_id, status=response.status_code, reason=response.text))
if form is not None:
text = response.content.decode("utf-8")
response.close()
return text
content = json.loads(response.content)
response.close()
if value_class is None:
return (content)
return value_class.fromJson(content)
def __setPathAndQueryTemplates(self):
self.__basePath = "/Tenants/{tenant_id}/Namespaces/{namespace_id}"
self.__dataviewsPath = self.__basePath + "/Dataviews"
self.__getDataviews = self.__dataviewsPath + "?skip={skip}&count={count}"
self.__dataviewPath = self.__dataviewsPath + "/{dataview_id}"
self.__datagroupPath = self.__dataviewPath + "/Datagroups"
self.__getDatagroup = self.__datagroupPath + "/{datagroup_id}"
self.__getDatagroups = self.__datagroupPath + "?skip={skip}&count={count}"
self.__getDataviewPreview = self.__dataviewPath + "/preview/interpolated"
self.__getDataInterpolated = self.__dataviewPath + "/data/interpolated"
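# A small illustrative check (not part of the SDK) of how the path and query
# templates composed above expand; the ids used here are made up.
if __name__ == "__main__":
    dataview_path = ("/Tenants/{tenant_id}/Namespaces/{namespace_id}"
                     "/Dataviews/{dataview_id}")
    print(dataview_path.format(tenant_id="my-tenant",
                               namespace_id="my-namespace",
                               dataview_id="my-dataview"))
    # -> /Tenants/my-tenant/Namespaces/my-namespace/Dataviews/my-dataview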
|
|
# Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import pytest
from nacl.encoding import HexEncoder
from nacl.exceptions import CryptoError
from nacl.public import Box, PrivateKey, PublicKey
from nacl.utils import random
from .test_bindings import _box_from_seed_vectors
from .utils import check_type_error
VECTORS = [
# privalice, pubalice, privbob, pubbob, nonce, plaintext, ciphertext
(
b"77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a",
b"8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a",
b"5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb",
b"de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f",
b"69696ee955b62b73cd62bda875fc73d68219e0036b7a0b37",
(
b"be075fc53c81f2d5cf141316ebeb0c7b5228c52a4c62cbd44b66849b64244ffce5e"
b"cbaaf33bd751a1ac728d45e6c61296cdc3c01233561f41db66cce314adb310e3be8"
b"250c46f06dceea3a7fa1348057e2f6556ad6b1318a024a838f21af1fde048977eb4"
b"8f59ffd4924ca1c60902e52f0a089bc76897040e082f937763848645e0705"
),
(
b"f3ffc7703f9400e52a7dfb4b3d3305d98e993b9f48681273c29650ba32fc76ce483"
b"32ea7164d96a4476fb8c531a1186ac0dfc17c98dce87b4da7f011ec48c97271d2c2"
b"0f9b928fe2270d6fb863d51738b48eeee314a7cc8ab932164548e526ae902243685"
b"17acfeabd6bb3732bc0e9da99832b61ca01b6de56244a9e88d5f9b37973f622a43d"
b"14a6599b1f654cb45a74e355a5"
),
),
]
def test_generate_private_key():
PrivateKey.generate()
def test_generate_private_key_from_random_seed():
PrivateKey.from_seed(random(PrivateKey.SEED_SIZE))
@pytest.mark.parametrize(
("seed", "public_key", "secret_key"), _box_from_seed_vectors()
)
def test_generate_private_key_from_seed(
seed: bytes, public_key: bytes, secret_key: bytes
):
prvt = PrivateKey.from_seed(seed, encoder=HexEncoder)
sk = binascii.unhexlify(secret_key)
pk = binascii.unhexlify(public_key)
assert bytes(prvt) == sk
assert bytes(prvt.public_key) == pk
def test_box_creation():
pub = PublicKey(
b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
priv = PrivateKey(
b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
Box(priv, pub)
def test_box_decode():
pub = PublicKey(
b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
priv = PrivateKey(
b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
b1 = Box(priv, pub)
b2 = Box.decode(b1._shared_key)
assert b1._shared_key == b2._shared_key
def test_box_bytes():
pub = PublicKey(
b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
priv = PrivateKey(
b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
b = Box(priv, pub)
assert bytes(b) == b._shared_key
@pytest.mark.parametrize(
(
"privalice",
"pubalice",
"privbob",
"pubbob",
"nonce",
"plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_encryption(
privalice: bytes,
pubalice: bytes,
privbob: bytes,
pubbob: bytes,
nonce: bytes,
plaintext: bytes,
ciphertext: bytes,
):
pubalice_decoded = PublicKey(pubalice, encoder=HexEncoder)
privbob_decoded = PrivateKey(privbob, encoder=HexEncoder)
box = Box(privbob_decoded, pubalice_decoded)
encrypted = box.encrypt(
binascii.unhexlify(plaintext),
binascii.unhexlify(nonce),
encoder=HexEncoder,
)
expected = binascii.hexlify(
binascii.unhexlify(nonce) + binascii.unhexlify(ciphertext),
)
assert encrypted == expected
assert encrypted.nonce == nonce
assert encrypted.ciphertext == ciphertext
@pytest.mark.parametrize(
(
"privalice",
"pubalice",
"privbob",
"pubbob",
"nonce",
"plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_decryption(
privalice: bytes,
pubalice: bytes,
privbob: bytes,
pubbob: bytes,
nonce: bytes,
plaintext: bytes,
ciphertext: bytes,
):
pubbob_decoded = PublicKey(pubbob, encoder=HexEncoder)
privalice_decoded = PrivateKey(privalice, encoder=HexEncoder)
box = Box(privalice_decoded, pubbob_decoded)
nonce = binascii.unhexlify(nonce)
decrypted = binascii.hexlify(
box.decrypt(ciphertext, nonce, encoder=HexEncoder),
)
assert decrypted == plaintext
@pytest.mark.parametrize(
(
"privalice",
"pubalice",
"privbob",
"pubbob",
"nonce",
"plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_decryption_combined(
privalice: bytes,
pubalice: bytes,
privbob: bytes,
pubbob: bytes,
nonce: bytes,
plaintext: bytes,
ciphertext: bytes,
):
pubbob_decoded = PublicKey(pubbob, encoder=HexEncoder)
privalice_decoded = PrivateKey(privalice, encoder=HexEncoder)
box = Box(privalice_decoded, pubbob_decoded)
combined = binascii.hexlify(
binascii.unhexlify(nonce) + binascii.unhexlify(ciphertext),
)
decrypted = binascii.hexlify(box.decrypt(combined, encoder=HexEncoder))
assert decrypted == plaintext
@pytest.mark.parametrize(
(
"privalice",
"pubalice",
"privbob",
"pubbob",
"nonce",
"plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_optional_nonce(
privalice: bytes,
pubalice: bytes,
privbob: bytes,
pubbob: bytes,
nonce: bytes,
plaintext: bytes,
ciphertext: bytes,
):
pubbob_decoded = PublicKey(pubbob, encoder=HexEncoder)
privalice_decoded = PrivateKey(privalice, encoder=HexEncoder)
box = Box(privalice_decoded, pubbob_decoded)
encrypted = box.encrypt(binascii.unhexlify(plaintext), encoder=HexEncoder)
decrypted = binascii.hexlify(box.decrypt(encrypted, encoder=HexEncoder))
assert decrypted == plaintext
@pytest.mark.parametrize(
(
"privalice",
"pubalice",
"privbob",
"pubbob",
"nonce",
"plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_encryption_generates_different_nonces(
privalice: bytes,
pubalice: bytes,
privbob: bytes,
pubbob: bytes,
nonce: bytes,
plaintext: bytes,
ciphertext: bytes,
):
pubbob_decoded = PublicKey(pubbob, encoder=HexEncoder)
privalice_decoded = PrivateKey(privalice, encoder=HexEncoder)
box = Box(privalice_decoded, pubbob_decoded)
nonce_0 = box.encrypt(
binascii.unhexlify(plaintext), encoder=HexEncoder
).nonce
nonce_1 = box.encrypt(
binascii.unhexlify(plaintext), encoder=HexEncoder
).nonce
assert nonce_0 != nonce_1
@pytest.mark.parametrize(
(
"privalice",
"pubalice",
"privbob",
"pubbob",
"nonce",
"plaintext",
"ciphertext",
),
VECTORS,
)
def test_box_failed_decryption(
privalice: bytes,
pubalice: bytes,
privbob: bytes,
pubbob: bytes,
nonce: bytes,
plaintext: bytes,
ciphertext: bytes,
):
pubbob_decoded = PublicKey(pubbob, encoder=HexEncoder)
privbob_decoded = PrivateKey(privbob, encoder=HexEncoder)
# this cannot decrypt the ciphertext! the ciphertext must be decrypted by
# (privalice, pubbob) or (privbob, pubalice)
box = Box(privbob_decoded, pubbob_decoded)
with pytest.raises(CryptoError):
box.decrypt(ciphertext, binascii.unhexlify(nonce), encoder=HexEncoder)
def test_box_wrong_length():
with pytest.raises(ValueError):
PublicKey(b"")
# TODO: should the below raise a ValueError?
with pytest.raises(TypeError):
PrivateKey(b"")
pub = PublicKey(
b"ec2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
priv = PrivateKey(
b"5c2bee2d5be613ca82e377c96a0bf2220d823ce980cdff6279473edc52862798",
encoder=HexEncoder,
)
b = Box(priv, pub)
with pytest.raises(ValueError):
b.encrypt(b"", b"")
with pytest.raises(ValueError):
b.decrypt(b"", b"")
def test_wrong_types():
priv = PrivateKey.generate()
check_type_error(
("PrivateKey must be created from a 32 bytes long raw secret key"),
PrivateKey,
12,
)
check_type_error(
("PrivateKey must be created from a 32 bytes long raw secret key"),
PrivateKey,
priv,
)
check_type_error(
("PrivateKey must be created from a 32 bytes long raw secret key"),
PrivateKey,
priv.public_key,
)
check_type_error("PublicKey must be created from 32 bytes", PublicKey, 13)
check_type_error(
"PublicKey must be created from 32 bytes", PublicKey, priv
)
check_type_error(
"PublicKey must be created from 32 bytes", PublicKey, priv.public_key
)
check_type_error(
"Box must be created from a PrivateKey and a PublicKey",
Box,
priv,
"not a public key",
)
check_type_error(
"Box must be created from a PrivateKey and a PublicKey",
Box,
priv.encode(),
priv.public_key.encode(),
)
check_type_error(
"Box must be created from a PrivateKey and a PublicKey",
Box,
priv,
priv.public_key.encode(),
)
check_type_error(
"Box must be created from a PrivateKey and a PublicKey",
Box,
priv.encode(),
priv.public_key,
)
check_type_error(
"seed must be a 32 bytes long", PrivateKey.from_seed, b"1"
)
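# A minimal round-trip sketch (not part of the upstream test suite): encrypt
# with a Box built from Bob's private key and Alice's public key, then decrypt
# with the mirror-image Box, letting PyNaCl generate a random nonce.
def test_box_roundtrip_sketch():
    alice, bob = PrivateKey.generate(), PrivateKey.generate()
    message = b"example message"
    encrypted = Box(bob, alice.public_key).encrypt(message)
    assert Box(alice, bob.public_key).decrypt(encrypted) == message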
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova import availability_zones
from nova import context as nova_context
from nova import db
from nova import exception
from nova.notifications.objects import base as notification
from nova.notifications.objects import service as service_notification
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 22
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
# time we bump the version, we will put an entry here to record the change,
# along with any pertinent data. For things that we can programmatically
# detect that need a bump, we put something in _collect_things() below to
# assemble a dict of things we can check. For example, we pretty much always
# want to consider the compute RPC API version a thing that requires a service
# bump so that we can drive version pins from it. We could include other
# service RPC versions at some point, minimum object versions, etc.
#
# The TestServiceVersion test will fail if the calculated set of
# things differs from the value in the last item of the list below,
# indicating that a version bump is needed.
#
# Also note that there are other reasons we may want to bump this,
# which will not be caught by the test. An example of this would be
# triggering (or disabling) an online data migration once all services
# in the cluster are at the same level.
#
# If a version bump is required for something mechanical, just document
# that generic thing here (like compute RPC version bumps). No need to
# replicate the details from compute/rpcapi.py here. However, for more
# complex service interactions, extra detail should be provided
SERVICE_VERSION_HISTORY = (
# Version 0: Pre-history
{'compute_rpc': '4.0'},
# Version 1: Introduction of SERVICE_VERSION
{'compute_rpc': '4.4'},
# Version 2: Compute RPC version 4.5
{'compute_rpc': '4.5'},
# Version 3: Compute RPC version 4.6
{'compute_rpc': '4.6'},
# Version 4: Add PciDevice.parent_addr (data migration needed)
{'compute_rpc': '4.6'},
# Version 5: Compute RPC version 4.7
{'compute_rpc': '4.7'},
# Version 6: Compute RPC version 4.8
{'compute_rpc': '4.8'},
# Version 7: Compute RPC version 4.9
{'compute_rpc': '4.9'},
# Version 8: Compute RPC version 4.10
{'compute_rpc': '4.10'},
# Version 9: Compute RPC version 4.11
{'compute_rpc': '4.11'},
# Version 10: Compute node conversion to Inventories
{'compute_rpc': '4.11'},
# Version 11: Compute RPC version 4.12
{'compute_rpc': '4.12'},
# Version 12: The network APIs and compute manager support a NetworkRequest
# object where the network_id value is 'auto' or 'none'. BuildRequest
# objects are populated by nova-api during instance boot.
{'compute_rpc': '4.12'},
# Version 13: Compute RPC version 4.13
{'compute_rpc': '4.13'},
# Version 14: The compute manager supports setting device tags.
{'compute_rpc': '4.13'},
# Version 15: Indicate that nova-conductor will stop a boot if BuildRequest
# is deleted before RPC to nova-compute.
{'compute_rpc': '4.13'},
# Version 16: Indicate that nova-compute will refuse to start if it doesn't
# have a placement section configured.
{'compute_rpc': '4.13'},
# Version 17: Add 'reserve_volume' to the boot from volume flow and
# remove 'check_attach'. The service version bump is needed to fall back to
# the old check in the API as the old computes fail if the volume is moved
# to 'attaching' state by reserve.
{'compute_rpc': '4.13'},
# Version 18: Compute RPC version 4.14
{'compute_rpc': '4.14'},
# Version 19: Compute RPC version 4.15
{'compute_rpc': '4.15'},
# Version 20: Compute RPC version 4.16
{'compute_rpc': '4.16'},
# Version 21: Compute RPC version 4.17
{'compute_rpc': '4.17'},
# Version 22: A marker for the behaviour change of auto-healing code on the
# compute host regarding allocations against an instance
{'compute_rpc': '4.17'},
)
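# Example (illustrative only, not an actual bump): a hypothetical bump for a
# compute RPC 4.18 release would set SERVICE_VERSION = 23 above and append an
# entry such as:
#     # Version 23: Compute RPC version 4.18
#     {'compute_rpc': '4.18'},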
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Service(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added compute_node nested object
# Version 1.2: String attributes updated to support unicode
# Version 1.3: ComputeNode version 1.5
# Version 1.4: Added use_slave to get_by_compute_host
# Version 1.5: ComputeNode version 1.6
# Version 1.6: ComputeNode version 1.7
# Version 1.7: ComputeNode version 1.8
# Version 1.8: ComputeNode version 1.9
# Version 1.9: ComputeNode version 1.10
# Version 1.10: Changes behaviour of loading compute_node
# Version 1.11: Added get_by_host_and_binary
# Version 1.12: ComputeNode version 1.11
# Version 1.13: Added last_seen_up
# Version 1.14: Added forced_down
# Version 1.15: ComputeNode version 1.12
# Version 1.16: Added version
# Version 1.17: ComputeNode version 1.13
# Version 1.18: ComputeNode version 1.14
# Version 1.19: Added get_minimum_version()
# Version 1.20: Added get_minimum_version_multi()
# Version 1.21: Added uuid
# Version 1.22: Added get_by_uuid()
VERSION = '1.22'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
'host': fields.StringField(nullable=True),
'binary': fields.StringField(nullable=True),
'topic': fields.StringField(nullable=True),
'report_count': fields.IntegerField(),
'disabled': fields.BooleanField(),
'disabled_reason': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'compute_node': fields.ObjectField('ComputeNode'),
'last_seen_up': fields.DateTimeField(nullable=True),
'forced_down': fields.BooleanField(),
'version': fields.IntegerField(),
}
_MIN_VERSION_CACHE = {}
_SERVICE_VERSION_CACHING = False
def __init__(self, *args, **kwargs):
# NOTE(danms): We're going against the rules here and overriding
# init. The reason is that we want to *ensure* that we're always
# setting the current service version on our objects, overriding
# whatever else might be set in the database, or otherwise (which
# is the normal reason not to override init).
#
# We also need to do this here so that it's set on the client side
# all the time, such that create() and save() operations will
# include the current service version.
if 'version' in kwargs:
raise exception.ObjectActionError(
action='init',
reason='Version field is immutable')
super(Service, self).__init__(*args, **kwargs)
self.version = SERVICE_VERSION
def obj_make_compatible_from_manifest(self, primitive, target_version,
version_manifest):
super(Service, self).obj_make_compatible_from_manifest(
primitive, target_version, version_manifest)
_target_version = versionutils.convert_version_to_tuple(target_version)
if _target_version < (1, 21) and 'uuid' in primitive:
del primitive['uuid']
if _target_version < (1, 16) and 'version' in primitive:
del primitive['version']
if _target_version < (1, 14) and 'forced_down' in primitive:
del primitive['forced_down']
if _target_version < (1, 13) and 'last_seen_up' in primitive:
del primitive['last_seen_up']
if _target_version < (1, 10):
# service.compute_node was not lazy-loaded, we need to provide it
# when called
self._do_compute_node(self._context, primitive,
version_manifest)
def _do_compute_node(self, context, primitive, version_manifest):
try:
target_version = version_manifest['ComputeNode']
            # NOTE(sbauza): Some drivers (VMware, Ironic) can have multiple
            # nodes for the same service, but to keep the same behaviour we
            # return only the first element of the list
compute = objects.ComputeNodeList.get_all_by_host(
context, primitive['host'])[0]
except Exception:
return
primitive['compute_node'] = compute.obj_to_primitive(
target_version=target_version,
version_manifest=version_manifest)
@staticmethod
def _from_db_object(context, service, db_service):
allow_missing = ('availability_zone',)
for key in service.fields:
if key in allow_missing and key not in db_service:
continue
if key == 'compute_node':
                # NOTE(sbauza): We want to only lazy-load compute_node
continue
elif key == 'version':
# NOTE(danms): Special handling of the version field, since
# it is read_only and set in our init.
setattr(service, base.get_attrname(key), db_service[key])
elif key == 'uuid' and not db_service.get(key):
# Leave uuid off the object if undefined in the database
# so that it will be generated below.
continue
else:
service[key] = db_service[key]
service._context = context
service.obj_reset_changes()
# TODO(dpeschman): Drop this once all services have uuids in database
if 'uuid' not in service:
service.uuid = uuidutils.generate_uuid()
LOG.debug('Generated UUID %(uuid)s for service %(id)i',
dict(uuid=service.uuid, id=service.id))
service.save()
return service
def obj_load_attr(self, attrname):
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s",
{'attr': attrname,
'name': self.obj_name(),
'id': self.id,
})
if attrname != 'compute_node':
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if self.binary == 'nova-compute':
# Only n-cpu services have attached compute_node(s)
compute_nodes = objects.ComputeNodeList.get_all_by_host(
self._context, self.host)
else:
            # NOTE(sbauza): Previous behaviour was raising a ServiceNotFound;
            # we keep it for backwards compatibility
raise exception.ServiceNotFound(service_id=self.id)
        # NOTE(sbauza): Some drivers (VMware, Ironic) can have multiple nodes
        # for the same service, but to keep the same behaviour we return only
        # the first element of the list
self.compute_node = compute_nodes[0]
@base.remotable_classmethod
def get_by_id(cls, context, service_id):
db_service = db.service_get(context, service_id)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_uuid(cls, context, service_uuid):
db_service = db.service_get_by_uuid(context, service_uuid)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_topic(cls, context, host, topic):
db_service = db.service_get_by_host_and_topic(context, host, topic)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_binary(cls, context, host, binary):
try:
db_service = db.service_get_by_host_and_binary(context,
host, binary)
except exception.HostBinaryNotFound:
return
return cls._from_db_object(context, cls(), db_service)
@staticmethod
@db.select_db_reader_mode
def _db_service_get_by_compute_host(context, host, use_slave=False):
return db.service_get_by_compute_host(context, host)
@base.remotable_classmethod
def get_by_compute_host(cls, context, host, use_slave=False):
db_service = cls._db_service_get_by_compute_host(context, host,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_service)
# NOTE(ndipanov): This is deprecated and should be removed on the next
# major version bump
@base.remotable_classmethod
def get_by_args(cls, context, host, binary):
db_service = db.service_get_by_host_and_binary(context, host, binary)
return cls._from_db_object(context, cls(), db_service)
def _check_minimum_version(self):
"""Enforce that we are not older that the minimum version.
This is a loose check to avoid creating or updating our service
record if we would do so with a version that is older that the current
minimum of all services. This could happen if we were started with
older code by accident, either due to a rollback or an old and
un-updated node suddenly coming back onto the network.
There is technically a race here between the check and the update,
but since the minimum version should always roll forward and never
backwards, we don't need to worry about doing it atomically. Further,
the consequence for getting this wrong is minor, in that we'll just
fail to send messages that other services understand.
"""
if not self.obj_attr_is_set('version'):
return
if not self.obj_attr_is_set('binary'):
return
minver = self.get_minimum_version(self._context, self.binary)
if minver > self.version:
raise exception.ServiceTooOld(thisver=self.version,
minver=minver)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
self._check_minimum_version()
updates = self.obj_get_changes()
if 'uuid' not in updates:
updates['uuid'] = uuidutils.generate_uuid()
self.uuid = updates['uuid']
db_service = db.service_create(self._context, updates)
self._from_db_object(self._context, self, db_service)
@base.remotable
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
self._check_minimum_version()
db_service = db.service_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_service)
self._send_status_update_notification(updates)
def _send_status_update_notification(self, updates):
# Note(gibi): We do not trigger notification on version as that field
# is always dirty, which would cause that nova sends notification on
# every other field change. See the comment in save() too.
if set(updates.keys()).intersection(
{'disabled', 'disabled_reason', 'forced_down'}):
payload = service_notification.ServiceStatusPayload(self)
service_notification.ServiceStatusNotification(
publisher=notification.NotificationPublisher.from_service_obj(
self),
event_type=notification.EventType(
object='service',
action=fields.NotificationAction.UPDATE),
priority=fields.NotificationPriority.INFO,
payload=payload).emit(self._context)
@base.remotable
def destroy(self):
db.service_destroy(self._context, self.id)
@classmethod
def enable_min_version_cache(cls):
cls.clear_min_version_cache()
cls._SERVICE_VERSION_CACHING = True
@classmethod
def clear_min_version_cache(cls):
cls._MIN_VERSION_CACHE = {}
@staticmethod
@db.select_db_reader_mode
def _db_service_get_minimum_version(context, binaries, use_slave=False):
return db.service_get_minimum_version(context, binaries)
@base.remotable_classmethod
def get_minimum_version_multi(cls, context, binaries, use_slave=False):
if not all(binary.startswith('nova-') for binary in binaries):
LOG.warning('get_minimum_version called with likely-incorrect '
'binaries `%s\'', ','.join(binaries))
raise exception.ObjectActionError(action='get_minimum_version',
reason='Invalid binary prefix')
if (not cls._SERVICE_VERSION_CACHING or
any(binary not in cls._MIN_VERSION_CACHE
for binary in binaries)):
min_versions = cls._db_service_get_minimum_version(
context, binaries, use_slave=use_slave)
if min_versions:
min_versions = {binary: version or 0
for binary, version in
min_versions.items()}
cls._MIN_VERSION_CACHE.update(min_versions)
else:
min_versions = {binary: cls._MIN_VERSION_CACHE[binary]
for binary in binaries}
if min_versions:
version = min(min_versions.values())
else:
version = 0
# NOTE(danms): Since our return value is not controlled by object
# schema, be explicit here.
version = int(version)
return version
@base.remotable_classmethod
def get_minimum_version(cls, context, binary, use_slave=False):
return cls.get_minimum_version_multi(context, [binary],
use_slave=use_slave)
def get_minimum_version_all_cells(context, binaries):
"""Get the minimum service version, checking all cells"""
cells = objects.CellMappingList.get_all(context)
min_version = None
for cell in cells:
with nova_context.target_cell(context, cell) as cctxt:
version = objects.Service.get_minimum_version_multi(
cctxt, binaries)
min_version = min(min_version, version) if min_version else version
return min_version
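# Illustrative usage (a minimal sketch; `ctxt` is assumed to be an available
# RequestContext):
#     lowest = get_minimum_version_all_cells(ctxt, ['nova-compute'])
# returns the lowest reported 'nova-compute' service version across all cells.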
@base.NovaObjectRegistry.register
class ServiceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Service <= version 1.2
    # Version 1.1: Service version 1.3
# Version 1.2: Service version 1.4
# Version 1.3: Service version 1.5
# Version 1.4: Service version 1.6
# Version 1.5: Service version 1.7
# Version 1.6: Service version 1.8
# Version 1.7: Service version 1.9
# Version 1.8: Service version 1.10
# Version 1.9: Added get_by_binary() and Service version 1.11
# Version 1.10: Service version 1.12
# Version 1.11: Service version 1.13
# Version 1.12: Service version 1.14
# Version 1.13: Service version 1.15
# Version 1.14: Service version 1.16
# Version 1.15: Service version 1.17
# Version 1.16: Service version 1.18
# Version 1.17: Service version 1.19
# Version 1.18: Added include_disabled parameter to get_by_binary()
# Version 1.19: Added get_all_computes_by_hv_type()
VERSION = '1.19'
fields = {
'objects': fields.ListOfObjectsField('Service'),
}
@base.remotable_classmethod
def get_by_topic(cls, context, topic):
db_services = db.service_get_all_by_topic(context, topic)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
# NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag
# will be removed so both enabled and disabled hosts are returned
@base.remotable_classmethod
def get_by_binary(cls, context, binary, include_disabled=False):
db_services = db.service_get_all_by_binary(
context, binary, include_disabled=include_disabled)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_by_host(cls, context, host):
db_services = db.service_get_all_by_host(context, host)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all(cls, context, disabled=None, set_zones=False):
db_services = db.service_get_all(context, disabled=disabled)
if set_zones:
db_services = availability_zones.set_availability_zones(
context, db_services)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all_computes_by_hv_type(cls, context, hv_type):
db_services = db.service_get_all_computes_by_hv_type(
context, hv_type, include_disabled=False)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
|
|
from __future__ import unicode_literals
import errno
import os
import socket
import time
from .common import FileDownloader
from ..compat import (
compat_urllib_request,
compat_urllib_error,
)
from ..utils import (
ContentTooShortError,
encodeFilename,
sanitize_open,
)
class HttpFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
stream = None
# Do not include the Accept-Encoding header
headers = {'Youtubedl-no-compression': 'True'}
add_headers = info_dict.get('http_headers')
if add_headers:
headers.update(add_headers)
data = info_dict.get('http_post_data')
http_method = info_dict.get('http_method')
basic_request = compat_urllib_request.Request(url, data, headers)
request = compat_urllib_request.Request(url, data, headers)
if http_method is not None:
basic_request.get_method = lambda: http_method
request.get_method = lambda: http_method
is_test = self.params.get('test', False)
if is_test:
request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))
# Establish possible resume length
if os.path.isfile(encodeFilename(tmpfilename)):
resume_len = os.path.getsize(encodeFilename(tmpfilename))
else:
resume_len = 0
open_mode = 'wb'
if resume_len != 0:
if self.params.get('continuedl', False):
self.report_resuming_byte(resume_len)
request.add_header('Range', 'bytes=%d-' % resume_len)
open_mode = 'ab'
else:
resume_len = 0
count = 0
retries = self.params.get('retries', 0)
while count <= retries:
# Establish connection
try:
data = self.ydl.urlopen(request)
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
# Unexpected HTTP error
raise
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = self.ydl.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (compat_urllib_error.HTTPError, ) as err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < int(content_length) < resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
self.report_file_already_downloaded(filename)
self.try_rename(tmpfilename, filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
})
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
except socket.error as e:
if e.errno != errno.ECONNRESET:
# Connection reset is no problem, just retry
raise
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if count > retries:
self.report_error('giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
# Range HTTP header may be ignored/unsupported by a webserver
# (e.g. extractor/scivee.py, extractor/bambuser.py).
# However, for a test we still would like to download just a piece of a file.
# To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
# block size when downloading a file.
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
data_len = self._TEST_FILE_SIZE
if data_len is not None:
data_len = int(data_len) + resume_len
min_data_len = self.params.get("min_filesize", None)
max_data_len = self.params.get("max_filesize", None)
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
return False
byte_counter = 0 + resume_len
block_size = self.params.get('buffersize', 1024)
start = time.time()
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
now = None # needed for slow_down() in the first loop run
before = start # start measuring
while True:
# Download and write
data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
byte_counter += len(data_block)
# exit loop when download is finished
if len(data_block) == 0:
break
# Open destination file just in time
if stream is None:
try:
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
assert stream is not None
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.report_error('unable to open for writing: %s' % str(err))
return False
if self.params.get('xattr_set_filesize', False) and data_len is not None:
try:
import xattr
xattr.setxattr(tmpfilename, 'user.ytdl.filesize', str(data_len))
except(OSError, IOError, ImportError) as err:
self.report_error('unable to set filesize xattr: %s' % str(err))
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err))
return False
# Apply rate limit
self.slow_down(start, now, byte_counter - resume_len)
# end measuring of one loop run
now = time.time()
after = now
# Adjust block size
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
before = after
# Progress message
speed = self.calc_speed(start, now, byte_counter - resume_len)
if data_len is None:
eta = None
else:
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
self._hook_progress({
'status': 'downloading',
'downloaded_bytes': byte_counter,
'total_bytes': data_len,
'tmpfilename': tmpfilename,
'filename': filename,
'eta': eta,
'speed': speed,
'elapsed': now - start,
})
if is_test and byte_counter == data_len:
break
if stream is None:
self.to_stderr('\n')
self.report_error('Did not get any data blocks')
return False
if tmpfilename != '-':
stream.close()
        if data_len is not None and byte_counter != data_len:
            # Report the failure to the progress hooks before raising.
            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'status': 'error',
            })
            raise ContentTooShortError(byte_counter, int(data_len))
self.try_rename(tmpfilename, filename)
# Update file modification time
if self.params.get('updatetime', True):
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
'elapsed': time.time() - start,
})
return True
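# Illustrative usage (a minimal sketch; `ydl` is assumed to be a YoutubeDL
# instance and the URL is a placeholder). In practice this downloader is
# normally driven through FileDownloader.download(), which wraps
# real_download():
#     fd = HttpFD(ydl, {'retries': 3, 'continuedl': True})
#     fd.real_download('video.mp4', {'url': 'http://example.com/video.mp4'})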
|
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line utility: imports a site export file into the datastore.
You may want to first clear the current datastore before running this;
see http://code.google.com/appengine/docs/python/tools/devserver.html#Using_the_Datastore
for instructions.
Once that's done, with the server running, do
$ tools/site_export_importer.py path/to/export_file.zip
"""
# import this first to ensure to add necessary paths to find other project
# imports
import remote_api
# python standard library
import logging
import optparse
import pfif
import sys
import StringIO
import zipfile
# personfinder modules
from model import *
import importer
def open_file_inside_zip(zip_path):
export_zip = zipfile.ZipFile(zip_path)
entry_count = len(export_zip.infolist())
if entry_count > 1:
raise IOError('zip archive had %d entries (expected 1)' % entry_count)
zip_entry = export_zip.infolist()[0]
logging.info('Reading from zip entry: %s', zip_entry.filename)
return StringIO.StringIO(export_zip.read(zip_entry.filename))
def next_n(a_list, batch_size):
"""Generator that yields the next batch_size items from a_list."""
batch = []
for item in a_list:
batch.append(item)
if len(batch) == batch_size:
yield batch
batch = []
if batch:
yield batch
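# Illustrative behaviour, derived from the definition above:
#     list(next_n([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]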
def maybe_add_required_keys(a_dict, required_keys, dummy_value=u'?'):
for required_key in required_keys:
if not importer.strip(a_dict.get(required_key)):
logging.info(
'%s is missing from %s; will add dummy value(%s)',
required_key, a_dict, dummy_value)
a_dict[required_key] = dummy_value
return a_dict
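# Illustrative behaviour (assuming importer.strip() treats a missing value as
# empty): maybe_add_required_keys({'given_name': u'Ann'},
# (u'given_name', u'family_name')) fills in family_name with the dummy u'?'.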
def create_person(person_dict):
# TODO(kpy): Pass a subdomain argument to importer.create_person.
try:
return importer.create_person(person_dict)
except AssertionError:
pass
try:
person_dict = maybe_add_required_keys(
person_dict, (u'given_name', u'family_name'))
return importer.create_person(person_dict)
except AssertionError:
logging.info(
'skipping person %s as it cannot be made valid', person_dict)
return None
def create_note(note_dict):
# TODO(kpy): Pass a subdomain argument to importer.create_note.
try:
return importer.create_note(note_dict)
except AssertionError:
pass
try:
return importer.create_note(note_dict, requires_key=False)
except AssertionError:
logging.info(
'skipping note %s as it cannot be made valid', note_dict)
return None
def maybe_update_index(entity):
if hasattr(entity, 'update_index'):
entity.update_index(['old', 'new'])
def add_entities(entity_dicts, create_function, batch_size, kind, store_all):
"""Adds the data in entity_dicts to storage as entities created by
calling create_function. Uses next_n to group the entity_dicts into
batches that get stored using model.db.put(...), after being converted
into entities using create_function.
Args:
entity_dicts: an iterable of dictionaries containing data to be stored
create_function: a function that converts a dictionary to a new entity
batch_size: size of the batches used to write the entities to storage
        kind: the text name of the entities for logging
        store_all: accepted for interface consistency; not used directly here
    """
batch_count = (len(entity_dicts) + batch_size - 1)/batch_size
for i, batch in enumerate(next_n(entity_dicts, batch_size)):
entities = [create_function(d) for d in batch]
entities = [e for e in entities if e]
for e in entities:
maybe_update_index(e)
db.put(entities)
if i % 10 == 0 or i == batch_count - 1:
logging.info('%s update: just added batch %d/%d', kind, i + 1,
batch_count)
def import_site_export(export_path, remote_api_host,
app_id, batch_size, store_all):
# Log in, then use the pfif parser to parse the export file. Use the
# importer methods to convert the dicts to entities then add them as in
# import.py, but less strict, to ensure that all exported data is available.
remote_api.connect(remote_api_host, app_id)
logging.info('%s: importing exported records from %s',
remote_api_host, export_path)
if not export_path.endswith('.zip'):
export_fd = open(export_path)
else:
export_fd = open_file_inside_zip(export_path)
persons, notes = pfif.parse_file(export_fd)
logging.info('loaded %d persons, %d notes', len(persons), len(notes))
if not store_all:
persons = [d for d in persons if is_clone(d.get('person_record_id'))]
notes = [d for d in notes if is_clone(d.get('note_record_id'))]
logging.info(
'... down to %d persons, %d notes after excluding %r records',
len(persons), len(notes), HOME_DOMAIN)
logging.info('... adding persons')
add_entities(persons, create_person, batch_size, 'person', store_all)
logging.info('... adding notes')
add_entities(notes, create_note, batch_size, 'note', store_all)
def parse_command_line():
parser = optparse.OptionParser()
parser.add_option('--import_batch_size',
default=100,
help='size of batches used during data import')
parser.add_option('--store_home_domain_records',
action='store_true',
dest='store_all',
default=False,
help=('Allows importing of records in this app\'s home'
                            ' domain. Disabled by default because doing so'
                            ' can cause existing records with the same'
' key as an imported record to be overwritten'))
parser.add_option('--host',
default='localhost:8080',
help='HOST endpoint to post to for importing data. '
'(Required)')
parser.add_option('--app_id',
help='Application ID of endpoint (Optional for '
'*.appspot.com)')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('One argument required - the path to the export file')
return options, args
ARE_YOU_SURE = ('You have specified --store_home_domain_records:\n'
'This will override records in local storage if there are'
' feed records with matching numeric ids.\nContinue? (Y/n) ')
def main():
logging.basicConfig(level=logging.INFO)
options, args = parse_command_line()
export_path = args[0]
if options.store_all:
answer = raw_input(ARE_YOU_SURE)
if answer and answer[0] in ('n', 'N'):
logging.info("... exiting")
sys.exit(0)
import_site_export(
export_path, options.host, options.app_id,
options.import_batch_size, options.store_all)
if __name__ == '__main__':
main()
|
|
import copy
from datetime import datetime, timedelta
from django.db import connection
from collections import OrderedDict
import json
import re
from django.conf import settings as settings
from django.db.models import Q
from django.db.models.sql import Query
from django.db import DEFAULT_DB_ALIAS
from website.models import Jurisdiction
from website.utils import temporal_stats
def update_reports(question, jurisdictions=None, before=None, previous=None):
if jurisdictions:
if not isinstance(jurisdictions, list):
jurisdictions = [jurisdictions]
jurisdictions = [j.id for j in jurisdictions if j.id not in settings.SAMPLE_JURISDICTIONS]
if not jurisdictions:
return None
if not before:
before = datetime.utcnow()
if not previous:
previous_bucket = before - timedelta(seconds=10) # alignment?
previous = run_reports(question, jurisdictions=jurisdictions, before=previous_bucket)
current = run_reports(question, jurisdictions=jurisdictions, before=before)
changes = differences(previous, current) if previous else current
for report in changes:
if 'name' in report:
stats = dict(temporal_stats.get_report(report['name']))
for row in report['table']:
k = temporal_stats.normalize(row['key'])
v = row['value']
if v:
stats[k](question.id).increment(delta=v)
return current
def differences(last, current):
output = copy.deepcopy(current)
for (i, report) in enumerate(current):
if 'name' in report:
for (r, row) in enumerate(report['table']):
last_val = last[i]['table'][r]['value']
cur_val = row['value']
if last_val is not None and cur_val is not None:
output[i]['table'][r]['value'] = row['value'] - last[i]['table'][r]['value']
else:
output[i]['table'][r]['value'] = cur_val
return output
def run_reports(question, **kwargs):
return [run_report(question, report, **kwargs) for report in get_reports(question)]
def run_report(question, report, **kwargs):
output = report.copy()
output['question_id'] = question.id
query = build_query(question, report['spec'], **kwargs)
cursor = connection.cursor()
cursor.execute(query)
columns = [col[0] for col in cursor.description]
output['table'] = [{'key': k, 'value': v} for (k,v) in zip(columns, cursor.fetchone())]
return output
def get_reports(question):
return (question.id in reports_by_qid and reports_by_qid[question.id]) or \
(question.display_template in reports_by_type and reports_by_type[question.display_template])
def add_temporal_reports(reports):
return reports + [make_temporal(r) for r in reports]
def make_temporal(report):
if not 'name' in report:
return None
spec = temporal_stats.get_report(report['name'])
if not spec:
return None
return { 'type': "temporal",
'name': report['name'],
'question_id': report['question_id'],
'table': report['table'],
'statsd_metrics': [m for (m, f) in spec]
}
def build_query(question, field_map, geo_filter=None, before=None, jurisdictions=None):
# Yes we have two mechanisms for building queries here. We've
# hacked out a slice of the django sql "compiler" for one of them,
# since they specialize in where clauses, while writing a simpler
# one of our own that specializes in select clauses. Both of them
# bear more than a passing resemblance to lisp, of course.
indent = " ";
sep = ",\n"+indent
# convert everything to unsigned, even though they are already
# unsigned. This prevents django from occasionally thinking that
# the values are Decimals
fields = ["CONVERT(%(match)s, UNSIGNED) AS '%(name)s'" % { "name": n, "match": m }
for (n, m) in field_map.items()]
compiled_filter = None
if geo_filter:
fake_query = Query(Jurisdiction)
fake_query.add_q(geo_filter)
compiler = fake_query.get_compiler(DEFAULT_DB_ALIAS)
where, where_params = fake_query.where.as_sql(compiler.connection.ops.quote_name, #might break postgres
compiler.connection)
compiled_filter = where % tuple(["'%s'" % p for p in where_params])
if jurisdictions:
if not isinstance(jurisdictions, list):
jurisdictions = [jurisdictions]
jurisdictions = ", ".join([str(j.id) if isinstance(j, Jurisdiction) else str(j)
for j in jurisdictions])
return ('''
SELECT %(fields)s
FROM (SELECT id AS answer_id,
(SELECT value FROM website_answerreference WHERE id = answer_id) AS value
FROM (SELECT (SELECT MAX(id)
FROM website_answerreference
WHERE website_answerreference.jurisdiction_id = website_jurisdiction.id AND
approval_status = 'A' AND''' +
("\n modify_datetime <= '%(before)s' AND" if before else "") + '''
question_id = %(question_id)s) AS id
FROM website_jurisdiction
WHERE website_jurisdiction.id NOT IN (1, 101105) AND
website_jurisdiction.jurisdiction_type != 'u' ''' +
("AND\n %(geo_filter)s" if geo_filter else "") +
("AND\n create_datetime <= '%(before)s '" if before else "") +
("AND\n website_jurisdiction.id IN (%(jurisdictions)s)" if jurisdictions else "") +
''') AS temp0) as temp1
''') % { "question_id": question.id,
"fields": sep.join(fields),
"geo_filter": compiled_filter,
"before": before.strftime("%Y-%m-%d %H:%M:%S") if before else None,
"jurisdictions": jurisdictions }
def json_match(field_name, value, op="="):
# BUG: this should be utf8mb4_unicode_ci; our connection to the database must be using the wrong encoding
return and_match(not_null_match(json_extract(field_name)),
'json_get(value, "%s") %s "%s" COLLATE utf8_unicode_ci' % (field_name, op, value))
def json_extract(field_name):
return 'json_get(value, "%s")' % field_name
def json_valid():
return 'json_valid(value)'
def regexp_match(regexp):
return 'value REGEXP \'%(regexp)s\'' % { "regexp": regexp }
def escape_regex_inclusion(s):
return re.sub(r'([\[\].*?{}()|$^])',
r'[[.\1.]]',
s)
def null_match(field='value'):
return '%s IS NULL' % field
def not_null_match(field='value'):
return '%s IS NOT NULL' % field
def sum_match(match):
return 'SUM(%s)' % match
def count_match(match):
return 'COUNT(%s)' % match
def count_all():
return 'COUNT(*)'
def total():
return not_null_match("answer_id")
def and_match(*args):
return parenthesize(" AND ".join(args))
def or_match(*args):
return parenthesize(" OR ".join(args))
def not_match(match):
return parenthesize('NOT (%s)' % match)
def eq(a, b):
return "%s = %s" % (a, b)
def ne(a, b):
return "%s != %s" % (a, b)
def lt(a, b):
return "%s < %s" % (a, b)
def gt(a, b):
return "%s > %s" % (a, b)
def lte(a, b):
return "%s <= %s" % (a, b)
def gte(a, b):
return "%s >= %s" % (a, b)
def between(v, a, b):
if a > b:
(a, b) = (b, a)
return and_match(gt(v, a),
lte(v, b))
def parenthesize(match):
return "("+ match +")"
def chart(type, spec):
return {"type": type, "spec": spec}
def pie(spec):
return chart("pie", spec)
def hist(spec):
return chart("histogram", spec)
def named(name, spec):
spec['name'] = name
temporal_stats.define_report(name, spec['spec'])
return spec
def add_freeform(spec):
not_freeform = or_match(null_match(json_extract("free-form")),
json_match("free-form", ""))
copy = [(k, and_match(not_freeform, v)) for (k,v) in spec.iteritems()]
copy.append(("Freeform", and_match(not_null_match(json_extract("free-form")),
json_match("free-form", "", op="!="))))
return OrderedDict(copy)
def add_other(spec):
copy = OrderedDict(spec)
conditions = [v for (k,v) in copy.iteritems()]
copy["Other"] = and_match(not_null_match("answer_id"),
or_match(null_match("value"),
and_match(not_null_match("value"),
not_match(json_valid())),
not_match(or_match(*conditions))))
return copy
def summarize(spec):
copy = OrderedDict(spec)
for (k,v) in copy.iteritems():
copy[k] = sum_match(v)
return copy
def add_sum_total(spec):
copy = OrderedDict(spec)
copy["Total"] = sum_match(total())
return copy
def add_count_total(spec):
copy = OrderedDict(spec)
copy["Total"] = count_all()
return copy
def coverage_report():
spec = OrderedDict([("Answered", not_null_match("answer_id")),
("Unanswered", null_match("answer_id"))])
return named("coverage", pie(add_count_total(summarize(spec))))
def yes_no_field(field_name):
spec = OrderedDict([("Yes", json_match(field_name, "yes")),
("No", json_match(field_name, "no"))])
return named("yes_no", pie(add_sum_total(summarize(add_other(spec)))))
def yes_no_exception_field(field_name):
spec = OrderedDict([("Yes", json_match(field_name, "yes")),
("Yes, with exceptions", json_match(field_name, "yes, with exceptions")),
("No", json_match(field_name, "no"))])
return named("yes_no_except", pie(add_sum_total(summarize(add_other(spec)))))
# macros, man, macros.
def turn_around_report():
    is_hours = json_match("time_unit", "hour(s)")
    is_days = json_match("time_unit", "day(s)")
    is_weeks = json_match("time_unit", "week(s)")
qty = json_extract("time_qty")
bins = OrderedDict([("Same day", and_match(is_hours, lte(qty, 8))),
("1-2 days", or_match(and_match(is_hours, between(qty, 9, 48)),
and_match(is_days, lte(qty, 2)))),
("3-5 days", or_match(and_match(is_days, between(qty, 3, 5)),
and_match(is_weeks, eq(qty, 1)))),
("6-10 days", or_match(and_match(is_days, between(qty, 6, 10)),
and_match(is_weeks, eq(qty, 2)))),
("11-15 days", or_match(and_match(is_days, between(qty, 11, 15)),
and_match(is_weeks, eq(qty, 3)))),
("16-20 days", or_match(and_match(is_days, between(qty, 16, 20)),
and_match(is_weeks, eq(qty, 4)))),
("21+ days", or_match(and_match(is_days, gte(qty, 21)),
and_match(is_weeks, gte(qty, 5))))])
return named("turn_around_time", hist(add_sum_total(summarize(add_other(add_freeform(bins))))))
def plan_check_service_type_report():
spec = OrderedDict([("Over the Counter",
json_match("plan_check_service_type",
"over the counter")),
("In-House (not same day)",
json_match("plan_check_service_type",
"in-house")),
("Outsourced",
json_match("plan_check_service_type",
"outsourced"))])
return named("plan_check", pie(add_sum_total(summarize(add_other(spec)))))
def module_drawings_report():
spec = OrderedDict([("Yes", json_match("value", "must draw individual modules")),
("No", json_match("value", "n in series in a rectangle allowed"))])
return named("module_drawings", pie(add_sum_total(summarize(add_other(spec)))))
def inspection_approval_report():
spec = OrderedDict([("In person", json_match("apply", "in person")),
("Remotely", json_match("apply", "remotely"))])
return named("inspection_approval", pie(add_sum_total(summarize(add_other(spec)))))
def time_window_report():
spec = OrderedDict([("Exact time given", json_match("time_window", "0")),
("2 hours (or less)", json_match("time_window", "2")),
("Half Day (2 to 4 hours)", json_match("time_window", "4")),
("Full Day (greater than 4 hours)", json_match("time_window", "8"))])
return named("time_window", hist(add_sum_total(summarize(add_other(add_freeform(spec))))))
def size_cap_report():
spec = OrderedDict([("<5 kW", lt(json_extract("value"), 5)),
("5-10 kW", between(json_extract("value"), 5, 10)),
("10-15 kW", between(json_extract("value"), 10, 15)),
("15-20 kW", between(json_extract("value"), 15, 20)),
(">20 kW", gte(json_extract("value"), 20))])
return named("size_cap", hist(add_sum_total(summarize(add_other(spec)))))
def sb1222_report():
spec = OrderedDict([("Yes", json_match("compliant", "yes")),
("Yes, with evidence", json_match("compliant", "yes, with exceptions")),
("No", json_match("compliant", "no"))])
return named("sb1222", pie(add_sum_total(summarize(add_other(spec)))))
reports_by_type = {
"available_url_display": [coverage_report(), yes_no_field("available")],
"radio_with_exception_display": [coverage_report(), yes_no_exception_field("required")],
"plan_check_service_type_display": [coverage_report(), plan_check_service_type_report()],
"radio_compliant_sb1222_with_exception": [coverage_report(), sb1222_report()],
"inspection_checklists_display": [coverage_report(), yes_no_field("available")],
"radio_has_training_display": [coverage_report(), yes_no_field("value")],
"phone_display": [coverage_report()],
"url": [coverage_report()],
"address_display": [coverage_report()],
"radio_submit_PE_stamped_structural_letter_with_exception_display": [coverage_report(), yes_no_exception_field("required")],
"hours_display": [coverage_report()], # histogram
"turn_around_time_display": [coverage_report(), turn_around_report()],
"permit_cost_display": [coverage_report()], # check the spec, probably needs histograms and stuff
"radio_required_for_page_sizes_display": [coverage_report(), yes_no_field("required")], # should do more for the required values
"radio_required_for_scales_display": [coverage_report()], # likewise
"radio_required_display": [coverage_report()],
"radio_covered_with_exception_display": [coverage_report(), yes_no_exception_field("allowed")],
"radio_studer_vent_rules_with_exception_display": [coverage_report(), yes_no_exception_field("allowed")],
"radio_module_drawings_display": [coverage_report(), module_drawings_report()],
"radio_allowed_with_exception_display": [coverage_report(), yes_no_exception_field("allowed")],
"required_spec_sheets_display": [coverage_report()],
"homeowner_requirements_display": [coverage_report()], # two yes/no answers in one
"fire_setbacks_display": [coverage_report(), yes_no_exception_field("enforced")],
"radio_inspection_approval_copies_display": [coverage_report(), inspection_approval_report()],
"signed_inspection_approval_delivery_display": [coverage_report()],
"radio_vent_spanning_rules_with_exception_display": [coverage_report(), yes_no_exception_field("allowed")],
"solar_permitting_checklists_display": [coverage_report(), yes_no_field("available")],
"radio_available_with_exception_display": [coverage_report(), yes_no_exception_field("available")],
"time_window_display": [coverage_report(), time_window_report()],
"radio_has_training_display": [coverage_report(), yes_no_field("value")],
"radio_licensing_required_display": [coverage_report(), yes_no_field("required")],
"online_forms": [coverage_report()],
None: [coverage_report()],
}
reports_by_qid = {
# 15: { #
# 'query': '''SELECT (SELECT count(*) FROM (SELECT value FROM `website_answerreference` WHERE question_id = '15' AND jurisdiction_id NOT IN ('1','101105') AND approval_status LIKE 'A' GROUP BY jurisdiction_id ASC, create_datetime DESC) AS tmp1 WHERE value LIKE '%value"%"yes%') as Yes, (SELECT count(*) FROM (SELECT value FROM `website_answerreference` WHERE question_id = '15' AND jurisdiction_id NOT IN ('1','101105') AND approval_status LIKE 'A' GROUP BY jurisdiction_id ASC, create_datetime DESC) AS tmp2 WHERE value LIKE '%value"%"no%' ) as No, (SELECT count(*) FROM (SELECT value FROM `website_answerreference` WHERE question_id = '15' AND jurisdiction_id NOT IN ('1','101105') AND approval_status LIKE 'A' GROUP BY jurisdiction_id ASC, create_datetime DESC) AS tmp3) as Total FROM website_answerreference LIMIT 1''',
# 'keys_in_order': ['Yes', 'No', 'Total'],
# },
15: [coverage_report(), yes_no_field("value")],
71: [coverage_report(), size_cap_report()],
}
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A utility function for importing TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
# TODO(josh11b): SWIG the code from node_def_util instead of duplicating
# the logic here.
def _GetNodeAttr(node_def, attr_name):
if attr_name not in node_def.attr:
raise ValueError('Expected one attr with name %r in %s.'
% (attr_name, str(node_def)))
return node_def.attr[attr_name]
def _ArgToTypesNoRef(node_def, arg_def):
if arg_def.number_attr:
repeats = _GetNodeAttr(node_def, arg_def.number_attr).i
if arg_def.type_attr:
dtype = _GetNodeAttr(node_def, arg_def.type_attr).type
else:
assert arg_def.type != types_pb2.DT_INVALID
dtype = arg_def.type
return [dtype] * repeats
elif arg_def.type_attr:
return [_GetNodeAttr(node_def, arg_def.type_attr).type]
elif arg_def.type_list_attr:
return _GetNodeAttr(node_def, arg_def.type_list_attr).list.type
else:
assert arg_def.type != types_pb2.DT_INVALID
return [arg_def.type]
def _SingleArgToTypes(node_def, arg_def):
types = _ArgToTypesNoRef(node_def, arg_def)
if arg_def.is_ref:
return [dtypes.as_dtype(dt)._as_ref.as_datatype_enum for dt in types] # pylint: disable=protected-access
return types
def _ArgsToTypes(node_def, arg_list):
types = []
for arg_def in arg_list:
types.extend(_SingleArgToTypes(node_def, arg_def))
return types
def _InputTypes(node_def, op_dict):
op_def = op_dict[node_def.op]
return _ArgsToTypes(node_def, op_def.input_arg)
def _OutputTypes(node_def, op_dict):
op_def = op_dict[node_def.op]
return _ArgsToTypes(node_def, op_def.output_arg)
def _IsControlInput(input_name):
# Expected format: '^operation_name' (control input).
return input_name.startswith('^')
def _IsColocatedOp(input_name):
# Expected format: 'loc:@operation_name' (Colocated op).
return input_name.startswith('loc:@')
def _ParseTensorName(tensor_name):
"""Parses a tensor name into an operation name and output index.
This function will canonicalize tensor names as follows:
* "foo:0" -> ("foo", 0)
* "foo:7" -> ("foo", 7)
* "foo" -> ("foo", 0)
* "foo:bar:baz" -> ValueError
Args:
tensor_name: The name of a tensor.
Returns:
A tuple containing the operation name, and the output index.
Raises:
ValueError: If `tensor_name' cannot be interpreted as the name of a tensor.
"""
components = tensor_name.split(':')
if len(components) == 2:
# Expected format: 'operation_name:output_index'.
try:
output_index = int(components[1])
except ValueError:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
return components[0], output_index
elif len(components) == 1:
# Expected format: 'operation_name' (implicit 0th output).
return components[0], 0
else:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
def _CanonicalInputName(input_name):
input_name = compat.as_str(input_name)
if _IsControlInput(input_name) or _IsColocatedOp(input_name):
return input_name
input_op_name, output_index = _ParseTensorName(input_name)
return '%s:%d' % (input_op_name, output_index)
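# Illustrative behaviour, derived from the definitions above:
#     _CanonicalInputName('foo')      -> 'foo:0'
#     _CanonicalInputName('foo:3')    -> 'foo:3'
#     _CanonicalInputName('^foo')     -> '^foo'      (control input, unchanged)
#     _CanonicalInputName('loc:@foo') -> 'loc:@foo'  (colocated op, unchanged)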
def _InvalidNodeMessage(node, message):
return 'graph_def is invalid at node %r: %s.' % (node.name, message)
@contextlib.contextmanager
def _MaybeDevice(device):
"""Applies the given device only if device is not None or empty."""
if device:
with ops.device(device):
yield
else:
yield
def _FindAttrInOpDef(attr_name, op_def):
for attr_def in op_def.attr:
if attr_name == attr_def.name:
return attr_def
return None
def import_graph_def(graph_def, input_map=None, return_elements=None,
name=None, op_dict=None, producer_op_list=None):
"""Imports the graph from `graph_def` into the current default `Graph`.
This function provides a way to import a serialized TensorFlow
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and extract individual objects in the `GraphDef` as
@{tf.Tensor} and @{tf.Operation} objects. Once extracted,
these objects are placed into the current default `Graph`. See
@{tf.Graph.as_graph_def} for a way to create a `GraphDef`
proto.
Args:
graph_def: A `GraphDef` proto containing operations to be imported into
the default graph.
input_map: A dictionary mapping input names (as strings) in `graph_def`
to `Tensor` objects. The values of the named input tensors in the
imported graph will be re-mapped to the respective `Tensor` values.
return_elements: A list of strings containing operation names in
`graph_def` that will be returned as `Operation` objects; and/or
tensor names in `graph_def` that will be returned as `Tensor` objects.
name: (Optional.) A prefix that will be prepended to the names in
`graph_def`. Note that this does not apply to imported function names.
Defaults to `"import"`.
op_dict: (Optional.) A dictionary mapping op type names to `OpDef` protos.
Must contain an `OpDef` proto for each op type named in `graph_def`.
If omitted, uses the `OpDef` protos registered in the global registry.
producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped)
list of `OpDef`s used by the producer of the graph. If provided, attrs
for ops in `graph_def` that are not in `op_dict` that have their default
value according to `producer_op_list` will be removed. This will allow
some more `GraphDef`s produced by later binaries to be accepted by
earlier binaries.
Returns:
A list of `Operation` and/or `Tensor` objects from the imported graph,
corresponding to the names in `return_elements`.
Raises:
TypeError: If `graph_def` is not a `GraphDef` proto,
`input_map` is not a dictionary mapping strings to `Tensor` objects,
or `return_elements` is not a list of strings.
ValueError: If `input_map`, or `return_elements` contains names that
do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.
it refers to an unknown tensor).
"""
# Type checks for inputs.
if not isinstance(graph_def, graph_pb2.GraphDef):
# `graph_def` could be a dynamically-created message, so try a duck-typed
# approach
try:
old_graph_def = graph_def
graph_def = graph_pb2.GraphDef()
graph_def.MergeFrom(old_graph_def)
except TypeError:
raise TypeError('graph_def must be a GraphDef proto.')
if input_map is None:
input_map = {}
else:
if not (isinstance(input_map, dict)
and all(isinstance(k, compat.bytes_or_text_types)
for k in input_map.keys())):
raise TypeError('input_map must be a dictionary mapping strings to '
'Tensor or Operation objects.')
if return_elements is not None:
return_elements = tuple(return_elements)
if not all(isinstance(x, compat.bytes_or_text_types)
for x in return_elements):
raise TypeError('return_elements must be a list of strings.')
# Use a canonical representation for all tensor names.
input_map = {_CanonicalInputName(k): v for k, v in input_map.items()}
used_input_keys = set()
name_to_op = {}
if op_dict is None:
op_dict = op_def_registry.get_registered_ops()
if producer_op_list is None:
producer_op_dict = None
else:
producer_op_dict = {op.name: op for op in producer_op_list.op}
g = ops.get_default_graph()
# Add any functions defined in `graph_def` to `g`
if graph_def.library and graph_def.library.function:
# Copy op_dict so we don't clobber the original
op_dict = copy.copy(op_dict)
# pylint: disable=protected-access
# Note that we do not prepend `name` to the function name. The reasoning is
# that function names are similar to op definition names, which currently do
# not have a scoped name or namespace scheme.
functions = function._from_library(graph_def.library)
for f in functions:
g._add_function(f)
op_dict[f.name] = f.definition.signature
# pylint: enable=protected-access
# LINT.IfChange
with ops.name_scope(name, 'import', input_map.values()) as scope:
# TODO(ashankar): Should this just copy over or should it do some
# more nuanced merging? For example, the graph may already have some
# marked "bad versions" and we don't want to lose those because of
    # what's in graph_def.versions? The C++ ImportGraphDef does something
# more nuanced.
g.graph_def_versions.CopyFrom(graph_def.versions)
if not all(isinstance(v, ops.Tensor) for v in input_map.values()):
if not scope:
# The caller must have passed `name=''`.
raise ValueError(
'tf.import_graph_def() requires a non-empty `name` if `input_map` '
'contains non-Tensor values. Try calling tf.convert_to_tensor() on '
'`input_map` values before calling tf.import_graph_def().')
with ops.name_scope('_inputs'):
for k, v in input_map.items():
if not (_IsControlInput(k) or _IsColocatedOp(k)):
input_map[k] = ops.convert_to_tensor(v)
# NOTE(mrry): We do this in two passes, because there may be a cycle in
# `graph_def`.
# 1. Add operations without their inputs.
for node in graph_def.node:
# Set any default attr values that aren't present.
if node.op not in op_dict:
raise ValueError('No op named %s in defined operations.' % node.op)
op_def = op_dict[node.op]
for attr_def in op_def.attr:
key = attr_def.name
if attr_def.HasField('default_value'):
value = node.attr[key]
if value is None or value.WhichOneof('value') is None:
node.attr[key].CopyFrom(attr_def.default_value)
if producer_op_dict:
# Remove any default attr values that aren't in op_def.
if node.op in producer_op_dict:
producer_op_def = producer_op_dict[node.op]
# We make a copy of node.attr to iterate through since we
# may modify node.attr inside the loop.
for key in list(node.attr):
if _FindAttrInOpDef(key, op_def) is None:
# No attr_def in consumer, look in producer.
attr_def = _FindAttrInOpDef(key, producer_op_def)
if (attr_def and attr_def.HasField('default_value') and
node.attr[key] == attr_def.default_value):
# Unknown attr had default value in producer, delete it
# so it can be understood by consumer.
del node.attr[key]
output_types = _OutputTypes(node, op_dict)
name_to_op[node.name] = g.create_op(
node.op, [], output_types, name=node.name, attrs=node.attr,
compute_shapes=False, compute_device=False,
op_def=op_def)
# 2. Add inputs to the operations.
for node in graph_def.node:
op = name_to_op[node.name]
input_types = _InputTypes(node, op_dict)
# Rewrite the colocation attributes in the graph, since the
# names of new ops may have changed.
for key, value in op.node_def.attr.items():
if key == '_class':
class_values = value.list
new_class_values = []
for class_value in class_values.s:
if class_value.startswith(b'loc:@'):
class_value_str = class_value.decode()
if class_value_str in input_map:
original_op = input_map[class_value_str]
used_input_keys.add(class_value_str)
else:
op_to_bind_to = class_value_str[5:]
# Find the op by its original name.
if op_to_bind_to not in name_to_op:
raise ValueError('Specified colocation to an op that '
'does not exist during import: %s in %s' % (
op_to_bind_to, node.name))
original_op = name_to_op[op_to_bind_to]
new_class_values.append(compat.as_bytes(
'loc:@' + original_op.name))
else:
new_class_values.append(class_value)
value.list.CopyFrom(attr_value_pb2.AttrValue.ListValue(
s=new_class_values))
# NOTE(mrry): We cannot use zip here because control inputs do not appear
# in the list of input_types.
for i, input_name in enumerate(
[_CanonicalInputName(x) for x in node.input]):
if _IsControlInput(input_name):
# (a) Input is a control input that should be taken from an op
# in "graph_def".
if input_name in input_map:
source_op = input_map[input_name]
used_input_keys.add(input_name)
else:
try:
source_op = name_to_op[input_name[1:]]
except KeyError:
raise ValueError(
_InvalidNodeMessage(
node, 'Control input %r not found in graph_def.' % (
input_name,)))
# pylint: disable=protected-access
op._add_control_input(source_op)
# pylint: enable=protected-access
else:
try:
input_type = input_types[i]
except IndexError:
raise ValueError(_InvalidNodeMessage(
node, 'More inputs specified (%r) than the op expects.'
% (input_name,)))
if input_name in input_map:
# (b) Input should be replaced by a tensor from the caller.
source_tensor = input_map[input_name]
used_input_keys.add(input_name)
else:
# (c) Input should be taken from an op in `graph_def`.
operation_name, output_index = _ParseTensorName(input_name)
try:
source_op = name_to_op[operation_name]
source_tensor = list(source_op.values())[output_index]
except (KeyError, IndexError):
raise ValueError(
_InvalidNodeMessage(
node,
'Input tensor %r not found in graph_def.'
% (input_name,)))
try:
# pylint: disable=protected-access
op._add_input(source_tensor, dtype=input_type)
# pylint: enable=protected-access
except TypeError as te:
raise ValueError(_InvalidNodeMessage(
node, 'Input tensor %r %s' % (input_name, te)))
# pylint: disable=protected-access
if op._input_dtypes != input_types:
raise ValueError(
_InvalidNodeMessage(
node,
'Input types mismatch (expected %r but got %r)'
% (', '.join(dtypes.as_dtype(x).name for x in input_types),
', '.join(x.name for x in op._input_dtypes))))
# pylint: enable=protected-access
if not g._is_function(op.type): # pylint: disable=protected-access
# Execute shape inference for this op.
# NOTE(mrry): If the graph contains a cycle, the full shape information
# may not be available for this op's inputs.
ops.set_shapes_for_outputs(op)
# For nodes with _output_shapes set, set the output shapes.
if '_output_shapes' in op.node_def.attr:
for i, output in enumerate(op.outputs):
dims = op.node_def.attr['_output_shapes'].list.shape[i]
output_shape = tensor_shape.TensorShape(
None if dims.unknown_rank else
[dim.size if dim.size >= 0 else None for dim in dims.dim])
try:
output.set_shape(output_shape)
except ValueError as e:
# If the output shape is incompatible with what is inferred
# by the graph for a very specific whitelist of ops, then we
# ignore this output shape. This can happen if there is a
# bug in the shape function for some operation, and the
# serialized graph def has the incorrect shape set when
# running on a newer binary with the fixed shape function.
# This is an escape hatch that allows us to correct shape
# functions that are not critical to correct execution but
# would cause graphs to fail if imported after correcting.
#
# This can be removed after 2017/03/08.
if op.type in ['RandomShuffleQueue', 'PaddingFIFOQueue',
'FIFOQueue', 'PriorityQueue', 'QueueSize',
'Stack', 'Barrier', 'BarrierReadySize',
'BarrierIncompleteSize', 'HashTable',
'MutableHashTable',
'MutableHashTableOfTensors', 'Mutex',
'CuckooTable', 'IndexTable',
'WholeFileReader', 'TextLineReader',
'FixedLengthRecordReader',
'TFRecordReader', 'IdentityReader',
'RefSwitch', 'RefEnter', 'RefNextIteration',
'RefMerge', 'RefIdentity']:
pass
elif op.type in [
'ConditionalAccumulator', 'SparseConditionalAccumulator',
'Table'
]:
# This can be removed after 2017/04/24.
pass
else:
raise e
del op.node_def.attr['_output_shapes']
# Apply device functions for this op.
# NOTE(mrry): We do this after configuring the inputs, because
# the result of the device functions may depend on the inputs.
with _MaybeDevice(node.device):
g._apply_device_functions(op) # pylint: disable=protected-access
# Treat unused input mappings as an error, because they are likely to be
# due to a typo.
unused_input_keys = frozenset(input_map.keys()).difference(used_input_keys)
if unused_input_keys:
raise ValueError(
'Attempted to map inputs that were not found in graph_def: [%s]'
% ', '.join(unused_input_keys))
if return_elements is None:
return None
else:
ret = []
for name in return_elements:
name = compat.as_str(name)
if ':' in name:
try:
operation_name, output_index = _ParseTensorName(name)
ret.append(name_to_op[operation_name].outputs[output_index])
except (ValueError, KeyError, IndexError):
raise ValueError(
'Requested return_element %r not found in graph_def.' % name)
else:
try:
ret.append(name_to_op[name])
except KeyError:
raise ValueError(
'Requested return_element %r not found in graph_def.' % name)
return ret
# LINT.ThenChange(//tensorflow/core/graph/graph_constructor.cc)
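# Hedged usage sketch (not part of this module, kept as a comment so the file
# stays import-clean): how the importer above is typically reached through the
# public tf.import_graph_def() API in TF 1.x. `graph_def` is assumed to be a
# GraphDef proto containing ops named "input" and "output"; `input_map`
# exercises the tensor-replacement path and `return_elements` the name lookup
# implemented above.
#
#   import tensorflow as tf
#
#   with tf.Graph().as_default():
#       replacement = tf.placeholder(tf.float32, shape=[None, 784])
#       output, = tf.import_graph_def(
#           graph_def,
#           input_map={'input:0': replacement},
#           return_elements=['output:0'],
#           name='imported')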
|
|
import logging
from django.core.exceptions import ValidationError
from django.db import models
try:
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor
except ImportError:
from django.db.models.fields.related import (
ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor,
)
logger = logging.getLogger(__name__)
__all__ = ["Country", "State", "Locality", "Address", "AddressField"]
class InconsistentDictError(Exception):
pass
def _to_python(value):
raw = value.get("raw", "")
country = value.get("country", "")
country_code = value.get("country_code", "")
state = value.get("state", "")
state_code = value.get("state_code", "")
locality = value.get("locality", "")
sublocality = value.get("sublocality", "")
postal_town = value.get("postal_town", "")
postal_code = value.get("postal_code", "")
street_number = value.get("street_number", "")
route = value.get("route", "")
formatted = value.get("formatted", "")
latitude = value.get("latitude", None)
longitude = value.get("longitude", None)
# If there is no value (empty raw) then return None.
if not raw:
return None
# Fix issue with NYC boroughs (https://code.google.com/p/gmaps-api-issues/issues/detail?id=635)
if not locality and sublocality:
locality = sublocality
# Fix issue with UK addresses with no locality
# (https://github.com/furious-luke/django-address/issues/114)
if not locality and postal_town:
locality = postal_town
    # If we have an inconsistent set of values, bail out now.
if (country or state or locality) and not (country and state and locality):
raise InconsistentDictError
# Handle the country.
try:
country_obj = Country.objects.get(name=country)
except Country.DoesNotExist:
if country:
if len(country_code) > Country._meta.get_field("code").max_length:
if country_code != country:
raise ValueError("Invalid country code (too long): %s" % country_code)
country_code = ""
country_obj = Country.objects.create(name=country, code=country_code)
else:
country_obj = None
# Handle the state.
try:
state_obj = State.objects.get(name=state, country=country_obj)
except State.DoesNotExist:
if state:
if len(state_code) > State._meta.get_field("code").max_length:
if state_code != state:
raise ValueError("Invalid state code (too long): %s" % state_code)
state_code = ""
state_obj = State.objects.create(name=state, code=state_code, country=country_obj)
else:
state_obj = None
# Handle the locality.
try:
locality_obj = Locality.objects.get(name=locality, postal_code=postal_code, state=state_obj)
except Locality.DoesNotExist:
if locality:
locality_obj = Locality.objects.create(name=locality, postal_code=postal_code, state=state_obj)
else:
locality_obj = None
# Handle the address.
try:
if not (street_number or route or locality):
address_obj = Address.objects.get(raw=raw)
else:
address_obj = Address.objects.get(street_number=street_number, route=route, locality=locality_obj)
except Address.DoesNotExist:
address_obj = Address(
street_number=street_number,
route=route,
raw=raw,
locality=locality_obj,
formatted=formatted,
latitude=latitude,
longitude=longitude,
)
# If "formatted" is empty try to construct it from other values.
if not address_obj.formatted:
address_obj.formatted = str(address_obj)
# Need to save.
address_obj.save()
# Done.
return address_obj
##
# Convert a dictionary to an address.
##
def to_python(value):
# Keep `None`s.
if value is None:
return None
# Is it already an address object?
if isinstance(value, Address):
return value
# If we have an integer, assume it is a model primary key.
elif isinstance(value, int):
return value
# A string is considered a raw value.
elif isinstance(value, str):
obj = Address(raw=value)
obj.save()
return obj
# A dictionary of named address components.
elif isinstance(value, dict):
# Attempt a conversion.
try:
return _to_python(value)
except InconsistentDictError:
return Address.objects.create(raw=value["raw"])
# Not in any of the formats I recognise.
raise ValidationError("Invalid address value.")
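# Hedged, commented-out example (all field values are made up; only the keys
# are the ones read by _to_python() above): the dictionary form accepted by
# to_python() mirrors a decomposed geocoder result.
#
#   to_python({
#       'raw': '1600 Amphitheatre Parkway, Mountain View, CA 94043, USA',
#       'street_number': '1600',
#       'route': 'Amphitheatre Parkway',
#       'locality': 'Mountain View',
#       'postal_code': '94043',
#       'state': 'California',
#       'state_code': 'CA',
#       'country': 'United States',
#       'country_code': 'US',
#       'latitude': 37.42,
#       'longitude': -122.08,
#   })
#
# If only some of country/state/locality are present, _to_python() raises
# InconsistentDictError and to_python() falls back to Address.objects.create(raw=...).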
##
# A country.
##
class Country(models.Model):
name = models.CharField(max_length=40, unique=True, blank=True)
code = models.CharField(max_length=2, blank=True) # not unique as there are duplicates (IT)
class Meta:
verbose_name_plural = "Countries"
ordering = ("name",)
def __str__(self):
return "%s" % (self.name or self.code)
##
# A state. Google refers to this as `administration_level_1`.
##
class State(models.Model):
name = models.CharField(max_length=165, blank=True)
code = models.CharField(max_length=8, blank=True)
country = models.ForeignKey(Country, on_delete=models.CASCADE, related_name="states")
class Meta:
unique_together = ("name", "country")
ordering = ("country", "name")
def __str__(self):
txt = self.to_str()
country = "%s" % self.country
if country and txt:
txt += ", "
txt += country
return txt
def to_str(self):
return "%s" % (self.name or self.code)
##
# A locality (suburb).
##
class Locality(models.Model):
name = models.CharField(max_length=165, blank=True)
postal_code = models.CharField(max_length=10, blank=True)
state = models.ForeignKey(State, on_delete=models.CASCADE, related_name="localities")
class Meta:
verbose_name_plural = "Localities"
unique_together = ("name", "postal_code", "state")
ordering = ("state", "name")
def __str__(self):
txt = "%s" % self.name
state = self.state.to_str() if self.state else ""
if txt and state:
txt += ", "
txt += state
if self.postal_code:
txt += " %s" % self.postal_code
cntry = "%s" % (self.state.country if self.state and self.state.country else "")
if cntry:
txt += ", %s" % cntry
return txt
##
# An address. If for any reason we are unable to find a matching
# decomposed address we will store the raw address string in `raw`.
##
class Address(models.Model):
street_number = models.CharField(max_length=20, blank=True)
route = models.CharField(max_length=100, blank=True)
locality = models.ForeignKey(
Locality,
on_delete=models.CASCADE,
related_name="addresses",
blank=True,
null=True,
)
raw = models.CharField(max_length=200)
formatted = models.CharField(max_length=200, blank=True)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
verbose_name_plural = "Addresses"
ordering = ("locality", "route", "street_number")
def __str__(self):
if self.formatted != "":
txt = "%s" % self.formatted
elif self.locality:
txt = ""
if self.street_number:
txt = "%s" % self.street_number
if self.route:
if txt:
txt += " %s" % self.route
locality = "%s" % self.locality
if txt and locality:
txt += ", "
txt += locality
else:
txt = "%s" % self.raw
return txt
def clean(self):
if not self.raw:
raise ValidationError("Addresses may not have a blank `raw` field.")
def as_dict(self):
ad = dict(
street_number=self.street_number,
route=self.route,
raw=self.raw,
formatted=self.formatted,
latitude=self.latitude if self.latitude else "",
longitude=self.longitude if self.longitude else "",
)
if self.locality:
ad["locality"] = self.locality.name
ad["postal_code"] = self.locality.postal_code
if self.locality.state:
ad["state"] = self.locality.state.name
ad["state_code"] = self.locality.state.code
if self.locality.state.country:
ad["country"] = self.locality.state.country.name
ad["country_code"] = self.locality.state.country.code
return ad
class AddressDescriptor(ForwardManyToOneDescriptor):
def __set__(self, inst, value):
super(AddressDescriptor, self).__set__(inst, to_python(value))
##
# A field for addresses in other models.
##
class AddressField(models.ForeignKey):
description = "An address"
def __init__(self, *args, **kwargs):
kwargs["to"] = "address.Address"
# The address should be set to null when deleted if the relationship could be null
default_on_delete = models.SET_NULL if kwargs.get("null", False) else models.CASCADE
kwargs["on_delete"] = kwargs.get("on_delete", default_on_delete)
super(AddressField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, virtual_only=False):
from address.compat import compat_contribute_to_class
compat_contribute_to_class(self, cls, name, virtual_only)
setattr(cls, self.name, AddressDescriptor(self))
def formfield(self, **kwargs):
from .forms import AddressField as AddressFormField
defaults = dict(form_class=AddressFormField)
defaults.update(kwargs)
return super(AddressField, self).formfield(**defaults)
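# Hedged usage sketch (commented out; the Venue model is hypothetical and this
# assumes the app is installed under the label "address" so the string
# reference "address.Address" resolves): AddressField behaves like a ForeignKey
# to Address, but assignment goes through AddressDescriptor and to_python(), so
# raw strings and component dictionaries become Address rows automatically.
#
#   class Venue(models.Model):
#       name = models.CharField(max_length=100)
#       location = AddressField(blank=True, null=True)  # null=True => on_delete defaults to SET_NULL
#
#   venue = Venue(name='HQ')
#   venue.location = '1 Example Street, Springfield'  # stored as Address(raw=...)
#   venue.save()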
|
|
from pandac.PandaModules import Point3
from direct.distributed.ClockDelta import globalClockDelta
from direct.fsm import ClassicFSM, State
from direct.task import Task
from toontown.minigame import DistributedMinigameAI
from toontown.minigame import MinigameGlobals
from toontown.minigame import IceGameGlobals
from toontown.ai.ToonBarrier import ToonBarrier
from direct.directnotify.DirectNotifyGlobal import directNotify
class DistributedIceGameAI(DistributedMinigameAI.DistributedMinigameAI):
notify = directNotify.newCategory('DistributedIceGameAI')
def __init__(self, air, minigameId):
try:
self.DistributedIceGameAI_initialized
except:
self.DistributedIceGameAI_initialized = 1
DistributedMinigameAI.DistributedMinigameAI.__init__(self, air, minigameId)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedIceGameAI', [State.State('off', self.enterOff, self.exitOff, ['waitClientsChoices']),
State.State('waitClientsChoices', self.enterWaitClientsChoices, self.exitWaitClientsChoices, ['cleanup', 'processChoices']),
State.State('processChoices', self.enterProcessChoices, self.exitProcessChoices, ['waitEndingPositions', 'cleanup']),
State.State('waitEndingPositions', self.enterWaitEndingPositions, self.exitWaitEndingPositions, ['processEndingPositions', 'cleanup']),
State.State('processEndingPositions', self.enterProcessEndingPositions, self.exitProcessEndingPositions, ['waitClientsChoices', 'scoreMatch', 'cleanup']),
State.State('scoreMatch', self.enterScoreMatch, self.exitScoreMatch, ['waitClientsChoices', 'finalResults', 'cleanup']),
State.State('finalResults', self.enterFinalResults, self.exitFinalResults, ['cleanup']),
State.State('cleanup', self.enterCleanup, self.exitCleanup, ['off'])], 'off', 'off')
self.addChildGameFSM(self.gameFSM)
self.avatarChoices = {}
self.avatarEndingPositions = {}
self.curRound = 0
self.curMatch = 0
self.finalEndingPositions = [Point3(IceGameGlobals.StartingPositions[0]),
Point3(IceGameGlobals.StartingPositions[1]),
Point3(IceGameGlobals.StartingPositions[2]),
Point3(IceGameGlobals.StartingPositions[3])]
def generate(self):
self.notify.debug('generate')
DistributedMinigameAI.DistributedMinigameAI.generate(self)
def delete(self):
self.notify.debug('delete')
taskMgr.remove(self.taskName('wait-choices-timeout'))
taskMgr.remove(self.taskName('endingPositionsTimeout'))
del self.gameFSM
DistributedMinigameAI.DistributedMinigameAI.delete(self)
def setGameReady(self):
self.notify.debug('setGameReady')
DistributedMinigameAI.DistributedMinigameAI.setGameReady(self)
self.numTreasures = IceGameGlobals.NumTreasures[self.getSafezoneId()]
self.numTreasuresTaken = 0
self.takenTreasuresTable = [0] * self.numTreasures
self.numPenalties = IceGameGlobals.NumPenalties[self.getSafezoneId()]
self.numPenaltiesTaken = 0
self.takenPenaltiesTable = [0] * self.numPenalties
def setGameStart(self, timestamp):
self.notify.debug('setGameStart')
DistributedMinigameAI.DistributedMinigameAI.setGameStart(self, timestamp)
self.gameFSM.request('waitClientsChoices')
def setGameAbort(self):
self.notify.debug('setGameAbort')
if self.gameFSM.getCurrentState():
self.gameFSM.request('cleanup')
DistributedMinigameAI.DistributedMinigameAI.setGameAbort(self)
def gameOver(self):
self.notify.debug('gameOver')
self.gameFSM.request('cleanup')
DistributedMinigameAI.DistributedMinigameAI.gameOver(self)
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.gameFSM.request('off')
def exitCleanup(self):
pass
def enterWaitClientsChoices(self):
self.notify.debug('enterWaitClientsChoices')
self.resetChoices()
self.sendUpdate('setMatchAndRound', [self.curMatch, self.curRound])
self.sendUpdate('setNewState', ['inputChoice'])
taskMgr.doMethodLater(IceGameGlobals.InputTimeout, self.waitClientsChoicesTimeout, self.taskName('wait-choices-timeout'))
self.sendUpdate('setTimerStartTime', [globalClockDelta.getFrameNetworkTime()])
def exitWaitClientsChoices(self):
self.notify.debug('exitWaitClientsChoices')
taskMgr.remove(self.taskName('wait-choices-timeout'))
def enterProcessChoices(self):
forceAndHeading = []
for avId in self.avIdList:
force = self.avatarChoices[avId][0]
heading = self.avatarChoices[avId][1]
forceAndHeading.append([force, heading])
self.notify.debug('tireInputs = %s' % forceAndHeading)
self.sendUpdate('setTireInputs', [forceAndHeading])
self.gameFSM.request('waitEndingPositions')
def exitProcessChoices(self):
pass
def enterWaitEndingPositions(self):
if self.curRound == 0:
self.takenTreasuresTable = [0] * self.numTreasures
self.takenPenaltiesTable = [0] * self.numPenalties
        taskMgr.doMethodLater(IceGameGlobals.InputTimeout, self.endingPositionsTimeout, self.taskName('endingPositionsTimeout'))
self.avatarEndingPositions = {}
def exitWaitEndingPositions(self):
taskMgr.remove(self.taskName('endingPositionsTimeout'))
def enterProcessEndingPositions(self):
averagePos = [Point3(0, 0, 0),
Point3(0, 0, 0),
Point3(0, 0, 0),
Point3(0, 0, 0)]
divisor = 0
for avId in self.avatarEndingPositions.keys():
divisor += 1
oneClientEndingPositions = self.avatarEndingPositions[avId]
avIndex = self.avIdList.index(avId)
for index in xrange(len(oneClientEndingPositions)):
pos = oneClientEndingPositions[index]
averagePos[index] += Point3(pos[0], pos[1], pos[2])
self.notify.debug('index = %d averagePos = %s' % (index, averagePos))
sentPos = []
if divisor:
for newPos in averagePos:
newPos /= divisor
newPos.setZ(IceGameGlobals.TireRadius)
sentPos.append([newPos[0], newPos[1], newPos[2]])
else:
sentPos = self.finalEndingPositions
self.sendUpdate('setFinalPositions', [sentPos])
self.finalEndingPositions = sentPos
if self.curMatch == IceGameGlobals.NumMatches - 1 and self.curRound == IceGameGlobals.NumRounds - 1:
self.gameFSM.request('scoreMatch')
elif self.curRound == IceGameGlobals.NumRounds - 1:
self.gameFSM.request('scoreMatch')
else:
self.curRound += 1
self.sendUpdate('setMatchAndRound', [self.curMatch, self.curRound])
self.gameFSM.request('waitClientsChoices')
def exitProcessEndingPositions(self):
pass
def enterScoreMatch(self):
sortedByDistance = []
for avId in self.avIdList:
index = self.avIdList.index(avId)
pos = Point3(*self.finalEndingPositions[index])
pos.setZ(0)
sortedByDistance.append((avId, pos.length()))
def compareDistance(x, y):
if x[1] - y[1] > 0:
return 1
elif x[1] - y[1] < 0:
return -1
else:
return 0
sortedByDistance.sort(cmp=compareDistance)
self.scoresAsList = []
totalPointsAdded = 0
        for index in xrange(len(self.avIdList)):
            avId = self.avIdList[index]
            pos = Point3(*self.finalEndingPositions[index])
            pos.setZ(0)
            length = pos.length()
            points = length / IceGameGlobals.FarthestLength * (IceGameGlobals.PointsInCorner - IceGameGlobals.PointsDeadCenter[self.numPlayers])
            points += IceGameGlobals.PointsDeadCenter[self.numPlayers]
            self.notify.debug('length = %s points=%s avId=%d' % (length, points, avId))
bonusIndex = 0
for sortIndex in xrange(len(sortedByDistance)):
if sortedByDistance[sortIndex][0] == avId:
bonusIndex = sortIndex
bonusIndex += 4 - len(self.avIdList)
pointsToAdd = int(points + 0.5) + IceGameGlobals.BonusPointsForPlace[bonusIndex]
totalPointsAdded += pointsToAdd
self.scoreDict[avId] += pointsToAdd
self.scoresAsList.append(self.scoreDict[avId])
self.curMatch += 1
self.curRound = 0
self.sendUpdate('setScores', [self.curMatch, self.curRound, self.scoresAsList])
self.sendUpdate('setNewState', ['scoring'])
def allToonsScoringMovieDone(self = self):
self.notify.debug('allToonsScoringMovieDone')
if self.curMatch == IceGameGlobals.NumMatches:
self.gameFSM.request('finalResults')
else:
self.gameFSM.request('waitClientsChoices')
def handleTimeout(avIds, self = self):
self.notify.debug('handleTimeout: avatars %s did not report "done"' % avIds)
if self.curMatch == IceGameGlobals.NumMatches:
self.gameFSM.request('finalResults')
else:
self.gameFSM.request('waitClientsChoices')
scoreMovieDuration = IceGameGlobals.FarthestLength * IceGameGlobals.ExpandFeetPerSec
scoreMovieDuration += totalPointsAdded * IceGameGlobals.ScoreCountUpRate
self.scoringMovieDoneBarrier = ToonBarrier('waitScoringMovieDone', self.uniqueName('waitScoringMovieDone'), self.avIdList, scoreMovieDuration + MinigameGlobals.latencyTolerance, allToonsScoringMovieDone, handleTimeout)
def exitScoreMatch(self):
self.scoringMovieDoneBarrier.cleanup()
self.scoringMovieDoneBarrier = None
return
def enterFinalResults(self):
self.checkScores()
self.sendUpdate('setNewState', ['finalResults'])
taskMgr.doMethodLater(IceGameGlobals.ShowScoresDuration, self.__doneShowingScores, self.taskName('waitShowScores'))
def exitFinalResults(self):
taskMgr.remove(self.taskName('waitShowScores'))
def __doneShowingScores(self, task):
self.notify.debug('doneShowingScores')
self.gameOver()
return Task.done
def waitClientsChoicesTimeout(self, task):
self.notify.debug('waitClientsChoicesTimeout: did not hear from all clients')
for avId in self.avatarChoices.keys():
if self.avatarChoices[avId] == (-1, 0):
self.avatarChoices[avId] = (0, 0)
self.gameFSM.request('processChoices')
return Task.done
def resetChoices(self):
for avId in self.avIdList:
self.avatarChoices[avId] = (-1, 0)
def setAvatarChoice(self, force, direction):
avatarId = self.air.getAvatarIdFromSender()
self.notify.debug('setAvatarChoice: avatar: ' + str(avatarId) + ' votes: ' + str(force) + ' direction: ' + str(direction))
self.avatarChoices[avatarId] = self.checkChoice(avatarId, force, direction)
if self.allAvatarsChosen():
self.notify.debug('setAvatarChoice: all avatars have chosen')
self.gameFSM.request('processChoices')
else:
self.notify.debug('setAvatarChoice: still waiting for more choices')
def checkChoice(self, avId, force, direction):
retForce = force
retDir = direction
if retForce < 0:
retForce = 0
if retForce > 100:
retForce = 100
return (retForce, retDir)
def allAvatarsChosen(self):
for avId in self.avatarChoices.keys():
choice = self.avatarChoices[avId]
if choice[0] == -1 and not self.stateDict[avId] == DistributedMinigameAI.EXITED:
return False
return True
def endingPositions(self, positions):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
self.notify.debug('got endingPositions from client %s' % positions)
avId = self.air.getAvatarIdFromSender()
self.avatarEndingPositions[avId] = positions
if self.allAvatarsSentEndingPositions():
self.gameFSM.request('processEndingPositions')
def allAvatarsSentEndingPositions(self):
if len(self.avatarEndingPositions) == len(self.avIdList):
return True
return False
def endingPositionsTimeout(self, task):
self.notify.debug('endingPositionsTimeout : did not hear from all clients')
self.gameFSM.request('processEndingPositions')
return Task.done
def reportScoringMovieDone(self):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'scoreMatch':
return
avId = self.air.getAvatarIdFromSender()
self.notify.debug('reportScoringMovieDone: avatar %s is done' % avId)
self.scoringMovieDoneBarrier.clear(avId)
def claimTreasure(self, treasureNum):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
avId = self.air.getAvatarIdFromSender()
if avId not in self.scoreDict:
self.notify.warning('PROBLEM: avatar %s called claimTreasure(%s) but he is not in the scoreDict: %s. avIdList is: %s' % (avId,
treasureNum,
self.scoreDict,
self.avIdList))
return
if treasureNum < 0 or treasureNum >= self.numTreasures:
            self.air.writeServerEvent('warning', treasureNum, 'IceGameAI.claimTreasure treasureNum out of range')
return
if self.takenTreasuresTable[treasureNum]:
return
self.takenTreasuresTable[treasureNum] = 1
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('setTreasureGrabbed', [avId, treasureNum])
self.scoreDict[avId] += 1
self.numTreasuresTaken += 1
def claimPenalty(self, penaltyNum):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
avId = self.air.getAvatarIdFromSender()
if avId not in self.scoreDict:
self.notify.warning('PROBLEM: avatar %s called claimPenalty(%s) but he is not in the scoreDict: %s. avIdList is: %s' % (avId,
penaltyNum,
self.scoreDict,
self.avIdList))
return
if penaltyNum < 0 or penaltyNum >= self.numPenalties:
self.air.writeServerEvent('warning', penaltyNum, 'IceGameAI.claimPenalty penaltyNum out of range')
return
if self.takenPenaltiesTable[penaltyNum]:
return
self.takenPenaltiesTable[penaltyNum] = 1
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('setPenaltyGrabbed', [avId, penaltyNum])
self.scoreDict[avId] -= 1
self.numPenaltiesTaken += 1
def checkScores(self):
self.scoresAsList = []
for index in xrange(len(self.avIdList)):
avId = self.avIdList[index]
if self.scoreDict[avId] < 0:
self.scoreDict[avId] = 1
self.scoresAsList.append(self.scoreDict[avId])
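# Hedged worked example of the distance-to-points formula in enterScoreMatch(),
# using made-up constants (the real values live in IceGameGlobals and are not
# shown here). Suppose FarthestLength = 30, PointsInCorner = 9 and
# PointsDeadCenter[numPlayers] = 3, and a tire stops halfway to the corner:
#
#   length = 15.0
#   points = 15.0 / 30 * (9 - 3) + 3      # = 6.0
#   pointsToAdd = int(6.0 + 0.5) + IceGameGlobals.BonusPointsForPlace[bonusIndex]
#
# so a tire at dead center earns only PointsDeadCenter, one in the far corner
# earns PointsInCorner, and the per-place bonus is added before updating scoreDict.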
|
|
from freezegun import freeze_time
import pytest
from McAfee_ESM_v2 import *
from McAfee_ESM_v2 import McAfeeESMClient
list_test_filtering_incidents = [
{'id': 3},
{'id': 1},
{'id': 5},
{'id': 4},
{'id': 0},
{'id': 2}
]
data_test_filtering_incidents = [
(
(
0, 0
),
(
[5, 4, 3, 2, 1]
)
),
(
(
0, 1
),
(
[1]
)
),
(
(
0, 2
),
(
[2, 1]
)
),
(
(
3, 1
),
(
[4]
)
),
(
(
3, 0
),
(
[5, 4]
)
)
]
data_test_expected_errors = [
('error', False),
('', False),
('alarmUnacknowledgeTriggeredAlarm failed with error[ERROR_BadRequest (60)].', True),
('alarmAcknowledgeTriggeredAlarm failed with error[ERROR_BadRequest (60)].', True),
(
'qryGetResults failed with error[Error deserializing EsmQueryResults, see logs for more information '
'(Error deserializing EsmQueryResults, see logs for more information '
'(Internal communication error, see logs for more details))].',
True
)
]
data_test_time_format = [
('', 'time data \'\' does not match the time format.'),
('test', 'time data \'test\' does not match the time format.')
]
data_test_convert_time_format = [
(('2019-12-19T00:00:00', 0, False), '2019-12-19T00:00:00Z'),
(('2019-12-19T00:00:00', 2, False), '2019-12-19T02:00:00Z'),
(('2019-12-19T02:00:00', -2, False), '2019-12-19T00:00:00Z'),
(('2019-12-19T00:00:00Z', 0, False), '2019-12-19T00:00:00Z'),
(('2019-12-19T00:00:00Z', 2, False), '2019-12-19T02:00:00Z'),
(('2019-12-19T02:00:00Z', -2, False), '2019-12-19T00:00:00Z'),
(('2019/12/19 00:00:00', 0, True), '2019-12-19T00:00:00Z'),
(('2019/12/19 00:00:00', -2, True), '2019-12-19T02:00:00Z'),
(('2019/12/19 02:00:00', 2, True), '2019-12-19T00:00:00Z'),
]
data_test_set_query_times = [
((None, None, None, 0), ('CUSTOM', None, None)),
(('1 day', None, None, 0), ('1 day', '2019/12/31 00:00:00', None)),
(('LAST_WEEK', '', None, 0), ('LAST_WEEK', '', None)),
(('LAST_YEAR', 'TEST', None, 0), 'Invalid set times.'),
(('LAST_YEAR', None, 'TEST', 0), 'Invalid set times.'),
(
(None, '2020-01-01T00:00:00Z', '2020-01-01T00:00:00Z', 0),
('CUSTOM', '2020-01-01T00:00:00Z', '2020-01-01T00:00:00Z')
)
]
data_test_list_times_set = [
(
([], [], 0),
[]
),
(
([0, 0], [], 2),
[0, 0]
),
(
(['2019/12/19 00:00:00', '2019/12/19 00:00:00', 0, '2019/12/19 00:00:00'], [0, 1], 0),
['2019-12-19T00:00:00Z', '2019-12-19T00:00:00Z', 0, '2019/12/19 00:00:00']
),
(
([0, '2019/12/19 00:00:00'], [1], -2),
[0, '2019-12-19T02:00:00Z']
),
(
([0, '2019/12/19 00:00:00'], [], -2),
[0, '2019/12/19 00:00:00']
),
(
(['2019/12/19 00:00:00'], [0], -2),
['2019-12-19T02:00:00Z']
)
]
data_test_time_fields = [
[
['time', 'date'],
[0, 1]
],
[
['name', 'TiMe', 'Datetime'],
[1, 2]
],
[
[],
[]
],
[
['r', 't'],
[]
],
[
['', ''],
[]
]
]
data_test_mcafee_severity_to_demisto = [(100, 3), (65, 2), (32, 1), (0, 0)]
@pytest.mark.parametrize('test_input, output', data_test_filtering_incidents)
def test_filtering_incidents(test_input, output):
temp_output = filtering_incidents(list_test_filtering_incidents, test_input[0], test_input[1])
test_output = [0] * len(temp_output)
for i in range(len(temp_output)):
test_output[i] = temp_output[i]['id']
    assert test_output == output, f'filtering_incidents({test_input}) returns: {test_output} instead: {output}.'
@pytest.mark.parametrize('test_input, output', data_test_expected_errors)
def test_expected_errors(test_input, output):
assert expected_errors(test_input) == output, f'expected_errors({test_input})' \
f' returns: {not output} instead: {output}.'
@pytest.mark.parametrize('test_input, output', data_test_time_format)
def test_time_format(test_input, output):
test_output = None
try:
test_output = time_format(test_input)
except ValueError as error:
test_output = str(error)
finally:
assert test_output == output, f'time_format({test_input}) returns error: {test_output} instead: {output}.'
@pytest.mark.parametrize('test_input, output', data_test_convert_time_format)
def test_convert_time_format(test_input, output):
temp = convert_time_format(test_input[0], test_input[1], test_input[2])
assert temp == output, f'convert_time_format({test_input[0]}, {test_input[1]}, {test_input[2]}) ' \
f'returns: {temp} instead: {output}.'
@freeze_time('2020-01-01 00:00:00')
@pytest.mark.parametrize('test_input, output', data_test_set_query_times)
def test_set_query_times(test_input, output):
test_output = None
try:
test_output = set_query_times(test_input[0], test_input[1], test_input[2], test_input[3])
except ValueError as error:
test_output = str(error)
finally:
        assert test_output == output, f'set_query_times({test_input}) returns: {test_output} instead: {output}.'
@pytest.mark.parametrize('test_input, output', data_test_list_times_set)
def test_list_times_set(test_input, output):
temp = list_times_set(test_input[0], test_input[1], test_input[2])
assert temp == output, f'list_times_set({test_input[0]}, {test_input[1]}, {test_input[2]}) ' \
f'returns: {temp} instead: {output}.'
@pytest.mark.parametrize('test_input, output', data_test_time_fields)
def test_time_fields(test_input, output):
for i in range(len(test_input)):
test_input[i] = {'name': test_input[i]}
temp = time_fields(test_input)
assert temp == output, f'time_fields({test_input}) returns: {temp} instead: {output}.'
@pytest.mark.parametrize('test_input, output', data_test_mcafee_severity_to_demisto)
def test_mcafee_severity_to_demisto(test_input, output):
temp = mcafee_severity_to_demisto(test_input)
assert temp == output, f'mcafee_severity_to_demisto({test_input}) returns: {temp} instead: {output}.'
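# Hedged sketch: this is NOT the integration's actual implementation, just one
# threshold mapping consistent with data_test_mcafee_severity_to_demisto above
# (McAfee severities 100/65/32/0 -> Demisto severities 3/2/1/0).
def _example_severity_mapping(severity):
    # higher McAfee severity buckets map to higher Demisto severity values
    if severity > 65:
        return 3
    if severity > 32:
        return 2
    if severity > 0:
        return 1
    return 0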
@pytest.mark.filterwarnings('ignore::urllib3.exceptions.InsecureRequestWarning')
def test_edit_case(mocker):
params = {
"url": "https://example.com",
"insecure": True,
"credentials": {
"identifier": "TEST",
"password": "TEST"
},
"version": "11.3"}
raw_response_has_event_list = {"assignedTo": 8207, "closeTime": "2021-05-25T10:29:17Z", "dataSourceList": ["47"],
"deviceList": None,
"eventList": [
{"id": "144117387300438016|6204912068", "lastTime": "2021-05-25T09:47:10Z",
"message": "TEST"}],
"history": "\n------- Viewed: 05/25/2021 10:26:37(GMT)"
"TEST@TEST -------\n\n------- Viewed: 05/25/2021 10:27:34("
"GMT) "
" TEST@TEST -------\n",
"id": 58136,
"notes": "------- Opened on 2021/05/25 09:53:53(GMT) by Triggered Condition -------"
"\n\n------- In Progress: 05/25/2021 10:29:17(GMT) Xsoar@TEST -------"
"\n\n------- Changes: 05/25/2021 10:29:17(GMT) Xsoar@TEST -------"
"\n Organization\n old: None\n new: BRD"
"\n\n", "openTime": "2021-05-25T09:53:53Z",
"orgId": 2, "severity": 50, "statusId": 3,
"summary": "ALERT - Scan"}
mocker.patch.object(McAfeeESMClient, '_McAfeeESMClient__login', return_value={})
mocker.patch.object(McAfeeESMClient, '_McAfeeESMClient__request', return_value={})
mocker.patch.object(McAfeeESMClient, 'get_case_detail', return_value=('', {}, raw_response_has_event_list))
client = McAfeeESMClient(params)
client.edit_case()
result = client._McAfeeESMClient__request.call_args.kwargs['data']['caseDetail']
assert len(result['eventList']) > 0
|
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.unitary_equipment import AirLoopHvacUnitarySystem
log = logging.getLogger(__name__)
class TestAirLoopHvacUnitarySystem(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_airloophvacunitarysystem(self):
pyidf.validation_level = ValidationLevel.error
obj = AirLoopHvacUnitarySystem()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_control_type = "Load"
obj.control_type = var_control_type
# object-list
var_controlling_zone_or_thermostat_location = "object-list|Controlling Zone or Thermostat Location"
obj.controlling_zone_or_thermostat_location = var_controlling_zone_or_thermostat_location
# alpha
var_dehumidification_control_type = "None"
obj.dehumidification_control_type = var_dehumidification_control_type
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# alpha
var_supply_fan_object_type = "Fan:OnOff"
obj.supply_fan_object_type = var_supply_fan_object_type
# object-list
var_supply_fan_name = "object-list|Supply Fan Name"
obj.supply_fan_name = var_supply_fan_name
# alpha
var_fan_placement = "BlowThrough"
obj.fan_placement = var_fan_placement
# object-list
var_supply_air_fan_operating_mode_schedule_name = "object-list|Supply Air Fan Operating Mode Schedule Name"
obj.supply_air_fan_operating_mode_schedule_name = var_supply_air_fan_operating_mode_schedule_name
# alpha
var_heating_coil_object_type = "Coil:Heating:DX:SingleSpeed"
obj.heating_coil_object_type = var_heating_coil_object_type
# object-list
var_heating_coil_name = "object-list|Heating Coil Name"
obj.heating_coil_name = var_heating_coil_name
# real
var_dx_heating_coil_sizing_ratio = 0.0001
obj.dx_heating_coil_sizing_ratio = var_dx_heating_coil_sizing_ratio
# alpha
var_cooling_coil_object_type = "Coil:Cooling:DX:SingleSpeed"
obj.cooling_coil_object_type = var_cooling_coil_object_type
# object-list
var_cooling_coil_name = "object-list|Cooling Coil Name"
obj.cooling_coil_name = var_cooling_coil_name
# alpha
var_use_doas_dx_cooling_coil = "Yes"
obj.use_doas_dx_cooling_coil = var_use_doas_dx_cooling_coil
# real
var_doas_dx_cooling_coil_leaving_minimum_air_temperature = 3.6
obj.doas_dx_cooling_coil_leaving_minimum_air_temperature = var_doas_dx_cooling_coil_leaving_minimum_air_temperature
# alpha
var_latent_load_control = "SensibleOnlyLoadControl"
obj.latent_load_control = var_latent_load_control
# alpha
var_supplemental_heating_coil_object_type = "Coil:Heating:Gas"
obj.supplemental_heating_coil_object_type = var_supplemental_heating_coil_object_type
# object-list
var_supplemental_heating_coil_name = "object-list|Supplemental Heating Coil Name"
obj.supplemental_heating_coil_name = var_supplemental_heating_coil_name
# alpha
var_cooling_supply_air_flow_rate_method = "None"
obj.cooling_supply_air_flow_rate_method = var_cooling_supply_air_flow_rate_method
# real
var_cooling_supply_air_flow_rate = 0.0
obj.cooling_supply_air_flow_rate = var_cooling_supply_air_flow_rate
# real
var_cooling_supply_air_flow_rate_per_floor_area = 0.0
obj.cooling_supply_air_flow_rate_per_floor_area = var_cooling_supply_air_flow_rate_per_floor_area
# real
var_cooling_fraction_of_autosized_cooling_supply_air_flow_rate = 0.0
obj.cooling_fraction_of_autosized_cooling_supply_air_flow_rate = var_cooling_fraction_of_autosized_cooling_supply_air_flow_rate
# real
var_cooling_supply_air_flow_rate_per_unit_of_capacity = 0.0
obj.cooling_supply_air_flow_rate_per_unit_of_capacity = var_cooling_supply_air_flow_rate_per_unit_of_capacity
# alpha
var_heating_supply_air_flow_rate_method = "None"
obj.heating_supply_air_flow_rate_method = var_heating_supply_air_flow_rate_method
# real
var_heating_supply_air_flow_rate = 0.0
obj.heating_supply_air_flow_rate = var_heating_supply_air_flow_rate
# real
var_heating_supply_air_flow_rate_per_floor_area = 0.0
obj.heating_supply_air_flow_rate_per_floor_area = var_heating_supply_air_flow_rate_per_floor_area
# real
var_heating_fraction_of_autosized_heating_supply_air_flow_rate = 0.0
obj.heating_fraction_of_autosized_heating_supply_air_flow_rate = var_heating_fraction_of_autosized_heating_supply_air_flow_rate
# real
var_heating_supply_air_flow_rate_per_unit_of_capacity = 0.0
obj.heating_supply_air_flow_rate_per_unit_of_capacity = var_heating_supply_air_flow_rate_per_unit_of_capacity
# alpha
var_no_load_supply_air_flow_rate_method = "None"
obj.no_load_supply_air_flow_rate_method = var_no_load_supply_air_flow_rate_method
# real
var_no_load_supply_air_flow_rate = 0.0
obj.no_load_supply_air_flow_rate = var_no_load_supply_air_flow_rate
# real
var_no_load_supply_air_flow_rate_per_floor_area = 0.0
obj.no_load_supply_air_flow_rate_per_floor_area = var_no_load_supply_air_flow_rate_per_floor_area
# real
var_no_load_fraction_of_autosized_cooling_supply_air_flow_rate = 0.0
obj.no_load_fraction_of_autosized_cooling_supply_air_flow_rate = var_no_load_fraction_of_autosized_cooling_supply_air_flow_rate
# real
var_no_load_fraction_of_autosized_heating_supply_air_flow_rate = 0.0
obj.no_load_fraction_of_autosized_heating_supply_air_flow_rate = var_no_load_fraction_of_autosized_heating_supply_air_flow_rate
# real
var_no_load_supply_air_flow_rate_per_unit_of_capacity_during_cooling_operation = 0.0
obj.no_load_supply_air_flow_rate_per_unit_of_capacity_during_cooling_operation = var_no_load_supply_air_flow_rate_per_unit_of_capacity_during_cooling_operation
# real
var_no_load_supply_air_flow_rate_per_unit_of_capacity_during_heating_operation = 0.0
obj.no_load_supply_air_flow_rate_per_unit_of_capacity_during_heating_operation = var_no_load_supply_air_flow_rate_per_unit_of_capacity_during_heating_operation
# real
var_maximum_supply_air_temperature = 39.39
obj.maximum_supply_air_temperature = var_maximum_supply_air_temperature
# real
var_maximum_outdoor_drybulb_temperature_for_supplemental_heater_operation = 40.4
obj.maximum_outdoor_drybulb_temperature_for_supplemental_heater_operation = var_maximum_outdoor_drybulb_temperature_for_supplemental_heater_operation
# node
var_outdoor_drybulb_temperature_sensor_node_name = "node|Outdoor Dry-Bulb Temperature Sensor Node Name"
obj.outdoor_drybulb_temperature_sensor_node_name = var_outdoor_drybulb_temperature_sensor_node_name
# real
var_maximum_cycling_rate = 2.5
obj.maximum_cycling_rate = var_maximum_cycling_rate
# real
var_heat_pump_time_constant = 250.0
obj.heat_pump_time_constant = var_heat_pump_time_constant
# real
var_fraction_of_oncycle_power_use = 0.025
obj.fraction_of_oncycle_power_use = var_fraction_of_oncycle_power_use
# real
var_heat_pump_fan_delay_time = 0.0
obj.heat_pump_fan_delay_time = var_heat_pump_fan_delay_time
# real
var_ancillary_oncycle_electric_power = 0.0
obj.ancillary_oncycle_electric_power = var_ancillary_oncycle_electric_power
# real
var_ancillary_offcycle_electric_power = 0.0
obj.ancillary_offcycle_electric_power = var_ancillary_offcycle_electric_power
# real
var_design_heat_recovery_water_flow_rate = 0.0
obj.design_heat_recovery_water_flow_rate = var_design_heat_recovery_water_flow_rate
# real
var_maximum_temperature_for_heat_recovery = 50.0
obj.maximum_temperature_for_heat_recovery = var_maximum_temperature_for_heat_recovery
# node
var_heat_recovery_water_inlet_node_name = "node|Heat Recovery Water Inlet Node Name"
obj.heat_recovery_water_inlet_node_name = var_heat_recovery_water_inlet_node_name
# node
var_heat_recovery_water_outlet_node_name = "node|Heat Recovery Water Outlet Node Name"
obj.heat_recovery_water_outlet_node_name = var_heat_recovery_water_outlet_node_name
# alpha
var_design_specification_multispeed_object_type = "UnitarySystemPerformance:Multispeed"
obj.design_specification_multispeed_object_type = var_design_specification_multispeed_object_type
# object-list
var_design_specification_multispeed_object_name = "object-list|Design Specification Multispeed Object Name"
obj.design_specification_multispeed_object_name = var_design_specification_multispeed_object_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.airloophvacunitarysystems[0].name, var_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].control_type, var_control_type)
self.assertEqual(idf2.airloophvacunitarysystems[0].controlling_zone_or_thermostat_location, var_controlling_zone_or_thermostat_location)
self.assertEqual(idf2.airloophvacunitarysystems[0].dehumidification_control_type, var_dehumidification_control_type)
self.assertEqual(idf2.airloophvacunitarysystems[0].availability_schedule_name, var_availability_schedule_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].supply_fan_object_type, var_supply_fan_object_type)
self.assertEqual(idf2.airloophvacunitarysystems[0].supply_fan_name, var_supply_fan_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].fan_placement, var_fan_placement)
self.assertEqual(idf2.airloophvacunitarysystems[0].supply_air_fan_operating_mode_schedule_name, var_supply_air_fan_operating_mode_schedule_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].heating_coil_object_type, var_heating_coil_object_type)
self.assertEqual(idf2.airloophvacunitarysystems[0].heating_coil_name, var_heating_coil_name)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].dx_heating_coil_sizing_ratio, var_dx_heating_coil_sizing_ratio)
self.assertEqual(idf2.airloophvacunitarysystems[0].cooling_coil_object_type, var_cooling_coil_object_type)
self.assertEqual(idf2.airloophvacunitarysystems[0].cooling_coil_name, var_cooling_coil_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].use_doas_dx_cooling_coil, var_use_doas_dx_cooling_coil)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].doas_dx_cooling_coil_leaving_minimum_air_temperature, var_doas_dx_cooling_coil_leaving_minimum_air_temperature)
self.assertEqual(idf2.airloophvacunitarysystems[0].latent_load_control, var_latent_load_control)
self.assertEqual(idf2.airloophvacunitarysystems[0].supplemental_heating_coil_object_type, var_supplemental_heating_coil_object_type)
self.assertEqual(idf2.airloophvacunitarysystems[0].supplemental_heating_coil_name, var_supplemental_heating_coil_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].cooling_supply_air_flow_rate_method, var_cooling_supply_air_flow_rate_method)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].cooling_supply_air_flow_rate, var_cooling_supply_air_flow_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].cooling_supply_air_flow_rate_per_floor_area, var_cooling_supply_air_flow_rate_per_floor_area)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].cooling_fraction_of_autosized_cooling_supply_air_flow_rate, var_cooling_fraction_of_autosized_cooling_supply_air_flow_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].cooling_supply_air_flow_rate_per_unit_of_capacity, var_cooling_supply_air_flow_rate_per_unit_of_capacity)
self.assertEqual(idf2.airloophvacunitarysystems[0].heating_supply_air_flow_rate_method, var_heating_supply_air_flow_rate_method)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].heating_supply_air_flow_rate, var_heating_supply_air_flow_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].heating_supply_air_flow_rate_per_floor_area, var_heating_supply_air_flow_rate_per_floor_area)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].heating_fraction_of_autosized_heating_supply_air_flow_rate, var_heating_fraction_of_autosized_heating_supply_air_flow_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].heating_supply_air_flow_rate_per_unit_of_capacity, var_heating_supply_air_flow_rate_per_unit_of_capacity)
self.assertEqual(idf2.airloophvacunitarysystems[0].no_load_supply_air_flow_rate_method, var_no_load_supply_air_flow_rate_method)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].no_load_supply_air_flow_rate, var_no_load_supply_air_flow_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].no_load_supply_air_flow_rate_per_floor_area, var_no_load_supply_air_flow_rate_per_floor_area)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].no_load_fraction_of_autosized_cooling_supply_air_flow_rate, var_no_load_fraction_of_autosized_cooling_supply_air_flow_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].no_load_fraction_of_autosized_heating_supply_air_flow_rate, var_no_load_fraction_of_autosized_heating_supply_air_flow_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].no_load_supply_air_flow_rate_per_unit_of_capacity_during_cooling_operation, var_no_load_supply_air_flow_rate_per_unit_of_capacity_during_cooling_operation)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].no_load_supply_air_flow_rate_per_unit_of_capacity_during_heating_operation, var_no_load_supply_air_flow_rate_per_unit_of_capacity_during_heating_operation)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].maximum_supply_air_temperature, var_maximum_supply_air_temperature)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].maximum_outdoor_drybulb_temperature_for_supplemental_heater_operation, var_maximum_outdoor_drybulb_temperature_for_supplemental_heater_operation)
self.assertEqual(idf2.airloophvacunitarysystems[0].outdoor_drybulb_temperature_sensor_node_name, var_outdoor_drybulb_temperature_sensor_node_name)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].maximum_cycling_rate, var_maximum_cycling_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].heat_pump_time_constant, var_heat_pump_time_constant)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].fraction_of_oncycle_power_use, var_fraction_of_oncycle_power_use)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].heat_pump_fan_delay_time, var_heat_pump_fan_delay_time)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].ancillary_oncycle_electric_power, var_ancillary_oncycle_electric_power)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].ancillary_offcycle_electric_power, var_ancillary_offcycle_electric_power)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].design_heat_recovery_water_flow_rate, var_design_heat_recovery_water_flow_rate)
self.assertAlmostEqual(idf2.airloophvacunitarysystems[0].maximum_temperature_for_heat_recovery, var_maximum_temperature_for_heat_recovery)
self.assertEqual(idf2.airloophvacunitarysystems[0].heat_recovery_water_inlet_node_name, var_heat_recovery_water_inlet_node_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].heat_recovery_water_outlet_node_name, var_heat_recovery_water_outlet_node_name)
self.assertEqual(idf2.airloophvacunitarysystems[0].design_specification_multispeed_object_type, var_design_specification_multispeed_object_type)
self.assertEqual(idf2.airloophvacunitarysystems[0].design_specification_multispeed_object_name, var_design_specification_multispeed_object_name)
|
|
# -*- coding: utf-8 -*-
'''
Salt interface to ZFS commands
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
'''
from __future__ import absolute_import
# Import Python libs
import logging
# Import Salt libs
import salt.utils
import salt.modules.cmdmod
import salt.utils.decorators as decorators
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
# Function alias to set mapping.
__func_alias__ = {
'list_': 'list',
}
@decorators.memoize
def _check_zfs():
'''
Looks to see if zfs is present on the system.
'''
# Get the path to the zfs binary.
return salt.utils.which('zfs')
@decorators.memoize
def _check_features():
'''
Looks to see if zpool-features is available
'''
# get man location
man = salt.utils.which('man')
if not man:
return False
cmd = '{man} zpool-features'.format(
man=man
)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
return res['retcode'] == 0
def __virtual__():
'''
Makes sure that ZFS kernel module is loaded.
'''
on_freebsd = __grains__['kernel'] == 'FreeBSD'
on_linux = __grains__['kernel'] == 'Linux'
on_solaris = __grains__['kernel'] == 'SunOS' and __grains__['kernelrelease'] == '5.11'
cmd = ''
if on_freebsd:
cmd = 'kldstat -q -m zfs'
elif on_linux:
modinfo = salt.utils.which('modinfo')
if modinfo:
cmd = '{0} zfs'.format(modinfo)
else:
cmd = 'ls /sys/module/zfs'
elif on_solaris:
# not using salt.utils.which('zfs') to keep compatible with others
cmd = 'which zfs'
if cmd and salt.modules.cmdmod.retcode(
cmd, output_loglevel='quiet', ignore_retcode=True
) == 0:
return 'zfs'
return (False, "The zfs module cannot be loaded: zfs not found")
def exists(name, **kwargs):
'''
.. versionadded:: 2015.5.0
    Check if a ZFS filesystem, volume, or snapshot exists.
name : string
name of dataset
type : string
also check if dataset is of a certain type, valid choices are:
filesystem, snapshot, volume, bookmark, or all.
CLI Example:
.. code-block:: bash
salt '*' zfs.exists myzpool/mydataset
salt '*' zfs.exists myzpool/myvolume type=volume
'''
zfs = _check_zfs()
ltype = kwargs.get('type', None)
cmd = '{0} list {1}{2}'.format(zfs, '-t {0} '.format(ltype) if ltype else '', name)
res = __salt__['cmd.run_all'](cmd, ignore_retcode=True)
return res['retcode'] == 0
def create(name, **kwargs):
'''
.. versionadded:: 2015.5.0
.. versionchanged:: 2016.3.0
Create a ZFS File System.
name : string
name of dataset or volume
volume_size : string
if specified, a zvol will be created instead of a dataset
sparse : boolean
create sparse volume
create_parent : boolean
        creates all the non-existing parent datasets. Any property specified on the
        command line using the -o option is ignored.
properties : dict
additional zfs properties (-o)
.. note::
ZFS properties can be specified at the time of creation of the filesystem by
passing an additional argument called "properties" and specifying the properties
with their respective values in the form of a python dictionary::
properties="{'property1': 'value1', 'property2': 'value2'}"
CLI Example:
.. code-block:: bash
salt '*' zfs.create myzpool/mydataset [create_parent=True|False]
salt '*' zfs.create myzpool/mydataset properties="{'mountpoint': '/export/zfs', 'sharenfs': 'on'}"
salt '*' zfs.create myzpool/volume volume_size=1G [sparse=True|False]`
salt '*' zfs.create myzpool/volume volume_size=1G properties="{'volblocksize': '512'}" [sparse=True|False]
'''
ret = {}
zfs = _check_zfs()
properties = kwargs.get('properties', None)
create_parent = kwargs.get('create_parent', False)
volume_size = kwargs.get('volume_size', None)
sparse = kwargs.get('sparse', False)
cmd = '{0} create'.format(zfs)
if create_parent:
cmd = '{0} -p'.format(cmd)
if volume_size and sparse:
cmd = '{0} -s'.format(cmd)
# if zpool properties specified, then
# create "-o property=value" pairs
if properties:
optlist = []
for prop in properties.keys():
if isinstance(properties[prop], bool): # salt breaks the on/off/yes/no properties :(
properties[prop] = 'on' if properties[prop] else 'off'
optlist.append('-o {0}={1}'.format(prop, properties[prop]))
opts = ' '.join(optlist)
cmd = '{0} {1}'.format(cmd, opts)
if volume_size:
cmd = '{0} -V {1}'.format(cmd, volume_size)
# append name
cmd = '{0} {1}'.format(cmd, name)
# Create filesystem
res = __salt__['cmd.run_all'](cmd)
# Check and see if the dataset is available
if res['retcode'] != 0:
ret[name] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[name] = 'created'
return ret
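# Hedged example of the command assembled above (assuming _check_zfs() resolves
# to /sbin/zfs; values are illustrative):
#
#   create('myzpool/myvol', volume_size='1G', sparse=True,
#          properties={'compression': True})
#
# builds and runs
#
#   /sbin/zfs create -s -o compression=on -V 1G myzpool/myvol
#
# note that boolean property values are rewritten to 'on'/'off' before being
# passed as -o pairs.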
def destroy(name, **kwargs):
'''
.. versionadded:: 2015.5.0
Destroy a ZFS File System.
name : string
name of dataset, volume, or snapshot
force : boolean
force an unmount of any file systems using the unmount -f command.
recursive : boolean
recursively destroy all children. (-r)
recursive_all : boolean
recursively destroy all dependents, including cloned file systems
outside the target hierarchy. (-R)
.. warning::
watch out when using recursive and recursive_all
CLI Example:
.. code-block:: bash
salt '*' zfs.destroy myzpool/mydataset [force=True|False]
'''
ret = {}
zfs = _check_zfs()
force = kwargs.get('force', False)
recursive = kwargs.get('recursive', False)
recursive_all = kwargs.get('recursive_all', False)
cmd = '{0} destroy'.format(zfs)
if recursive_all:
cmd = '{0} -R'.format(cmd)
if force:
cmd = '{0} -f'.format(cmd)
if recursive:
cmd = '{0} -r'.format(cmd)
cmd = '{0} {1}'.format(cmd, name)
res = __salt__['cmd.run_all'](cmd)
if res['retcode'] != 0:
if "operation does not apply to pools" in res['stderr']:
ret[name] = '{0}, use zpool.destroy to destroy the pool'.format(res['stderr'].splitlines()[0])
if "has children" in res['stderr']:
ret[name] = '{0}, you can add the "recursive=True" parameter'.format(res['stderr'].splitlines()[0])
else:
ret[name] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[name] = 'destroyed'
return ret
def rename(name, new_name, **kwargs):
'''
.. versionadded:: 2015.5.0
.. versionchanged:: 2016.3.0
Rename or Relocate a ZFS File System.
name : string
name of dataset, volume, or snapshot
new_name : string
new name of dataset, volume, or snapshot
force : boolean
force unmount any filesystems that need to be unmounted in the process.
create_parent : boolean
creates all the nonexistent parent datasets. Datasets created in
this manner are automatically mounted according to the mountpoint
property inherited from their parent.
recursive : boolean
recursively rename the snapshots of all descendent datasets.
snapshots are the only dataset that can be renamed recursively.
CLI Example:
.. code-block:: bash
salt '*' zfs.rename myzpool/mydataset myzpool/renameddataset
'''
ret = {}
zfs = _check_zfs()
create_parent = kwargs.get('create_parent', False)
force = kwargs.get('force', False)
recursive = kwargs.get('recursive', False)
# fix up conflicting parameters
if recursive:
if '@' in name: # -p and -f don't work with -r
create_parent = False
force = False
else: # -r only works with snapshots
recursive = False
if create_parent and '@' in name: # doesn't work with snapshots
create_parent = False
res = __salt__['cmd.run_all']('{zfs} rename {force}{create_parent}{recursive}{name} {new_name}'.format(
zfs=zfs,
force='-f ' if force else '',
create_parent='-p ' if create_parent else '',
recursive='-r ' if recursive else '',
name=name,
new_name=new_name
))
if res['retcode'] != 0:
ret[name] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[name] = 'renamed to {0}'.format(new_name)
return ret
def list_(name=None, **kwargs):
'''
.. versionadded:: 2015.5.0
.. versionchanged:: 2016.3.0
Return a list of all datasets or a specified dataset on the system and the
values of their used, available, referenced, and mountpoint properties.
name : string
name of dataset, volume, or snapshot
recursive : boolean
recursively list children
depth : int
limit recursion to depth
properties : string
comma-separated list of properties to list, the name property will always be added
type : string
comma-separated list of types to display, where type is one of
filesystem, snapshot, volume, bookmark, or all.
sort : string
property to sort on (default = name)
order : string [ascending|descending]
sort order (default = ascending)
CLI Example:
.. code-block:: bash
salt '*' zfs.list
salt '*' zfs.list myzpool/mydataset [recursive=True|False]
salt '*' zfs.list myzpool/mydataset properties="sharenfs,mountpoint"
'''
ret = OrderedDict()
zfs = _check_zfs()
recursive = kwargs.get('recursive', False)
depth = kwargs.get('depth', 0)
properties = kwargs.get('properties', 'used,avail,refer,mountpoint')
sort = kwargs.get('sort', None)
ltype = kwargs.get('type', None)
order = kwargs.get('order', 'ascending')
cmd = '{0} list -H'.format(zfs)
# filter on type
if ltype:
cmd = '{0} -t {1}'.format(cmd, ltype)
# recursively list
if recursive:
cmd = '{0} -r'.format(cmd)
if depth:
cmd = '{0} -d {1}'.format(cmd, depth)
# add properties
properties = properties.split(',')
if 'name' in properties: # ensure name is first property
properties.remove('name')
properties.insert(0, 'name')
cmd = '{0} -o {1}'.format(cmd, ','.join(properties))
# sorting
if sort and sort in properties:
if order.startswith('a'):
cmd = '{0} -s {1}'.format(cmd, sort)
else:
cmd = '{0} -S {1}'.format(cmd, sort)
# add name if set
if name:
cmd = '{0} {1}'.format(cmd, name)
# parse output
res = __salt__['cmd.run_all'](cmd)
if res['retcode'] == 0:
        for ds in res['stdout'].splitlines():
ds = ds.split("\t")
ds_data = {}
for prop in properties:
ds_data[prop] = ds[properties.index(prop)]
ret[ds_data['name']] = ds_data
del ret[ds_data['name']]['name']
else:
ret['error'] = res['stderr'] if 'stderr' in res else res['stdout']
return ret
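# Hedged example of the parsing above (binary path and output are illustrative):
# with the default properties the command is
#
#   /sbin/zfs list -H -o name,used,avail,refer,mountpoint
#
# and a stdout line such as
#
#   myzpool/data<TAB>1.2G<TAB>10G<TAB>1.2G<TAB>/data
#
# is split on tabs and stored as
#
#   ret['myzpool/data'] = {'used': '1.2G', 'avail': '10G',
#                          'refer': '1.2G', 'mountpoint': '/data'}
#
# (the forced 'name' column is used as the key and then removed from the dict).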
def mount(name='-a', **kwargs):
'''
.. versionadded:: 2016.3.0
Mounts ZFS file systems
name : string
name of the filesystem, you can use '-a' to mount all unmounted filesystems. (this is the default)
overlay : boolean
perform an overlay mount.
options : string
optional comma-separated list of mount options to use temporarily for
the duration of the mount.
CLI Example:
.. code-block:: bash
salt '*' zfs.mount
salt '*' zfs.mount myzpool/mydataset
salt '*' zfs.mount myzpool/mydataset options=ro
'''
zfs = _check_zfs()
overlay = kwargs.get('overlay', False)
options = kwargs.get('options', None)
res = __salt__['cmd.run_all']('{zfs} mount {overlay}{options}{filesystem}'.format(
zfs=zfs,
overlay='-O ' if overlay else '',
options='-o {0} '.format(options) if options else '',
filesystem=name
))
ret = {}
if name == '-a':
ret = res['retcode'] == 0
else:
if res['retcode'] != 0:
ret[name] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[name] = 'mounted'
return ret
def unmount(name, **kwargs):
'''
.. versionadded:: 2016.3.0
Unmounts ZFS file systems
name : string
name of the filesystem, you can use '-a' to unmount all mounted filesystems.
force : boolean
forcefully unmount the file system, even if it is currently in use.
.. warning::
Using ``-a`` for the name parameter will probably break your system, unless your rootfs is not on zfs.
CLI Example:
.. code-block:: bash
salt '*' zfs.unmount myzpool/mydataset [force=True|False]
'''
zfs = _check_zfs()
force = kwargs.get('force', False)
res = __salt__['cmd.run_all']('{zfs} unmount {force}{filesystem}'.format(
zfs=zfs,
force='-f ' if force else '',
filesystem=name
))
ret = {}
if name == '-a':
ret = res['retcode'] == 0
else:
if res['retcode'] != 0:
ret[name] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[name] = 'unmounted'
return ret
def inherit(prop, name, **kwargs):
'''
.. versionadded:: 2016.3.0
Clears the specified property
prop : string
name of property
name : string
name of the filesystem, volume, or snapshot
recursive : boolean
recursively inherit the given property for all children.
revert : boolean
revert the property to the received value if one exists; otherwise
operate as if the -S option was not specified.
CLI Example:
.. code-block:: bash
salt '*' zfs.inherit canmount myzpool/mydataset [recursive=True|False]
'''
zfs = _check_zfs()
recursive = kwargs.get('recursive', False)
revert = kwargs.get('revert', False)
res = __salt__['cmd.run_all']('{zfs} inherit {recursive}{revert}{prop} {name}'.format(
zfs=zfs,
recursive='-r ' if recursive else '',
revert='-S ' if revert else '',
prop=prop,
name=name
))
ret = {}
ret[name] = {}
if res['retcode'] != 0:
ret[name][prop] = res['stderr'] if 'stderr' in res else res['stdout']
if 'property cannot be inherited' in res['stderr']:
ret[name][prop] = '{0}, {1}'.format(
ret[name][prop],
                'use revert=True to try to reset it to its default value.'
)
else:
ret[name][prop] = 'cleared'
return ret
def diff(name_a, name_b, **kwargs):
'''
.. versionadded:: 2016.3.0
Display the difference between a snapshot of a given filesystem and
another snapshot of that filesystem from a later time or the current
contents of the filesystem.
name_a : string
name of snapshot
name_b : string
name of snapshot or filesystem
show_changetime : boolean
display the path's inode change time as the first column of output. (default = False)
show_indication : boolean
display an indication of the type of file. (default = True)
CLI Example:
.. code-block:: bash
salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset
'''
ret = {}
zfs = _check_zfs()
show_changetime = kwargs.get('show_changetime', False)
show_indication = kwargs.get('show_indication', True)
if '@' not in name_a:
ret[name_a] = 'MUST be a snapshot'
return ret
res = __salt__['cmd.run_all']('{zfs} diff -H {changetime}{indication}{name_a} {name_b}'.format(
zfs=zfs,
changetime='-t ' if show_changetime else '',
indication='-F ' if show_indication else '',
name_a=name_a,
name_b=name_b
))
if res['retcode'] != 0:
ret['error'] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret = []
for line in res['stdout'].splitlines():
ret.append(line)
return ret
def rollback(name, **kwargs):
'''
.. versionadded:: 2016.3.0
Roll back the given dataset to a previous snapshot.
.. warning::
When a dataset is rolled back, all data that has changed since
the snapshot is discarded, and the dataset reverts to the state
at the time of the snapshot. By default, the command refuses to
roll back to a snapshot other than the most recent one.
In order to do so, all intermediate snapshots and bookmarks
must be destroyed by specifying the -r option.
name : string
name of snapshot
recursive : boolean
destroy any snapshots and bookmarks more recent than the one
specified.
recursive_all : boolean
destroy any more recent snapshots and bookmarks, as well as any
clones of those snapshots.
force : boolean
used with the -R option to force an unmount of any clone file
systems that are to be destroyed.
CLI Example:
.. code-block:: bash
salt '*' zfs.rollback myzpool/mydataset@yesterday
'''
ret = {}
zfs = _check_zfs()
force = kwargs.get('force', False)
recursive = kwargs.get('recursive', False)
recursive_all = kwargs.get('recursive_all', False)
if '@' not in name:
ret[name] = 'MUST be a snapshot'
return ret
if force:
if not recursive and not recursive_all: # -f only works with -R
log.warning('zfs.rollback - force=True can only be used when recursive_all=True or recursive=True')
force = False
res = __salt__['cmd.run_all']('{zfs} rollback {force}{recursive}{recursive_all}{snapshot}'.format(
zfs=zfs,
force='-f ' if force else '',
recursive='-r ' if recursive else '',
recursive_all='-R ' if recursive_all else '',
snapshot=name
))
if res['retcode'] != 0:
ret[name[:name.index('@')]] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[name[:name.index('@')]] = 'rolledback to snapshot: {0}'.format(name[name.index('@')+1:])
return ret
def clone(name_a, name_b, **kwargs):
'''
.. versionadded:: 2016.3.0
Creates a clone of the given snapshot.
name_a : string
name of snapshot
name_b : string
name of filesystem or volume
create_parent : boolean
        creates all the non-existing parent datasets. Any property specified on the
command line using the -o option is ignored.
properties : dict
additional zfs properties (-o)
.. note::
ZFS properties can be specified at the time of creation of the filesystem by
passing an additional argument called "properties" and specifying the properties
with their respective values in the form of a python dictionary::
properties="{'property1': 'value1', 'property2': 'value2'}"
CLI Example:
.. code-block:: bash
salt '*' zfs.clone myzpool/mydataset@yesterday myzpool/mydataset_yesterday
'''
ret = {}
zfs = _check_zfs()
create_parent = kwargs.get('create_parent', False)
properties = kwargs.get('properties', None)
if '@' not in name_a:
ret[name_b] = 'failed to clone from {0} because it is not a snapshot'.format(name_a)
return ret
    # if zfs properties specified, then
# create "-o property=value" pairs
if properties:
optlist = []
for prop in properties.keys():
if isinstance(properties[prop], bool): # salt breaks the on/off/yes/no properties :(
properties[prop] = 'on' if properties[prop] else 'off'
optlist.append('-o {0}={1}'.format(prop, properties[prop]))
properties = ' '.join(optlist)
res = __salt__['cmd.run_all']('{zfs} clone {create_parent}{properties}{name_a} {name_b}'.format(
zfs=zfs,
create_parent='-p ' if create_parent else '',
properties='{0} '.format(properties) if properties else '',
name_a=name_a,
name_b=name_b
))
if res['retcode'] != 0:
ret[name_b] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[name_b] = 'cloned from {0}'.format(name_a)
return ret
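# Illustrative usage sketch (added example; names and properties are
# hypothetical): when calling zfs.clone from Python, properties is a plain
# dict; booleans are coerced to 'on'/'off' and rendered as '-o property=value'
# pairs (option order may vary with dict ordering).
#
#   __salt__['zfs.clone'](
#       'myzpool/mydataset@yesterday', 'myzpool/restored',
#       create_parent=True, properties={'compression': 'lz4', 'atime': False}
#   )
#   # runs: zfs clone -p -o compression=lz4 -o atime=off \
#   #           myzpool/mydataset@yesterday myzpool/restored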
def promote(name):
'''
.. versionadded:: 2016.3.0
Promotes a clone file system to no longer be dependent on its "origin"
snapshot.
.. note::
This makes it possible to destroy the file system that the
clone was created from. The clone parent-child dependency relationship
is reversed, so that the origin file system becomes a clone of the
specified file system.
The snapshot that was cloned, and any snapshots previous to this
snapshot, are now owned by the promoted clone. The space they use moves
from the origin file system to the promoted clone, so enough space must
be available to accommodate these snapshots. No new space is consumed
by this operation, but the space accounting is adjusted. The promoted
clone must not have any conflicting snapshot names of its own. The
rename subcommand can be used to rename any conflicting snapshots.
name : string
name of clone-filesystem
CLI Example:
.. code-block:: bash
salt '*' zfs.promote myzpool/myclone
'''
ret = {}
zfs = _check_zfs()
res = __salt__['cmd.run_all']('{zfs} promote {name}'.format(
zfs=zfs,
name=name
))
if res['retcode'] != 0:
ret[name] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[name] = 'promoted'
return ret
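# Illustrative workflow sketch (added example; dataset names are hypothetical):
# promote is typically the final step of a snapshot -> clone -> promote
# sequence, after which the original filesystem has become a clone of the
# promoted one and can be retired independently.
#
#   salt '*' zfs.snapshot myzpool/mydataset@migrate
#   salt '*' zfs.clone myzpool/mydataset@migrate myzpool/mynewdataset
#   salt '*' zfs.promote myzpool/mynewdataset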
def bookmark(snapshot, bookmark):
'''
.. versionadded:: 2016.3.0
Creates a bookmark of the given snapshot
.. note::
Bookmarks mark the point in time when the snapshot was created,
and can be used as the incremental source for a zfs send command.
This feature must be enabled to be used. See zpool-features(5) for
details on ZFS feature flags and the bookmarks feature.
snapshot : string
name of snapshot to bookmark
bookmark : string
name of bookmark
CLI Example:
.. code-block:: bash
salt '*' zfs.bookmark myzpool/mydataset@yesterday myzpool/mydataset#complete
'''
ret = {}
# abort if we do not have feature flags
if not _check_features():
ret['error'] = 'bookmarks are not supported'
return ret
zfs = _check_zfs()
if '@' not in snapshot:
ret[snapshot] = 'MUST be a snapshot'
if '#' not in bookmark:
ret[bookmark] = 'MUST be a bookmark'
if len(ret) > 0:
return ret
res = __salt__['cmd.run_all']('{zfs} bookmark {snapshot} {bookmark}'.format(
zfs=zfs,
snapshot=snapshot,
bookmark=bookmark
))
if res['retcode'] != 0:
ret[snapshot] = res['stderr'] if 'stderr' in res else res['stdout']
else:
ret[snapshot] = 'bookmarked as {0}'.format(bookmark)
return ret
def holds(snapshot, **kwargs):
'''
.. versionadded:: 2016.3.0
Lists all existing user references for the given snapshot or snapshots.
snapshot : string
name of snapshot
recursive : boolean
lists the holds that are set on the named descendent snapshots also.
CLI Example:
.. code-block:: bash
salt '*' zfs.holds myzpool/mydataset@baseline
'''
ret = {}
if '@' not in snapshot:
ret[snapshot] = 'MUST be a snapshot'
return ret
zfs = _check_zfs()
recursive = kwargs.get('recursive', False)
res = __salt__['cmd.run_all']('{zfs} holds -H {recursive}{snapshot}'.format(
zfs=zfs,
recursive='-r ' if recursive else '',
snapshot=snapshot
))
if res['retcode'] == 0:
if res['stdout'] != '':
properties = "name,tag,timestamp".split(",")
            for hold in res['stdout'].splitlines():
hold = hold.split("\t")
hold_data = {}
for prop in properties:
hold_data[prop] = hold[properties.index(prop)]
if hold_data['name'] not in ret:
ret[hold_data['name']] = {}
ret[hold_data['name']][hold_data['tag']] = hold_data['timestamp']
else:
ret[snapshot] = 'no holds'
else:
ret[snapshot] = res['stderr'] if 'stderr' in res else res['stdout']
return ret
def hold(tag, *snapshot, **kwargs):
'''
.. versionadded:: 2016.3.0
Adds a single reference, named with the tag argument, to the specified
snapshot or snapshots.
.. note::
Each snapshot has its own tag namespace, and tags must be unique within that space.
If a hold exists on a snapshot, attempts to destroy that snapshot by
using the zfs destroy command return EBUSY.
tag : string
name of tag
*snapshot : string
name of snapshot(s)
recursive : boolean
specifies that a hold with the given tag is applied recursively to
the snapshots of all descendent file systems.
.. note::
A comma-separated list can be provided for the tag parameter to hold multiple tags.
CLI Example:
.. code-block:: bash
salt '*' zfs.hold mytag myzpool/mydataset@mysnapshot [recursive=True]
salt '*' zfs.hold mytag,myothertag myzpool/mydataset@mysnapshot
salt '*' zfs.hold mytag myzpool/mydataset@mysnapshot myzpool/mydataset@myothersnapshot
'''
ret = {}
zfs = _check_zfs()
recursive = kwargs.get('recursive', False)
# verify snapshots
if not snapshot:
ret['error'] = 'one or more snapshots must be specified'
for snap in snapshot:
if '@' not in snap:
ret[snap] = 'not a snapshot'
if len(ret) > 0:
return ret
for csnap in snapshot:
for ctag in tag.split(','):
res = __salt__['cmd.run_all']('{zfs} hold {recursive}{tag} {snapshot}'.format(
zfs=zfs,
recursive='-r ' if recursive else '',
tag=ctag,
snapshot=csnap
))
if csnap not in ret:
ret[csnap] = {}
if res['retcode'] != 0:
for err in res['stderr'].splitlines():
if err.startswith('cannot hold snapshot'):
ret[csnap][ctag] = err[err.index(':')+2:]
elif err.startswith('cannot open'):
ret[csnap][ctag] = err[err.index(':')+2:]
else:
# fallback in case we hit a weird error
if err == 'usage:':
break
ret[csnap][ctag] = res['stderr']
else:
ret[csnap][ctag] = 'held'
return ret
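# Illustrative sketch of the return value (added example; names are
# hypothetical): hold reports per snapshot and per tag, with 'held' on success
# or the error text from the zfs command on failure.
#
#   __salt__['zfs.hold']('mytag,backup', 'myzpool/mydataset@mysnapshot')
#   # e.g. {'myzpool/mydataset@mysnapshot': {'mytag': 'held', 'backup': 'held'}}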
def release(tag, *snapshot, **kwargs):
'''
.. versionadded:: 2016.3.0
Removes a single reference, named with the tag argument, from the
specified snapshot or snapshots.
.. note::
The tag must already exist for each snapshot.
If a hold exists on a snapshot, attempts to destroy that
snapshot by using the zfs destroy command return EBUSY.
tag : string
name of tag
*snapshot : string
name of snapshot(s)
recursive : boolean
recursively releases a hold with the given tag on the snapshots of
all descendent file systems.
.. note::
A comma-separated list can be provided for the tag parameter to release multiple tags.
CLI Example:
.. code-block:: bash
salt '*' zfs.release mytag myzpool/mydataset@mysnapshot [recursive=True]
salt '*' zfs.release mytag myzpool/mydataset@mysnapshot myzpool/mydataset@myothersnapshot
'''
ret = {}
zfs = _check_zfs()
recursive = kwargs.get('recursive', False)
# verify snapshots
if not snapshot:
ret['error'] = 'one or more snapshots must be specified'
for snap in snapshot:
if '@' not in snap:
ret[snap] = 'not a snapshot'
if len(ret) > 0:
return ret
for csnap in snapshot:
for ctag in tag.split(','):
res = __salt__['cmd.run_all']('{zfs} release {recursive}{tag} {snapshot}'.format(
zfs=zfs,
recursive='-r ' if recursive else '',
tag=ctag,
snapshot=csnap
))
if csnap not in ret:
ret[csnap] = {}
if res['retcode'] != 0:
for err in res['stderr'].splitlines():
if err.startswith('cannot release hold from snapshot'):
ret[csnap][ctag] = err[err.index(':')+2:]
elif err.startswith('cannot open'):
ret[csnap][ctag] = err[err.index(':')+2:]
else:
# fallback in case we hit a weird error
if err == 'usage:':
break
ret[csnap][ctag] = res['stderr']
else:
ret[csnap][ctag] = 'released'
return ret
def snapshot(*snapshot, **kwargs):
'''
.. versionadded:: 2016.3.0
Creates snapshots with the given names.
*snapshot : string
name of snapshot(s)
recursive : boolean
recursively create snapshots of all descendent datasets.
properties : dict
additional zfs properties (-o)
.. note::
ZFS properties can be specified at the time of creation of the filesystem by
passing an additional argument called "properties" and specifying the properties
with their respective values in the form of a python dictionary::
properties="{'property1': 'value1', 'property2': 'value2'}"
CLI Example:
.. code-block:: bash
salt '*' zfs.snapshot myzpool/mydataset@yesterday [recursive=True]
salt '*' zfs.snapshot myzpool/mydataset@yesterday myzpool/myotherdataset@yesterday [recursive=True]
'''
ret = {}
zfs = _check_zfs()
recursive = kwargs.get('recursive', False)
properties = kwargs.get('properties', None)
# verify snapshots
if not snapshot:
ret['error'] = 'one or more snapshots must be specified'
for snap in snapshot:
if '@' not in snap:
ret[snap] = 'not a snapshot'
    # if zfs properties specified, then
# create "-o property=value" pairs
if properties:
optlist = []
for prop in properties.keys():
if isinstance(properties[prop], bool): # salt breaks the on/off/yes/no properties :(
properties[prop] = 'on' if properties[prop] else 'off'
optlist.append('-o {0}={1}'.format(prop, properties[prop]))
properties = ' '.join(optlist)
for csnap in snapshot:
if '@' not in csnap:
continue
res = __salt__['cmd.run_all']('{zfs} snapshot {recursive}{properties}{snapshot}'.format(
zfs=zfs,
recursive='-r ' if recursive else '',
properties='{0} '.format(properties) if properties else '',
snapshot=csnap
))
if res['retcode'] != 0:
for err in res['stderr'].splitlines():
if err.startswith('cannot create snapshot'):
ret[csnap] = err[err.index(':')+2:]
elif err.startswith('cannot open'):
ret[csnap] = err[err.index(':')+2:]
else:
# fallback in case we hit a weird error
if err == 'usage:':
break
ret[csnap] = res['stderr']
else:
ret[csnap] = 'snapshotted'
return ret
def set(*dataset, **kwargs):
'''
.. versionadded:: 2016.3.0
Sets the property or list of properties to the given value(s) for each dataset.
*dataset : string
name of snapshot(s), filesystem(s), or volume(s)
*properties : string
        additional zfs property=value pairs
.. note::
properties are passed as key-value pairs. e.g.
compression=off
.. note::
Only some properties can be edited.
See the Properties section for more information on what properties
can be set and acceptable values.
Numeric values can be specified as exact values, or in a human-readable
form with a suffix of B, K, M, G, T, P, E, Z (for bytes, kilobytes,
megabytes, gigabytes, terabytes, petabytes, exabytes, or zettabytes,
respectively).
CLI Example:
.. code-block:: bash
salt '*' zfs.set myzpool/mydataset compression=off
salt '*' zfs.set myzpool/mydataset myzpool/myotherdataset compression=off
salt '*' zfs.set myzpool/mydataset myzpool/myotherdataset compression=lz4 canmount=off
'''
ret = {}
zfs = _check_zfs()
    # verify datasets
if not dataset:
        ret['error'] = 'one or more datasets must be specified'
# clean kwargs
properties = salt.utils.clean_kwargs(**kwargs)
if len(properties) < 1:
ret['error'] = '{0}one or more properties must be specified'.format(
'{0},\n'.format(ret['error']) if 'error' in ret else ''
)
if len(ret) > 0:
return ret
# for better error handling we don't do one big set command
for ds in dataset:
for prop in properties.keys():
if isinstance(properties[prop], bool): # salt breaks the on/off/yes/no properties :(
properties[prop] = 'on' if properties[prop] else 'off'
res = __salt__['cmd.run_all']('{zfs} set {prop}={value} {dataset}'.format(
zfs=zfs,
prop=prop,
value=properties[prop],
dataset=ds
))
if ds not in ret:
ret[ds] = {}
if res['retcode'] != 0:
ret[ds][prop] = res['stderr'] if 'stderr' in res else res['stdout']
if ':' in ret[ds][prop]:
ret[ds][prop] = ret[ds][prop][ret[ds][prop].index(':')+2:]
else:
ret[ds][prop] = 'set'
return ret
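# Illustrative usage sketch (added example; names are hypothetical): properties
# arrive as keyword arguments, booleans are coerced to 'on'/'off', and every
# dataset/property pair is set and reported individually.
#
#   __salt__['zfs.set']('myzpool/mydataset', compression='lz4', atime=False)
#   # e.g. {'myzpool/mydataset': {'compression': 'set', 'atime': 'set'}}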
def get(*dataset, **kwargs):
'''
.. versionadded:: 2016.3.0
Displays properties for the given datasets.
*dataset : string
name of snapshot(s), filesystem(s), or volume(s)
properties : string
comma-separated list of properties to list, defaults to all
recursive : boolean
recursively list children
depth : int
recursively list children to depth
fields : string
        comma-separated list of fields to include; the name and property fields will always be added
type : string
comma-separated list of types to display, where type is one of
filesystem, snapshot, volume, bookmark, or all.
source : string
comma-separated list of sources to display. Must be one of the following:
local, default, inherited, temporary, and none. The default value is all sources.
.. note::
If no datasets are specified, then the command displays properties
for all datasets on the system.
CLI Example:
.. code-block:: bash
salt '*' zfs.get
salt '*' zfs.get myzpool/mydataset [recursive=True|False]
salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False]
salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1
'''
ret = OrderedDict()
zfs = _check_zfs()
properties = kwargs.get('properties', 'all')
recursive = kwargs.get('recursive', False)
depth = kwargs.get('depth', 0)
fields = kwargs.get('fields', 'value,source')
ltype = kwargs.get('type', None)
source = kwargs.get('source', None)
cmd = '{0} get -H'.format(zfs)
# recursively get
if depth:
cmd = '{0} -d {1}'.format(cmd, depth)
elif recursive:
cmd = '{0} -r'.format(cmd)
# fields
fields = fields.split(',')
if 'name' in fields: # ensure name is first
fields.remove('name')
if 'property' in fields: # ensure property is second
fields.remove('property')
fields.insert(0, 'name')
fields.insert(1, 'property')
cmd = '{0} -o {1}'.format(cmd, ','.join(fields))
    # filter on source
if source:
cmd = '{0} -s {1}'.format(cmd, source)
# filter on type
if ltype:
cmd = '{0} -t {1}'.format(cmd, ltype)
# properties
cmd = '{0} {1}'.format(cmd, properties)
# datasets
cmd = '{0} {1}'.format(cmd, ' '.join(dataset))
# parse output
res = __salt__['cmd.run_all'](cmd)
if res['retcode'] == 0:
        for ds in res['stdout'].splitlines():
ds = ds.split("\t")
ds_data = {}
for field in fields:
ds_data[field] = ds[fields.index(field)]
ds_name = ds_data['name']
ds_prop = ds_data['property']
del ds_data['name']
del ds_data['property']
if ds_name not in ret:
ret[ds_name] = {}
ret[ds_name][ds_prop] = ds_data
else:
ret['error'] = res['stderr'] if 'stderr' in res else res['stdout']
return ret
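# Illustrative sketch of the return structure (added example; values are
# hypothetical): results are nested per dataset and per property, keeping the
# remaining requested fields ('value' and 'source' by default) for each entry.
#
#   __salt__['zfs.get']('myzpool/mydataset', properties='compression')
#   # e.g. OrderedDict([('myzpool/mydataset',
#   #          {'compression': {'value': 'lz4', 'source': 'local'}})])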
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For miscellaneous util methods used with compute."""
import copy
import string
import uuid
import mock
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import encodeutils
from oslo.utils import importutils
import six
import testtools
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova import rpc
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_flavor
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
class ComputeValidateDeviceTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeValidateDeviceTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
# check if test name includes "xen"
if 'xen' in self.id():
self.flags(compute_driver='xenapi.XenAPIDriver')
self.instance = objects.Instance(uuid=uuid.uuid4().hex,
root_device_name=None,
default_ephemeral_device=None)
else:
self.instance = objects.Instance(uuid=uuid.uuid4().hex,
root_device_name='/dev/vda',
default_ephemeral_device='/dev/vdb')
flavor = objects.Flavor(**test_flavor.fake_flavor)
self.instance.system_metadata = {}
with mock.patch.object(self.instance, 'save'):
self.instance.set_flavor(flavor)
self.instance.default_swap_device = None
self.data = []
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
lambda context, instance, use_slave=False: self.data)
def _update_flavor(self, flavor_info):
self.flavor = {
'id': 1,
'name': 'foo',
'memory_mb': 128,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': 1,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
}
self.flavor.update(flavor_info)
with mock.patch.object(self.instance, 'save'):
self.instance.set_flavor(self.flavor)
def _validate_device(self, device=None):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, self.instance['uuid'])
return compute_utils.get_device_name_for_instance(
self.context, self.instance, bdms, device)
@staticmethod
def _fake_bdm(device):
return fake_block_device.FakeDbBlockDeviceDict({
'source_type': 'volume',
'destination_type': 'volume',
'device_name': device,
'no_device': None,
'volume_id': 'fake',
'snapshot_id': None,
'guest_format': None
})
def test_wrap(self):
self.data = []
for letter in string.ascii_lowercase[2:]:
self.data.append(self._fake_bdm('/dev/vd' + letter))
device = self._validate_device()
self.assertEqual(device, '/dev/vdaa')
def test_wrap_plus_one(self):
self.data = []
for letter in string.ascii_lowercase[2:]:
self.data.append(self._fake_bdm('/dev/vd' + letter))
self.data.append(self._fake_bdm('/dev/vdaa'))
device = self._validate_device()
self.assertEqual(device, '/dev/vdab')
def test_later(self):
self.data = [
self._fake_bdm('/dev/vdc'),
self._fake_bdm('/dev/vdd'),
self._fake_bdm('/dev/vde'),
]
device = self._validate_device()
self.assertEqual(device, '/dev/vdf')
def test_gap(self):
self.data = [
self._fake_bdm('/dev/vdc'),
self._fake_bdm('/dev/vde'),
]
device = self._validate_device()
self.assertEqual(device, '/dev/vdd')
def test_no_bdms(self):
self.data = []
device = self._validate_device()
self.assertEqual(device, '/dev/vdc')
def test_lxc_names_work(self):
self.instance['root_device_name'] = '/dev/a'
self.instance['ephemeral_device_name'] = '/dev/b'
self.data = []
device = self._validate_device()
self.assertEqual(device, '/dev/c')
def test_name_conversion(self):
self.data = []
device = self._validate_device('/dev/c')
self.assertEqual(device, '/dev/vdc')
device = self._validate_device('/dev/sdc')
self.assertEqual(device, '/dev/vdc')
device = self._validate_device('/dev/xvdc')
self.assertEqual(device, '/dev/vdc')
def test_invalid_device_prefix(self):
self.assertRaises(exception.InvalidDevicePath,
self._validate_device, '/baddata/vdc')
def test_device_in_use(self):
exc = self.assertRaises(exception.DevicePathInUse,
self._validate_device, '/dev/vda')
self.assertIn('/dev/vda', six.text_type(exc))
def test_swap(self):
self.instance['default_swap_device'] = "/dev/vdc"
device = self._validate_device()
self.assertEqual(device, '/dev/vdd')
def test_swap_no_ephemeral(self):
self.instance.default_ephemeral_device = None
self.instance.default_swap_device = "/dev/vdb"
device = self._validate_device()
self.assertEqual(device, '/dev/vdc')
def test_ephemeral_xenapi(self):
self._update_flavor({
'ephemeral_gb': 10,
'swap': 0,
})
self.stubs.Set(flavors, 'get_flavor',
lambda instance_type_id, ctxt=None: self.flavor)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdc')
def test_swap_xenapi(self):
self._update_flavor({
'ephemeral_gb': 0,
'swap': 10,
})
self.stubs.Set(flavors, 'get_flavor',
lambda instance_type_id, ctxt=None: self.flavor)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdb')
def test_swap_and_ephemeral_xenapi(self):
self._update_flavor({
'ephemeral_gb': 10,
'swap': 10,
})
self.stubs.Set(flavors, 'get_flavor',
lambda instance_type_id, ctxt=None: self.flavor)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdd')
def test_swap_and_one_attachment_xenapi(self):
self._update_flavor({
'ephemeral_gb': 0,
'swap': 10,
})
self.stubs.Set(flavors, 'get_flavor',
lambda instance_type_id, ctxt=None: self.flavor)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdb')
self.data.append(self._fake_bdm(device))
device = self._validate_device()
self.assertEqual(device, '/dev/xvdd')
def test_no_dev_root_device_name_get_next_name(self):
self.instance['root_device_name'] = 'vda'
device = self._validate_device()
self.assertEqual('/dev/vdc', device)
class DefaultDeviceNamesForInstanceTestCase(test.NoDBTestCase):
def setUp(self):
super(DefaultDeviceNamesForInstanceTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.ephemerals = block_device_obj.block_device_make_list(
self.context,
[fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': None,
'boot_index': -1})])
self.swap = block_device_obj.block_device_make_list(
self.context,
[fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdc',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1})])
self.block_device_mapping = block_device_obj.block_device_make_list(
self.context,
[fake_block_device.FakeDbBlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdd',
'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': 'fake-snapshot-id-1',
'boot_index': -1}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vde',
'source_type': 'blank',
'destination_type': 'volume',
'boot_index': -1})])
self.flavor = {'swap': 4}
self.instance = {'uuid': 'fake_instance', 'ephemeral_gb': 2}
self.is_libvirt = False
self.root_device_name = '/dev/vda'
self.update_called = False
def fake_extract_flavor(instance):
return self.flavor
def fake_driver_matches(driver_string):
if driver_string == 'libvirt.LibvirtDriver':
return self.is_libvirt
return False
self.patchers = []
self.patchers.append(
mock.patch.object(objects.BlockDeviceMapping, 'save'))
self.patchers.append(
mock.patch.object(
flavors, 'extract_flavor',
new=mock.Mock(side_effect=fake_extract_flavor)))
self.patchers.append(
mock.patch.object(driver,
'compute_driver_matches',
new=mock.Mock(
side_effect=fake_driver_matches)))
for patcher in self.patchers:
patcher.start()
def tearDown(self):
super(DefaultDeviceNamesForInstanceTestCase, self).tearDown()
for patcher in self.patchers:
patcher.stop()
def _test_default_device_names(self, *block_device_lists):
compute_utils.default_device_names_for_instance(self.instance,
self.root_device_name,
*block_device_lists)
def test_only_block_device_mapping(self):
# Test no-op
original_bdm = copy.deepcopy(self.block_device_mapping)
self._test_default_device_names([], [], self.block_device_mapping)
for original, new in zip(original_bdm, self.block_device_mapping):
self.assertEqual(original.device_name, new.device_name)
# Assert it defaults the missing one as expected
self.block_device_mapping[1]['device_name'] = None
self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names([], [], self.block_device_mapping)
self.assertEqual('/dev/vdb',
self.block_device_mapping[1]['device_name'])
self.assertEqual('/dev/vdc',
self.block_device_mapping[2]['device_name'])
def test_with_ephemerals(self):
# Test ephemeral gets assigned
self.ephemerals[0]['device_name'] = None
self._test_default_device_names(self.ephemerals, [],
self.block_device_mapping)
self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
self.block_device_mapping[1]['device_name'] = None
self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names(self.ephemerals, [],
self.block_device_mapping)
self.assertEqual('/dev/vdc',
self.block_device_mapping[1]['device_name'])
self.assertEqual('/dev/vdd',
self.block_device_mapping[2]['device_name'])
def test_with_swap(self):
# Test swap only
self.swap[0]['device_name'] = None
self._test_default_device_names([], self.swap, [])
self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
# Test swap and block_device_mapping
self.swap[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names([], self.swap,
self.block_device_mapping)
self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
self.assertEqual('/dev/vdc',
self.block_device_mapping[1]['device_name'])
self.assertEqual('/dev/vdd',
self.block_device_mapping[2]['device_name'])
def test_all_together(self):
# Test swap missing
self.swap[0]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
# Test swap and eph missing
self.swap[0]['device_name'] = None
self.ephemerals[0]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
# Test all missing
self.swap[0]['device_name'] = None
self.ephemerals[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
self.assertEqual('/dev/vde',
self.block_device_mapping[2]['device_name'])
class UsageInfoTestCase(test.TestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
super(UsageInfoTestCase, self).setUp()
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
self.flags(use_local=True, group='conductor')
self.flags(compute_driver='nova.virt.fake.FakeDriver',
network_manager='nova.network.manager.FlatManager')
self.compute = importutils.import_object(CONF.compute_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_show(meh, context, id, **kwargs):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(nova.tests.unit.image.fake._FakeImageService,
'show', fake_show)
fake_network.set_stub_network_methods(self.stubs)
fake_server_actions.stub_out_action_events(self.stubs)
def _create_instance(self, params=None):
"""Create a test instance."""
params = params or {}
flavor = flavors.get_flavor_by_name('m1.tiny')
sys_meta = flavors.save_flavor_info({}, flavor)
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = flavor['id']
inst['system_metadata'] = sys_meta
inst['ami_launch_index'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['info_cache'] = {'network_info': '[]'}
inst.update(params)
return db.instance_create(self.context, inst)['id']
def test_notify_usage_exists(self):
# Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=[
'system_metadata'])
# Set some system metadata
sys_metadata = {'image_md_key1': 'val1',
'image_md_key2': 'val2',
'other_data': 'meow'}
instance.system_metadata.update(sys_metadata)
instance.save()
compute_utils.notify_usage_exists(
rpc.get_notifier('compute'), self.context, instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.exists')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description',
'bandwidth', 'audit_period_beginning',
'audit_period_ending', 'image_meta'):
self.assertIn(attr, payload,
"Key %s not in payload" % attr)
self.assertEqual(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance, [], [])
def test_notify_usage_exists_deleted_instance(self):
# Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
# Set some system metadata
sys_metadata = {'image_md_key1': 'val1',
'image_md_key2': 'val2',
'other_data': 'meow'}
instance.system_metadata.update(sys_metadata)
instance.save()
self.compute.terminate_instance(self.context, instance, [], [])
instance = objects.Instance.get_by_id(
self.context.elevated(read_deleted='yes'), instance_id,
expected_attrs=['system_metadata'])
compute_utils.notify_usage_exists(
rpc.get_notifier('compute'), self.context, instance)
msg = fake_notifier.NOTIFICATIONS[-1]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.exists')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description',
'bandwidth', 'audit_period_beginning',
'audit_period_ending', 'image_meta'):
self.assertIn(attr, payload, "Key %s not in payload" % attr)
self.assertEqual(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(payload['image_ref_url'], image_ref_url)
def test_notify_usage_exists_instance_not_found(self):
# Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
self.compute.terminate_instance(self.context, instance, [], [])
compute_utils.notify_usage_exists(
rpc.get_notifier('compute'), self.context, instance)
msg = fake_notifier.NOTIFICATIONS[-1]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.exists')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description',
'bandwidth', 'audit_period_beginning',
'audit_period_ending', 'image_meta'):
self.assertIn(attr, payload, "Key %s not in payload" % attr)
self.assertEqual(payload['image_meta'], {})
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(payload['image_ref_url'], image_ref_url)
def test_notify_about_instance_usage(self):
instance_id = self._create_instance()
instance = objects.Instance.get_by_id(self.context, instance_id,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
# Set some system metadata
sys_metadata = {'image_md_key1': 'val1',
'image_md_key2': 'val2',
'other_data': 'meow'}
instance.system_metadata.update(sys_metadata)
instance.save()
extra_usage_info = {'image_name': 'fake_name'}
compute_utils.notify_about_instance_usage(
rpc.get_notifier('compute'),
self.context, instance, 'create.start',
extra_usage_info=extra_usage_info)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.create.start')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description', 'image_meta'):
self.assertIn(attr, payload, "Key %s not in payload" % attr)
self.assertEqual(payload['image_meta'],
{'md_key1': 'val1', 'md_key2': 'val2'})
self.assertEqual(payload['image_name'], 'fake_name')
image_ref_url = "%s/images/1" % glance.generate_glance_url()
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance, [], [])
def test_notify_about_aggregate_update_with_id(self):
# Set aggregate payload
aggregate_payload = {'aggregate_id': 1}
compute_utils.notify_about_aggregate_update(self.context,
"create.end",
aggregate_payload)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'aggregate.create.end')
payload = msg.payload
self.assertEqual(payload['aggregate_id'], 1)
def test_notify_about_aggregate_update_with_name(self):
# Set aggregate payload
aggregate_payload = {'name': 'fakegroup'}
compute_utils.notify_about_aggregate_update(self.context,
"create.start",
aggregate_payload)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'aggregate.create.start')
payload = msg.payload
self.assertEqual(payload['name'], 'fakegroup')
def test_notify_about_aggregate_update_without_name_id(self):
# Set empty aggregate payload
aggregate_payload = {}
compute_utils.notify_about_aggregate_update(self.context,
"create.start",
aggregate_payload)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
class ComputeGetImageMetadataTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeGetImageMetadataTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.image = {
"min_ram": 10,
"min_disk": 1,
"disk_format": "raw",
"container_format": "bare",
"properties": {},
}
self.mock_image_api = mock.Mock()
self.mock_image_api.get.return_value = self.image
self.ctx = context.RequestContext('fake', 'fake')
sys_meta = {
'image_min_ram': 10,
'image_min_disk': 1,
'image_disk_format': 'raw',
'image_container_format': 'bare',
}
flavor = objects.Flavor(
id=0,
name='m1.fake',
memory_mb=10,
vcpus=1,
root_gb=1,
ephemeral_gb=1,
flavorid='0',
swap=1,
rxtx_factor=0.0,
vcpu_weight=None)
instance = fake_instance.fake_db_instance(
memory_mb=0, root_gb=0,
system_metadata=sys_meta)
self.instance_obj = objects.Instance._from_db_object(
self.ctx, objects.Instance(), instance,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
with mock.patch.object(self.instance_obj, 'save'):
self.instance_obj.set_flavor(flavor)
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_get_image_meta(self, mock_get):
mock_get.return_value = objects.Flavor(extra_specs={})
image_meta = compute_utils.get_image_metadata(
self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
self.image['properties'] = 'DONTCARE'
self.assertThat(self.image, matchers.DictMatches(image_meta))
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_get_image_meta_with_image_id_none(self, mock_flavor_get):
mock_flavor_get.return_value = objects.Flavor(extra_specs={})
self.image['properties'] = {'fake_property': 'fake_value'}
inst = self.instance_obj
with mock.patch.object(flavors,
"extract_flavor") as mock_extract_flavor:
with mock.patch.object(utils, "get_system_metadata_from_image"
) as mock_get_sys_metadata:
image_meta = compute_utils.get_image_metadata(
self.ctx, self.mock_image_api, None, inst)
self.assertEqual(0, self.mock_image_api.get.call_count)
self.assertEqual(0, mock_extract_flavor.call_count)
self.assertEqual(0, mock_get_sys_metadata.call_count)
self.assertNotIn('fake_property', image_meta['properties'])
        # Check that mock_image_api.get is called when the image_id is 0,
        # since 0 is a valid image ID
image_meta = compute_utils.get_image_metadata(self.ctx,
self.mock_image_api,
0, self.instance_obj)
self.assertEqual(1, self.mock_image_api.get.call_count)
self.assertIn('fake_property', image_meta['properties'])
def _test_get_image_meta_exception(self, error):
self.mock_image_api.get.side_effect = error
image_meta = compute_utils.get_image_metadata(
self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
self.image['properties'] = 'DONTCARE'
# NOTE(danms): The trip through system_metadata will stringify things
for key in self.image:
self.image[key] = str(self.image[key])
self.assertThat(self.image, matchers.DictMatches(image_meta))
def test_get_image_meta_no_image(self):
error = exception.ImageNotFound(image_id='fake-image')
self._test_get_image_meta_exception(error)
def test_get_image_meta_not_authorized(self):
error = exception.ImageNotAuthorized(image_id='fake-image')
self._test_get_image_meta_exception(error)
def test_get_image_meta_bad_request(self):
error = exception.Invalid()
self._test_get_image_meta_exception(error)
def test_get_image_meta_unexpected_exception(self):
error = test.TestingException()
with testtools.ExpectedException(test.TestingException):
self._test_get_image_meta_exception(error)
def test_get_image_meta_no_image_system_meta(self):
for k in self.instance_obj.system_metadata.keys():
if k.startswith('image_'):
del self.instance_obj.system_metadata[k]
with mock.patch('nova.objects.Flavor.get_by_flavor_id') as get:
get.return_value = objects.Flavor(extra_specs={})
image_meta = compute_utils.get_image_metadata(
self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
self.image['properties'] = 'DONTCARE'
self.assertThat(self.image, matchers.DictMatches(image_meta))
def test_get_image_meta_no_image_no_image_system_meta(self):
e = exception.ImageNotFound(image_id='fake-image')
self.mock_image_api.get.side_effect = e
for k in self.instance_obj.system_metadata.keys():
if k.startswith('image_'):
del self.instance_obj.system_metadata[k]
with mock.patch('nova.objects.Flavor.get_by_flavor_id') as get:
get.return_value = objects.Flavor(extra_specs={})
image_meta = compute_utils.get_image_metadata(
self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
expected = {'properties': 'DONTCARE'}
self.assertThat(expected, matchers.DictMatches(image_meta))
class ComputeUtilsGetValFromSysMetadata(test.NoDBTestCase):
def test_get_value_from_system_metadata(self):
instance = fake_instance.fake_instance_obj('fake-context')
system_meta = {'int_val': 1,
'int_string': '2',
'not_int': 'Nope'}
instance.system_metadata = system_meta
result = compute_utils.get_value_from_system_metadata(
instance, 'int_val', int, 0)
self.assertEqual(1, result)
result = compute_utils.get_value_from_system_metadata(
instance, 'int_string', int, 0)
self.assertEqual(2, result)
result = compute_utils.get_value_from_system_metadata(
instance, 'not_int', int, 0)
self.assertEqual(0, result)
class ComputeUtilsGetNWInfo(test.NoDBTestCase):
def test_instance_object_none_info_cache(self):
inst = fake_instance.fake_instance_obj('fake-context',
expected_attrs=['info_cache'])
self.assertIsNone(inst.info_cache)
result = compute_utils.get_nw_info_for_instance(inst)
self.assertEqual(jsonutils.dumps([]), result.json())
def test_instance_dict_none_info_cache(self):
inst = fake_instance.fake_db_instance(info_cache=None)
self.assertIsNone(inst['info_cache'])
result = compute_utils.get_nw_info_for_instance(inst)
self.assertEqual(jsonutils.dumps([]), result.json())
class ComputeUtilsGetRebootTypes(test.NoDBTestCase):
def setUp(self):
super(ComputeUtilsGetRebootTypes, self).setUp()
self.context = context.RequestContext('fake', 'fake')
def test_get_reboot_type_started_soft(self):
reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_STARTED,
power_state.RUNNING)
self.assertEqual(reboot_type, 'SOFT')
def test_get_reboot_type_pending_soft(self):
reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_PENDING,
power_state.RUNNING)
self.assertEqual(reboot_type, 'SOFT')
def test_get_reboot_type_hard(self):
reboot_type = compute_utils.get_reboot_type('foo', power_state.RUNNING)
self.assertEqual(reboot_type, 'HARD')
def test_get_reboot_not_running_hard(self):
reboot_type = compute_utils.get_reboot_type('foo', 'bar')
self.assertEqual(reboot_type, 'HARD')
class ComputeUtilsTestCase(test.NoDBTestCase):
def test_exception_to_dict_with_long_message_3_bytes(self):
# Generate Chinese byte string whose length is 300. This Chinese UTF-8
# character occupies 3 bytes. After truncating, the byte string length
# should be 255.
msg = encodeutils.safe_decode('\xe8\xb5\xb5' * 100)
exc = exception.NovaException(message=msg)
fault_dict = compute_utils.exception_to_dict(exc)
byte_message = encodeutils.safe_encode(fault_dict["message"])
self.assertEqual(255, len(byte_message))
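    # A quick check of the arithmetic above (added note): the cap keeps whole
    # characters only, so 255 // 3 = 85 three-byte characters fit for exactly
    # 255 bytes, while 255 // 2 = 127 two-byte characters fit for 254 bytes
    # (covered by the next test).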
def test_exception_to_dict_with_long_message_2_bytes(self):
# Generate Russian byte string whose length is 300. This Russian UTF-8
# character occupies 2 bytes. After truncating, the byte string length
# should be 254.
msg = encodeutils.safe_decode('\xd0\x92' * 150)
exc = exception.NovaException(message=msg)
fault_dict = compute_utils.exception_to_dict(exc)
byte_message = encodeutils.safe_encode(fault_dict["message"])
self.assertEqual(254, len(byte_message))
|
|
"""
MODULE : basic_blocks
Purpose : * Class used to represent a basic block. Will be an array of instructions.
* All required algorithms such as register allocation etc. will be a part of this
class
Import Acronym : BB
"""
# List of Imports Begin
import debug as DEBUG
import instr3ac as INSTRUCTION
import registers as REG
import global_objects as G
import translator as TRANS
# List of Imports End
class BasicBlock(object):
"""
This class holds an array of instructions corresponding to a single basic block.
Member Variables :
* bbNum : Basic Block Number
* instructions : The actual sequence of instructions corresponding to the basic block.
* regAddrDescriptor : The register and address descriptor for this block
    TODO: Add more fields as and when we need them.
"""
def __init__(self, bbNum=0, instructions=[]):
self.bbNum = bbNum
self.instructions = instructions[:]
self.symbols = set([])
def AddInstruction(self, instr):
""" Add instructions incrementally. Will help in the basic block algorithm """
DEBUG.Assert(type(instr) == INSTRUCTION.Instr3AC, "Input is not of instr3AC type")
self.instructions += [instr]
self.symbols |= instr.ReturnSymbols()
def IsEmpty(self):
return len(self.instructions) == 0
def ComputeSymbolTables(self):
""" Compute live ranges and next-use using a backward scan """
symTable = SymbolTable(self.symbols)
self.finalSymTable = symTable
for revInstr in reversed(self.instructions):
revInstr.UpdateAndAttachSymbolTable(symTable)
symTable = revInstr.symTable
def Translate(self):
G.AsmText.AddComment("BASIC BLOCK #" + str(self.bbNum))
self.ComputeSymbolTables()
bbRegAddrDescriptor = RegAddrDescriptor(self.symbols)
# Set global pointers
G.CurrRegAddrTable = bbRegAddrDescriptor
for (idx, instr) in enumerate(self.instructions):
# Set global pointers
G.CurrSymbolTable = instr.symTable
G.CurrInstruction = instr
G.AllocMap = {}
#print instr
if idx != len(self.instructions) - 1:
G.NextSymbolTable = self.instructions[idx + 1].symTable
else:
G.NextSymbolTable = self.finalSymTable
G.AsmText.AddComment("INSTR : " + instr.PrettyPrint())
# Add the necessary labels before doing register allocation
if instr.instrType.is_LABEL():
G.AsmText.AddText("%s:"%(instr.label))
G.CurrFunction = instr.label
elif instr.IsTarget():
                # Add a label $LID_<line_id> for each line in the input
                # if it is a branch target
G.AsmText.AddText("$LID_%d:"%(instr.lineID))
# Perform register allocation
regAllocCode = self.RegisterAllocate()
#if regAllocCode:
#G.AsmText.AddText(regAllocCode)
self.UpdateRegAddrDescriptor()
#G.CurrRegAddrTable.PrettyPrintRegisters()
# TODO : Actual Translation
TRANS.Translate(instr)
if not self.instructions[-1].instrType.is_JMP():
G.CurrRegAddrTable.DumpDirtyVars()
def PrettyPrint(self):
print "BASIC BLOCK #" + str(self.bbNum)
for instr in self.instructions:
print instr
def RegisterAllocate(self):
""" Perform register allocation for the current instruction """
codeSegment = ""
alreadyAllocatedRegs = []
if (G.CurrInstruction.inp1.is_HASH_VARIABLE() or
G.CurrInstruction.inp2.is_HASH_VARIABLE() or
G.CurrInstruction.dest.is_HASH_VARIABLE()):
# No need to do register allocation
return codeSegment
# Register Allocation for input 1
if G.CurrInstruction.inp1.is_SCALAR_VARIABLE():
varName = G.CurrInstruction.inp1.value
reg, getRegCode, isLoaded = GetReg(varName, alreadyAllocatedRegs)
# Set global details and add code for loading if necessary
G.AllocMap[varName] = reg
G.AsmText.AddText(getRegCode[:-1], "Freeing space for %s"%varName)
codeSegment += getRegCode
if not isLoaded:
codeSegment += reg.LoadVar(varName)
G.AsmText.AddText(reg.LoadVar(varName)[:-1], "Load %s into %s"%(varName, reg))
alreadyAllocatedRegs += [reg.regName]
# Register Allocation for input 2
if G.CurrInstruction.inp2.is_SCALAR_VARIABLE():
varName = G.CurrInstruction.inp2.value
reg, getRegCode, isLoaded = GetReg(varName, alreadyAllocatedRegs)
# Set global details and add code for loading if necessary
G.AllocMap[varName] = reg
G.AsmText.AddText(getRegCode[:-1], "Freeing space for %s"%varName)
codeSegment += getRegCode
if not isLoaded:
codeSegment += reg.LoadVar(varName)
G.AsmText.AddText(reg.LoadVar(varName)[:-1], "Load %s into %s"%(varName, reg))
alreadyAllocatedRegs += [reg.regName]
# Register Allocation for dest variable
if G.CurrInstruction.dest.is_SCALAR_VARIABLE():
varName = G.CurrInstruction.dest.value
if G.CurrInstruction.isCopy:
#If it is a copy operation, simply allocate the register allocated to the input
if (G.CurrInstruction.inp1.is_SCALAR_VARIABLE()):
inpVarName = G.CurrInstruction.inp1.value
reg = G.AllocMap[inpVarName]
G.AllocMap[varName] = reg
return codeSegment
if G.CurrRegAddrTable.IsInRegisterSafe(varName):
reg = G.CurrRegAddrTable.GetAllocatedRegister(varName)
G.AllocMap[varName] = reg
return codeSegment
if G.AllocMap.has_key(varName):
return codeSegment
if G.CurrInstruction.inp1.is_SCALAR_VARIABLE():
inpVarName = G.CurrInstruction.inp1.value
if SafeToReuse(inpVarName):
# Use the same register
reg = G.AllocMap[inpVarName]
G.AllocMap[varName] = reg
return codeSegment
if G.CurrInstruction.inp2.is_SCALAR_VARIABLE():
inpVarName = G.CurrInstruction.inp2.value
if SafeToReuse(inpVarName):
# Use the same register
reg = G.AllocMap[inpVarName]
G.AllocMap[varName] = reg
return codeSegment
reg, getRegCode, isLoaded = GetReg(varName, alreadyAllocatedRegs, forceSearch=True)
# Set global details and add code for loading if necessary
G.AllocMap[varName] = reg
codeSegment += getRegCode
G.AsmText.AddText(getRegCode[:-1], "Freeing space for %s"%varName)
return codeSegment
def UpdateRegAddrDescriptor(self):
if (G.CurrInstruction.inp1.is_HASH_VARIABLE() or
G.CurrInstruction.inp2.is_HASH_VARIABLE() or
G.CurrInstruction.dest.is_HASH_VARIABLE()):
# No allocation has been performed
return
if G.CurrInstruction.inp1.is_SCALAR_VARIABLE():
varName = G.CurrInstruction.inp1.value
G.CurrRegAddrTable.SetRegister(varName, G.AllocMap[varName])
if G.CurrInstruction.inp2.is_SCALAR_VARIABLE():
varName = G.CurrInstruction.inp2.value
G.CurrRegAddrTable.SetRegister(varName, G.AllocMap[varName])
if G.CurrInstruction.dest.is_SCALAR_VARIABLE():
if G.CurrInstruction.instrType.is_CALL() or G.CurrInstruction.instrType.is_ALLOC():
# End of a basic block anyway. Don't update anything
return
varName = G.CurrInstruction.dest.value
if G.CurrInstruction.isCopy:
G.CurrRegAddrTable.RemoveDestVarFromRegisters(varName)
G.CurrRegAddrTable.SetInMemory(varName, False, knockOffRegister=True)
G.CurrRegAddrTable.SetRegister(varName, G.AllocMap[varName])
else:
G.CurrRegAddrTable.RemoveDestVarFromRegisters(varName)
G.CurrRegAddrTable.ClearRegister(G.AllocMap[varName])
G.CurrRegAddrTable.SetInMemory(varName, False, knockOffRegister=True)
G.CurrRegAddrTable.SetRegister(varName, G.AllocMap[varName])
def GetReg(varName, alreadyAllocatedRegs, forceSearch=False):
codeSegment = ""
if G.CurrRegAddrTable.IsInRegister(varName) and (not forceSearch):
# If it's already in a register, there is nothing to do
        # True indicates that the variable is already loaded
return G.CurrRegAddrTable.GetAllocatedRegister(varName), codeSegment, True
for (reg, alloc) in G.CurrRegAddrTable.regMap.items():
if reg.regName in alreadyAllocatedRegs:
continue
if not alloc: # Empty, we can use this
            # False indicates that the variable is not loaded
return reg, codeSegment, False
else:
currReg, currCodeSegment = FindBestRegister(varName, alreadyAllocatedRegs)
codeSegment += currCodeSegment
G.CurrRegAddrTable.ClearRegister(currReg)
        # False indicates that the variable is not loaded
return currReg, codeSegment, False
def FindBestRegister(varName, alreadyAllocatedRegs, alreadyLoadedReg=None):
# All registers are full, need to select the cheapest one.
currMinScore = 10
currReg = None
currCodeSegment = ""
currRemoveSet = []
for reg in G.CurrRegAddrTable.regs:
if reg.regName in alreadyAllocatedRegs:
# Shouldn't disturb previous allocations. Wouldn't make any sense
continue
score, regCodeSeg, regRemoveSet = reg.Score(varName)
if score < currMinScore:
currMinScore = score
currReg = reg
currCodeSegment = regCodeSeg
currRemoveSet = regRemoveSet[:]
elif score == currMinScore:
#Compare next use values
if currReg.GetEarliestNextUse() < reg.GetEarliestNextUse():
currReg = reg
currCodeSegment = regCodeSeg
currRemoveSet = regRemoveSet[:]
for removeVar in currRemoveSet:
# Update Register-Address-Table
#print "REMOVING : ", removeVar
G.CurrRegAddrTable.SetInMemory(removeVar, knockOffRegister=True)
return currReg, currCodeSegment
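# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, self-contained illustration of the victim-selection idea used by
# FindBestRegister above: among the candidate registers, prefer the lowest
# spill score and break ties by evicting the register whose value is needed
# furthest in the future. The dictionaries below are hypothetical stand-ins
# for the project's register/score bookkeeping.
def _example_pick_victim(scores, next_uses, candidates):
    """Return the candidate with the (lowest score, latest next use)."""
    best = None
    for reg in candidates:
        if best is None or scores[reg] < scores[best]:
            best = reg
        elif scores[reg] == scores[best] and next_uses[reg] > next_uses[best]:
            best = reg
    return best
# _example_pick_victim({"r1": 2, "r2": 1, "r3": 1},
#                      {"r1": 5, "r2": 9, "r3": 4},
#                      ["r1", "r2", "r3"])  ->  "r2"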
class SymbolTable(object):
"""
Used to implement a symbol table. Contains info about liveness and next-use.
Will be required in register allocation
symTable : Map : <var> -> (live?, next-use)
Note: -1 means no-next-use
"""
def __init__(self, initial_symbols):
self.symTable = {sym:[True, -1] for sym in initial_symbols}
def SetLive(self, varName, isLive=True):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
self.symTable[varName][0] = isLive
def SetNextUse(self, varName, nextUse):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
self.symTable[varName][1] = nextUse
def GetNextUse(self, varName):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
return self.symTable[varName][1]
def IsLive(self, varName):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
return self.symTable[varName][0] and (self.symTable[varName][1] != -1)
def IsLiveOnExit(self, varName):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
return self.symTable[varName][0]
def PrettyPrint(self):
""" For debugging purposes """
for (sym, prop) in self.symTable.items():
print sym, " : ", prop
def SymSetProperties(symTable, newProperties):
""" Creates a copy and then modifies it. This is because we need a separate table for every instruction """
symCopy = SymbolTable([])
symCopy.symTable = {sym:value[:] for (sym, value) in symTable.symTable.items()}
for (varName, properties) in newProperties.items():
symCopy.SetLive(varName, properties[0])
symCopy.SetNextUse(varName, properties[1])
return symCopy
def SafeToReuse(varName):
# Assumes a register has been allocated to varName
return (not G.NextSymbolTable.IsLive(varName)) and (G.CurrRegAddrTable.IsInMemory(varName))
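# --- Illustrative sketch (not part of the original module) -------------------
# SafeToReuse above combines two pieces of bookkeeping: the liveness/next-use
# entry kept by SymbolTable and the in-memory flag kept by the address
# descriptor. This hypothetical helper shows the same condition over plain
# dicts, where live_info maps var -> (live_on_exit, next_use) with -1 meaning
# "no next use", and in_memory maps var -> bool.
def _example_safe_to_reuse(var, live_info, in_memory):
    live_on_exit, next_use = live_info[var]
    is_live = live_on_exit and next_use != -1
    return (not is_live) and in_memory[var]
# _example_safe_to_reuse("a", {"a": (True, -1)}, {"a": True})  ->  True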
class RegAddrDescriptor(object):
"""
This class implements a register and address descriptor.
"""
def __init__(self, initial_symbols):
self.symbols = initial_symbols
self.regs = REG.addrDescRegs + REG.savedRegs # Can be changed if we want to use less/more
#self.regs = [REG.t0, REG.t1, REG.t2]
# Since everything is global, all of them reside in memory
# MAP : <var_name> -> (in_memory? , in_register?)
self.addrMap = {sym:[True, None] for sym in initial_symbols}
# All registers assumed to be empty initially
self.regMap = {reg : [] for reg in self.regs}
def IsInRegister(self, varName):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
return (self.addrMap[varName][1] != None)
def IsInRegisterSafe(self, varName):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
reg = self.addrMap[varName][1]
if reg == None:
return False
return len(self.regMap[reg]) == 1
def IsElsewhere(self, varName, regName):
return self.IsInMemory(varName) or (self.addrMap[varName][1].regName != regName)
def GetVars(self, reg):
return self.regMap[reg]
def GetAllocatedRegister(self, varName):
""" To be used after register allocation has been performed """
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
return self.addrMap[varName][1]
def IsInMemory(self, varName):
""" Is latest value in memory? """
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
return self.addrMap[varName][0]
def SetInMemory(self, varName, inMemory=True, knockOffRegister=False):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
self.addrMap[varName][0] = inMemory
if knockOffRegister:
self.addrMap[varName][1] = None
def SetRegister(self, varName, reg):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
self.addrMap[varName][1] = reg
self.regMap[reg] = list(set(self.regMap[reg] + [varName]))
def ClearRegister(self, reg):
self.regMap[reg] = []
def RemoveDestVarFromRegisters(self, varName):
if type(varName) == INSTRUCTION.Entity:
DEBUG.Assert(varName.is_SCALAR_VARIABLE(), "Entity is not a scalar variable")
varName = varName.value
for reg in self.regMap:
if len(self.regMap[reg]) > 1:
try:
self.regMap[reg].remove(varName)
self.addrMap[varName][1] = None
                except (KeyError, ValueError):  # varName is not tracked in this register
pass
def PrettyPrintRegisters(self):
for (reg, var) in self.regMap.items():
if var:
print str(reg), " : ", var, " ",
print ""
def DumpDirtyVars(self):
# Will be called when exiting a basic block
# Writes values of dirty registers to memory
for (var,value) in self.addrMap.iteritems():
if not value[0]:
G.AsmText.AddText(value[1].SpillVar(var)[:-1], "Spilling variable %s\n"%var)
def Reset(self):
# Since everything is global, all of them reside in memory
# MAP : <var_name> -> (in_memory? , in_register?)
self.addrMap = {sym:[True, None] for sym in self.symbols}
# All registers assumed to be empty initially
self.regMap = {reg : [] for reg in self.regs}
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import ipv4_segment
from . import unnumbered_hop
class segment(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding/tlvs/tlv/ero-path/segments/segment. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: A segment of the path described within the sub-TLV
"""
__slots__ = (
"_path_helper", "_extmethods", "__state", "__ipv4_segment", "__unnumbered_hop"
)
_yang_name = "segment"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__ipv4_segment = YANGDynClass(
base=ipv4_segment.ipv4_segment,
is_container="container",
yang_name="ipv4-segment",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__unnumbered_hop = YANGDynClass(
base=unnumbered_hop.unnumbered_hop,
is_container="container",
yang_name="unnumbered-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-prefix",
"tlvs",
"tlv",
"sid-label-binding",
"tlvs",
"tlv",
"ero-path",
"segments",
"segment",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/state (container)
YANG Description: State parameters relating to the path segment
contained within the sub-TLV
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters relating to the path segment
contained within the sub-TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_ipv4_segment(self):
"""
Getter method for ipv4_segment, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/ipv4_segment (container)
YANG Description: Details of the IPv4 segment interface of the ERO
"""
return self.__ipv4_segment
def _set_ipv4_segment(self, v, load=False):
"""
Setter method for ipv4_segment, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/ipv4_segment (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv4_segment is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv4_segment() directly.
YANG Description: Details of the IPv4 segment interface of the ERO
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=ipv4_segment.ipv4_segment,
is_container="container",
yang_name="ipv4-segment",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """ipv4_segment must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=ipv4_segment.ipv4_segment, is_container='container', yang_name="ipv4-segment", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__ipv4_segment = t
if hasattr(self, "_set"):
self._set()
def _unset_ipv4_segment(self):
self.__ipv4_segment = YANGDynClass(
base=ipv4_segment.ipv4_segment,
is_container="container",
yang_name="ipv4-segment",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_unnumbered_hop(self):
"""
Getter method for unnumbered_hop, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/unnumbered_hop (container)
YANG Description: Details of the unnumbered interface segment of the
ERO
"""
return self.__unnumbered_hop
def _set_unnumbered_hop(self, v, load=False):
"""
Setter method for unnumbered_hop, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/unnumbered_hop (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_unnumbered_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_unnumbered_hop() directly.
YANG Description: Details of the unnumbered interface segment of the
ERO
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=unnumbered_hop.unnumbered_hop,
is_container="container",
yang_name="unnumbered-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """unnumbered_hop must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=unnumbered_hop.unnumbered_hop, is_container='container', yang_name="unnumbered-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__unnumbered_hop = t
if hasattr(self, "_set"):
self._set()
def _unset_unnumbered_hop(self):
self.__unnumbered_hop = YANGDynClass(
base=unnumbered_hop.unnumbered_hop,
is_container="container",
yang_name="unnumbered-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
ipv4_segment = __builtin__.property(_get_ipv4_segment)
unnumbered_hop = __builtin__.property(_get_unnumbered_hop)
_pyangbind_elements = OrderedDict(
[
("state", state),
("ipv4_segment", ipv4_segment),
("unnumbered_hop", unnumbered_hop),
]
)
from . import state
from . import ipv4_segment
from . import unnumbered_hop
class segment(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding/tlvs/tlv/ero-path/segments/segment. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: A segment of the path described within the sub-TLV
"""
__slots__ = (
"_path_helper", "_extmethods", "__state", "__ipv4_segment", "__unnumbered_hop"
)
_yang_name = "segment"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__ipv4_segment = YANGDynClass(
base=ipv4_segment.ipv4_segment,
is_container="container",
yang_name="ipv4-segment",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__unnumbered_hop = YANGDynClass(
base=unnumbered_hop.unnumbered_hop,
is_container="container",
yang_name="unnumbered-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-prefix",
"tlvs",
"tlv",
"sid-label-binding",
"tlvs",
"tlv",
"ero-path",
"segments",
"segment",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/state (container)
YANG Description: State parameters relating to the path segment
contained within the sub-TLV
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters relating to the path segment
contained within the sub-TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_ipv4_segment(self):
"""
Getter method for ipv4_segment, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/ipv4_segment (container)
YANG Description: Details of the IPv4 segment interface of the ERO
"""
return self.__ipv4_segment
def _set_ipv4_segment(self, v, load=False):
"""
Setter method for ipv4_segment, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/ipv4_segment (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv4_segment is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv4_segment() directly.
YANG Description: Details of the IPv4 segment interface of the ERO
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=ipv4_segment.ipv4_segment,
is_container="container",
yang_name="ipv4-segment",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """ipv4_segment must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=ipv4_segment.ipv4_segment, is_container='container', yang_name="ipv4-segment", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__ipv4_segment = t
if hasattr(self, "_set"):
self._set()
def _unset_ipv4_segment(self):
self.__ipv4_segment = YANGDynClass(
base=ipv4_segment.ipv4_segment,
is_container="container",
yang_name="ipv4-segment",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_unnumbered_hop(self):
"""
Getter method for unnumbered_hop, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/unnumbered_hop (container)
YANG Description: Details of the unnumbered interface segment of the
ERO
"""
return self.__unnumbered_hop
def _set_unnumbered_hop(self, v, load=False):
"""
Setter method for unnumbered_hop, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/unnumbered_hop (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_unnumbered_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_unnumbered_hop() directly.
YANG Description: Details of the unnumbered interface segment of the
ERO
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=unnumbered_hop.unnumbered_hop,
is_container="container",
yang_name="unnumbered-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """unnumbered_hop must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=unnumbered_hop.unnumbered_hop, is_container='container', yang_name="unnumbered-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__unnumbered_hop = t
if hasattr(self, "_set"):
self._set()
def _unset_unnumbered_hop(self):
self.__unnumbered_hop = YANGDynClass(
base=unnumbered_hop.unnumbered_hop,
is_container="container",
yang_name="unnumbered-hop",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
ipv4_segment = __builtin__.property(_get_ipv4_segment)
unnumbered_hop = __builtin__.property(_get_unnumbered_hop)
_pyangbind_elements = OrderedDict(
[
("state", state),
("ipv4_segment", ipv4_segment),
("unnumbered_hop", unnumbered_hop),
]
)
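# --- Illustrative sketch (not part of the generated bindings) ----------------
# A hedged example of how a pyangbind-generated container such as `segment`
# is typically consumed; it assumes the sibling generated modules (state,
# ipv4_segment, unnumbered_hop) are importable from this package.
def _example_segment_usage():
    seg = segment()
    path = seg._path()  # the YANG path as a list of element names
    state_container = seg.state  # read-only property for the child container
    return path, state_container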
|
|
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Zmake multiprocessing utility module.
This module is used to aid in zmake's multiprocessing. It contains tools
available to log output from multiple processes on the fly. This means that a
process does not need to finish before the output is available to the developer
on the screen.
"""
import collections
import logging
import os
import select
import threading
# A local pipe used to signal the loop that a new file descriptor was added and
# should be included in the select statement.
_logging_interrupt_pipe = os.pipe()
# A condition variable used to synchronize logging operations.
_logging_cv = threading.Condition()
# A map of file descriptors to their LogWriter
_logging_map = {}
# Should we log job names or not
log_job_names = True
def reset():
"""Reset this module to its starting state (useful for tests)"""
global _logging_map
_logging_map = {}
class LogWriter:
"""Contains information about a file descriptor that is producing output
There is typically one of these for each file descriptor that a process is
writing to while running (stdout and stderr).
Properties:
_logger: The logger object to use.
_log_level: The logging level to use.
        _override_func: A function used to override the log level. The
            function will be called once per line prior to logging and will be
            passed the line and the default log level as arguments.
_written_at_level: dict:
key: log_level
value: True if output was written at that level
_job_id: The name to prepend to logged lines
_file_descriptor: The file descriptor being logged.
"""
def __init__(
self, logger, log_level, log_level_override_func, job_id, file_descriptor
):
self._logger = logger
self._log_level = log_level
self._override_func = log_level_override_func
# A map whether output was printed at each logging level
self._written_at_level = collections.defaultdict(lambda: False)
self._job_id = job_id
self._file_descriptor = file_descriptor
def log_line(self, line):
"""Log a line of output
If the log-level override function requests a change in log level, that
causes self._log_level to be updated accordingly.
Args:
line: Text line to log
"""
if self._override_func:
# Get the new log level and update the default. The reason we
# want to update the default is that if we hit an error, all
# future logging should be moved to the new logging level. This
# greatly simplifies the logic that is needed to update the log
# level.
self._log_level = self._override_func(line, self._log_level)
if self._job_id and log_job_names:
self._logger.log(self._log_level, "[%s]%s", self._job_id, line)
else:
self._logger.log(self._log_level, line)
self._written_at_level[self._log_level] = True
def has_written(self, log_level):
"""Check if output was written at a certain log level
Args:
log_level: log level to check
Returns:
True if any output was written at that log level, False if not
"""
return self._written_at_level[log_level]
def wait(self):
"""Wait for this LogWriter to finish.
This method will block execution until all the logs have been flushed out.
"""
with _logging_cv:
_logging_cv.wait_for(lambda: self._file_descriptor not in _logging_map)
def _log_fd(fd):
"""Log information from a single file descriptor.
This function is BLOCKING. It will read from the given file descriptor until
either the end of line is read or EOF. Once EOF is read it will remove the
file descriptor from _logging_map so it will no longer be used.
Additionally, in some cases, the file descriptor will be closed (caused by
a call to Popen.wait()). In these cases, the file descriptor will also be
removed from the map as it is no longer valid.
"""
with _logging_cv:
writer = _logging_map[fd]
if fd.closed:
del _logging_map[fd]
_logging_cv.notify_all()
return
line = fd.readline()
if not line:
# EOF
del _logging_map[fd]
_logging_cv.notify_all()
return
line = line.rstrip("\n")
if line:
writer.log_line(line)
def _prune_logging_fds():
"""Prune the current file descriptors under _logging_map.
This function will iterate over the logging map and check for closed file
descriptors. Every closed file descriptor will be removed.
"""
with _logging_cv:
remove = [fd for fd in _logging_map.keys() if fd.closed]
for fd in remove:
del _logging_map[fd]
if remove:
_logging_cv.notify_all()
def _logging_loop():
"""The primary logging thread loop.
This is the entry point of the logging thread. It will listen for (1) any
new data on the output file descriptors that were added via log_output() and
(2) any new file descriptors being added by log_output(). Once a file
descriptor is ready to be read, this function will call _log_fd to perform
the actual read and logging.
"""
while True:
with _logging_cv:
_logging_cv.wait_for(lambda: _logging_map)
keys = list(_logging_map.keys()) + [_logging_interrupt_pipe[0]]
try:
fds, _, _ = select.select(keys, [], [])
except ValueError:
# One of the file descriptors must be closed, prune them and try
# again.
_prune_logging_fds()
continue
if _logging_interrupt_pipe[0] in fds:
            # We got a dummy byte sent by log_output(); this is a signal used to
# break out of the blocking select.select call to tell us that the
# file descriptor set has changed. We just need to read the byte and
# remove this descriptor from the list. If we actually have data
# that should be read it will be read in the for loop below.
os.read(_logging_interrupt_pipe[0], 1)
fds.remove(_logging_interrupt_pipe[0])
for fd in fds:
_log_fd(fd)
_logging_thread = None
def log_output(
logger, log_level, file_descriptor, log_level_override_func=None, job_id=None
):
"""Log the output from the given file descriptor.
Args:
logger: The logger object to use.
log_level: The logging level to use.
file_descriptor: The file descriptor to read from.
        log_level_override_func: A function used to override the log level. The
            function will be called once per line prior to logging and will be
            passed the line and the default log level as arguments.
        job_id: The name to prepend to logged lines (optional).
Returns:
LogWriter object for the resulting output
"""
with _logging_cv:
global _logging_thread
if _logging_thread is None or not _logging_thread.is_alive():
# First pass or thread must have died, create a new one.
_logging_thread = threading.Thread(target=_logging_loop, daemon=True)
_logging_thread.start()
writer = LogWriter(
logger, log_level, log_level_override_func, job_id, file_descriptor
)
_logging_map[file_descriptor] = writer
# Write a dummy byte to the pipe to break the select so we can add the
# new fd.
os.write(_logging_interrupt_pipe[1], b"x")
# Notify the condition so we can run the select on the current fds.
_logging_cv.notify_all()
return writer
def wait_for_log_end():
"""Wait for all the logs to be printed.
This method will block execution until all the logs have been flushed out.
"""
with _logging_cv:
_logging_cv.wait_for(lambda: not _logging_map)
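# --- Illustrative sketch (not part of the original module) -------------------
# A minimal usage sketch of log_output() and LogWriter.wait() with a
# hypothetical subprocess; it assumes a command that writes line-oriented
# text to stdout.
def _example_log_subprocess(cmd=("echo", "hello")):
    import subprocess
    logger = logging.getLogger("zmake-demo")
    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, encoding="utf-8", errors="replace"
    )
    writer = log_output(logger, logging.INFO, proc.stdout, job_id="demo")
    proc.wait()
    writer.wait()  # block until this file descriptor has been fully drained
    return proc.returncode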
class Executor:
"""Parallel executor helper class.
This class is used to run multiple functions in parallel. The functions MUST
return an integer result code (or throw an exception). This class will start
a thread per operation and wait() for all the threads to resolve.
Attributes:
lock: The condition variable used to synchronize across threads.
threads: A list of threading.Thread objects currently under this
Executor.
results: A list of result codes returned by each of the functions called
by this Executor.
"""
def __init__(self):
self.lock = threading.Condition()
self.threads = []
self.results = []
self.logger = logging.getLogger(self.__class__.__name__)
def append(self, func):
"""Append the given function to the wait list.
Once added, the function's return value will be used to determine the
Executor's final result value. The function must return an int result
code or throw an exception. For example: If two functions were added
to the Executor, they will both be run in parallel and their results
will determine whether or not the Executor succeeded. If both functions
returned 0, then the Executor's wait function will also return 0.
Args:
func: A function which returns an int result code or throws an
exception.
"""
with self.lock:
thread = threading.Thread(target=lambda: self._run_fn(func), daemon=True)
thread.start()
self.threads.append(thread)
def wait(self):
"""Wait for a result to be available.
This function waits for the executor to resolve (i.e., all
threads have finished).
Returns:
An integer result code of either the first failed function or 0 if
they all succeeded.
"""
with self.lock:
self.lock.wait_for(predicate=lambda: self._is_finished)
return self._result
def _run_fn(self, func):
"""Entry point to each running thread.
This function will run the function provided in the append() function.
The result value of the function will be used to determine the
Executor's result value. If the function throws any exception it will be
caught and -1 will be used as the assumed result value.
Args:
func: The function to run.
"""
try:
result = func()
except Exception as ex:
self.logger.exception(ex)
result = -1
with self.lock:
self.results.append(result)
self.lock.notify_all()
@property
def _is_finished(self):
"""Whether or not the Executor is considered to be done.
Returns:
True if the Executor is considered done.
"""
if len(self.threads) == len(self.results):
return True
return False
@property
def _result(self):
"""The result code of the Executor.
Note that _is_finished must be True for this to have any meaning.
Returns:
An int representing the result value of the underlying functions.
"""
return next((result for result in self.results if result), 0)
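# --- Illustrative sketch (not part of the original module) -------------------
# A hedged example of the Executor contract described above: each appended
# callable returns an int result code, and wait() resolves to the first
# non-zero code (or 0 when everything succeeded).
def _example_executor_usage():
    executor = Executor()
    executor.append(lambda: 0)  # succeeds
    executor.append(lambda: 2)  # fails with result code 2
    return executor.wait()  # -> 2 once both threads have finished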
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "rb") as reader:
while True:
token = reader.readline()
token = token.decode("utf-8", "ignore")
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
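# --- Illustrative sketch (not part of the original file) ---------------------
# A hedged example of the end-to-end flow above; it assumes a WordPiece vocab
# file (one token per line, e.g. the vocab.txt shipped with a BERT checkpoint).
def _example_full_tokenizer(vocab_file="vocab.txt"):
  tokenizer = FullTokenizer(vocab_file, do_lower_case=True)
  tokens = tokenizer.tokenize("John Johanson's house")
  ids = tokenizer.convert_tokens_to_ids(tokens)
  return tokens, ids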
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
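# --- Illustrative sketch (not part of the original file) ---------------------
# The docstring example above, spelled out with a toy vocabulary so the greedy
# longest-match-first behaviour can be exercised directly.
def _example_wordpiece():
  toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
  tokenizer = WordpieceTokenizer(vocab=toy_vocab)
  return tokenizer.tokenize("unaffable")  # -> ["un", "##aff", "##able"]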
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
|
"""Utilities for collecting objects based on "is" comparison."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import weakref
# LINT.IfChange
class _ObjectIdentityWrapper(object):
"""Wraps an object, mapping __eq__ on wrapper to "is" on wrapped.
Since __eq__ is based on object identity, it's safe to also define __hash__
based on object ids. This lets us add unhashable types like trackable
_ListWrapper objects to object-identity collections.
"""
__slots__ = ["_wrapped", "__weakref__"]
def __init__(self, wrapped):
self._wrapped = wrapped
@property
def unwrapped(self):
return self._wrapped
def _assert_type(self, other):
if not isinstance(other, _ObjectIdentityWrapper):
raise TypeError("Cannot compare wrapped object with unwrapped object")
def __lt__(self, other):
self._assert_type(other)
return id(self._wrapped) < id(other._wrapped) # pylint: disable=protected-access
def __gt__(self, other):
self._assert_type(other)
return id(self._wrapped) > id(other._wrapped) # pylint: disable=protected-access
def __eq__(self, other):
if other is None:
return False
self._assert_type(other)
return self._wrapped is other._wrapped # pylint: disable=protected-access
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# Wrapper id() is also fine for weakrefs. In fact, we rely on
# id(weakref.ref(a)) == id(weakref.ref(a)) and weakref.ref(a) is
# weakref.ref(a) in _WeakObjectIdentityWrapper.
return id(self._wrapped)
def __repr__(self):
return "<{} wrapping {!r}>".format(type(self).__name__, self._wrapped)
class _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):
__slots__ = ()
def __init__(self, wrapped):
super(_WeakObjectIdentityWrapper, self).__init__(weakref.ref(wrapped))
@property
def unwrapped(self):
return self._wrapped()
class Reference(_ObjectIdentityWrapper):
"""Reference that refers an object.
```python
x = [1]
y = [1]
x_ref1 = Reference(x)
x_ref2 = Reference(x)
y_ref2 = Reference(y)
print(x_ref1 == x_ref2)
==> True
print(x_ref1 == y)
==> False
```
"""
__slots__ = ()
# Disabling super class' unwrapped field.
unwrapped = property()
def deref(self):
"""Returns the referenced object.
```python
x_ref = Reference(x)
print(x is x_ref.deref())
==> True
```
"""
return self._wrapped
class ObjectIdentityDictionary(collections.abc.MutableMapping):
"""A mutable mapping data structure which compares using "is".
This is necessary because we have trackable objects (_ListWrapper) which
have behavior identical to built-in Python lists (including being unhashable
and comparing based on the equality of their contents by default).
"""
__slots__ = ["_storage"]
def __init__(self):
self._storage = {}
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __getitem__(self, key):
return self._storage[self._wrap_key(key)]
def __setitem__(self, key, value):
self._storage[self._wrap_key(key)] = value
def __delitem__(self, key):
del self._storage[self._wrap_key(key)]
def __len__(self):
return len(self._storage)
def __iter__(self):
for key in self._storage:
yield key.unwrapped
def __repr__(self):
return "ObjectIdentityDictionary(%s)" % repr(self._storage)
class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary):
"""Like weakref.WeakKeyDictionary, but compares objects with "is"."""
__slots__ = ["__weakref__"]
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len(list(self._storage))
def __iter__(self):
keys = self._storage.keys()
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
del self[key]
else:
yield unwrapped
class ObjectIdentitySet(collections.abc.MutableSet):
"""Like the built-in set, but compares objects with "is"."""
__slots__ = ["_storage", "__weakref__"]
def __init__(self, *args):
self._storage = set(self._wrap_key(obj) for obj in list(*args))
@staticmethod
def _from_storage(storage):
result = ObjectIdentitySet()
result._storage = storage # pylint: disable=protected-access
return result
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __contains__(self, key):
return self._wrap_key(key) in self._storage
def discard(self, key):
self._storage.discard(self._wrap_key(key))
def add(self, key):
self._storage.add(self._wrap_key(key))
def update(self, items):
self._storage.update([self._wrap_key(item) for item in items])
def clear(self):
self._storage.clear()
def intersection(self, items):
return self._storage.intersection([self._wrap_key(item) for item in items])
def difference(self, items):
return ObjectIdentitySet._from_storage(
self._storage.difference([self._wrap_key(item) for item in items]))
def __len__(self):
return len(self._storage)
def __iter__(self):
keys = list(self._storage)
for key in keys:
yield key.unwrapped
class ObjectIdentityWeakSet(ObjectIdentitySet):
"""Like weakref.WeakSet, but compares objects with "is"."""
__slots__ = ()
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len([_ for _ in self])
def __iter__(self):
keys = list(self._storage)
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
self.discard(key)
else:
yield unwrapped
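# --- Illustrative sketch (not part of the original file) ---------------------
# Shows the weak-reference variant described above: once the only strong
# reference to an element is dropped, the set stops yielding it (immediately
# under CPython's reference counting).
def _example_object_identity_weak_set():
  class _Obj(object):
    pass
  s = ObjectIdentityWeakSet()
  kept = _Obj()
  dropped = _Obj()
  s.add(kept)
  s.add(dropped)
  del dropped  # the weakly referenced entry may now be collected
  return kept in s, len(s)  # -> (True, 1) once the dropped object is collected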
# LINT.ThenChange(//tensorflow/python/util/object_identity.py)
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Country'
db.create_table('location_country', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('gn_name', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('gn_id', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='children', to=orm['location.GlobalRegion'])),
))
db.send_create_signal('location', ['Country'])
# Adding unique constraint on 'Country', fields ['parent', 'slug']
db.create_unique('location_country', ['parent_id', 'slug'])
# Adding model 'RegionDistrict'
db.create_table('location_regiondistrict', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('gn_name', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('gn_id', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='children', to=orm['location.StateProvince'])),
))
db.send_create_signal('location', ['RegionDistrict'])
# Adding unique constraint on 'RegionDistrict', fields ['parent', 'slug']
db.create_unique('location_regiondistrict', ['parent_id', 'slug'])
# Adding model 'GlobalRegion'
db.create_table('location_globalregion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('gn_name', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('gn_id', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal('location', ['GlobalRegion'])
# Adding model 'StateProvince'
db.create_table('location_stateprovince', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('gn_name', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('gn_id', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='children', to=orm['location.Country'])),
))
db.send_create_signal('location', ['StateProvince'])
# Adding unique constraint on 'StateProvince', fields ['parent', 'slug']
db.create_unique('location_stateprovince', ['parent_id', 'slug'])
# Adding model 'Locality'
db.create_table('location_locality', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('gn_name', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('gn_id', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='children', to=orm['location.RegionDistrict'])),
))
db.send_create_signal('location', ['Locality'])
# Adding unique constraint on 'Locality', fields ['parent', 'slug']
db.create_unique('location_locality', ['parent_id', 'slug'])
def backwards(self, orm):
# Removing unique constraint on 'Locality', fields ['parent', 'slug']
db.delete_unique('location_locality', ['parent_id', 'slug'])
# Removing unique constraint on 'StateProvince', fields ['parent', 'slug']
db.delete_unique('location_stateprovince', ['parent_id', 'slug'])
# Removing unique constraint on 'RegionDistrict', fields ['parent', 'slug']
db.delete_unique('location_regiondistrict', ['parent_id', 'slug'])
# Removing unique constraint on 'Country', fields ['parent', 'slug']
db.delete_unique('location_country', ['parent_id', 'slug'])
# Deleting model 'Country'
db.delete_table('location_country')
# Deleting model 'RegionDistrict'
db.delete_table('location_regiondistrict')
# Deleting model 'GlobalRegion'
db.delete_table('location_globalregion')
# Deleting model 'StateProvince'
db.delete_table('location_stateprovince')
# Deleting model 'Locality'
db.delete_table('location_locality')
models = {
'location.country': {
'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'Country'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'to': "orm['location.GlobalRegion']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'location.globalregion': {
'Meta': {'ordering': "['name']", 'object_name': 'GlobalRegion'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'location.locality': {
'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'Locality'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'to': "orm['location.RegionDistrict']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'location.place': {
'Meta': {'ordering': "['id']", 'object_name': 'Place'},
'australian_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_corrected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'location.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'location.regiondistrict': {
'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'RegionDistrict'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'to': "orm['location.StateProvince']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'location.stateprovince': {
'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'StateProvince'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'to': "orm['location.Country']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
}
}
complete_apps = ['location']
|
|
import calendar
import datetime
import logging
import os
import environ
import pytest
from aiohttp import web
from rororo.logger import default_logging_dict
from rororo.settings import (
BaseSettings,
from_env,
immutable_settings,
inject_settings,
is_setting_key,
setup_locale,
setup_logging,
setup_settings,
setup_timezone,
)
from . import settings as settings_module
TEST_DEBUG = True
TEST_USER = "test-user"
_TEST_USER = "private-user"
def check_immutability(settings):
# Cannot update current value
key = list(settings.keys())[0]
with pytest.raises(TypeError):
settings[key] = "new-value"
# Cannot add new value
assert "TEST_SETTING" not in settings
with pytest.raises(TypeError):
settings["TEST_SETTING"] = "test-value"
# Cannot update values at all
with pytest.raises(AttributeError):
settings.update({key: "new-value", "TEST_SETTING": "test_value"})
def test_base_settings():
settings = BaseSettings.from_environ()
assert settings.host == "localhost"
assert settings.port == 8080
assert settings.debug is False
assert settings.level == "test"
assert settings.time_zone == "UTC"
assert settings.first_weekday == 0
assert settings.locale == "en_US.UTF-8"
assert settings.sentry_dsn is None
assert settings.sentry_release is None
def test_base_settings_apply():
BaseSettings.from_environ().apply()
def test_base_settings_apply_with_loggers():
BaseSettings.from_environ().apply(loggers=("aiohttp", "rororo"))
def test_base_settings_from_env(monkeypatch):
monkeypatch.setenv("DEBUG", "yes")
assert BaseSettings.from_environ().debug is True
def test_base_settings_from_env_kwargs():
assert BaseSettings.from_environ({"DEBUG": "true"}).debug is True
def test_base_settings_from_kwargs():
assert BaseSettings(debug=True).debug is True
def test_base_settings_inheritance(monkeypatch):
monkeypatch.setenv("USE_RORORO", "yes")
@environ.config(prefix=None, frozen=True)
class Settings(BaseSettings):
use_rororo: bool = environ.bool_var(name="USE_RORORO", default=True)
settings = Settings.from_environ()
assert settings.debug is False
assert settings.use_rororo is True
@pytest.mark.parametrize(
"level, expected_is_test, expected_is_dev, expected_is_staging, "
"expected_is_prod",
(
("dev", False, True, False, False),
("test", True, False, False, False),
("prod", False, False, False, True),
("staging", False, False, True, False),
),
)
def test_base_settings_is_properties(
monkeypatch,
level,
expected_is_test,
expected_is_dev,
expected_is_staging,
expected_is_prod,
):
monkeypatch.setenv("LEVEL", level)
settings = BaseSettings.from_environ()
assert settings.is_test is expected_is_test
assert settings.is_dev is expected_is_dev
assert settings.is_staging is expected_is_staging
assert settings.is_prod is expected_is_prod
def test_from_env():
assert from_env("USER") == os.getenv("USER")
assert from_env("DOES_NOT_EXIST") is None
assert from_env("DOES_NOT_EXIST", True) is True
def test_immutable_settings_from_dict():
settings_dict = {
"DEBUG": True,
"USER": "test-user",
"_USER": "private-user",
}
settings = immutable_settings(settings_dict)
assert settings["DEBUG"] is True
assert settings["USER"] == "test-user"
assert "_USER" not in settings
settings_dict.pop("USER")
assert settings["USER"] == "test-user"
check_immutability(settings)
def test_immutable_settings_from_globals():
settings = immutable_settings(globals())
assert settings["TEST_DEBUG"] is True
assert settings["TEST_USER"] == "test-user"
assert "_TEST_USER" not in settings
assert "pytest" not in settings
check_immutability(settings)
def test_immutable_settings_from_locals():
DEBUG = True # noqa: N806
USER = "local-test-user" # noqa: N806
_USER = "private-user" # noqa: N806
not_a_setting = True
settings = immutable_settings(locals())
assert settings["DEBUG"] is True
assert settings["USER"], "local-test-user"
assert "_USER" not in settings
assert "not_a_setting" not in settings
del DEBUG, USER, _USER
assert settings["USER"] == "local-test-user"
check_immutability(settings)
def test_immutable_settings_from_module():
settings = immutable_settings(settings_module)
assert settings["DEBUG"] is True
assert settings["USER"] == os.getenv("USER")
assert "os" not in settings
check_immutability(settings)
def test_immutable_settings_with_optionals():
settings = immutable_settings(settings_module, DEBUG=False)
assert settings["DEBUG"] is False
assert settings["USER"] == os.getenv("USER")
def test_inject_settings_fail_silently():
context = {}
inject_settings("tests.rororo.settings_error", context, True)
assert context == {}
def test_inject_settings_failed():
context = {}
with pytest.raises(NameError):
inject_settings("tests.rororo.settings_error", context)
assert context == {}
def test_inject_settings_from_dict():
context = {"DEBUG": False}
settings_dict = {"DEBUG": True, "_DEBUG": True}
inject_settings(settings_dict, context)
assert context["DEBUG"] is True
assert "_DEBUG" not in context
def test_inject_settings_from_module():
context = {"DEBUG": False}
inject_settings(settings_module, context)
assert context["DEBUG"] is True
assert "os" not in context
def test_inject_settings_from_str():
context = {"DEBUG": False}
inject_settings("tests.rororo.settings", context)
assert context["DEBUG"] is True
assert "os" not in context
@pytest.mark.parametrize(
"key, expected",
(
("DEBUG", True),
("SECRET_KEY", True),
("_PRIVATE_USER", False),
("camelCase", False),
("secret_key", False),
),
)
def test_is_settings_key(key, expected):
assert is_setting_key(key) is expected
def test_setup_locale():
monday = calendar.day_abbr[0]
first_weekday = calendar.firstweekday()
setup_locale("uk_UA.UTF-8")
assert calendar.day_abbr[0] != monday
assert calendar.firstweekday() == first_weekday
def test_setup_locale_with_first_weekday():
first_weekday = calendar.firstweekday()
setup_locale("uk_UA.UTF-8", 1)
assert calendar.firstweekday() == 1
setup_locale("en_US.UTF-8", first_weekday)
def test_setup_logging():
setup_logging(default_logging_dict("rororo"))
@pytest.mark.parametrize("remove, expected", ((False, 2), (True, 0)))
def test_setup_logging_remove_root_handlers(remove, expected):
logging.basicConfig(level="INFO")
assert len(logging.root.handlers) == 2
setup_logging(default_logging_dict("rororo"), remove_root_handlers=remove)
assert len(logging.root.handlers) == expected
def test_setup_settings():
app = web.Application()
assert "settings" not in app
setup_settings(app, BaseSettings())
assert "settings" in app
def test_setup_timezone():
setup_timezone("UTC")
utc_now = datetime.datetime.now()
setup_timezone("Europe/Kiev")
kyiv_now = datetime.datetime.now()
assert utc_now.hour != kyiv_now.hour
def test_setup_timezone_empty():
previous = datetime.datetime.now()
setup_timezone(None)
assert previous.hour == datetime.datetime.now().hour
def test_setup_timezone_unknown():
with pytest.raises(ValueError):
setup_timezone("Unknown/Timezone")
|
|
import logging
import numpy as np
from typing import List, Type
import ray
from ray.rllib.agents import with_common_config
from ray.rllib.agents.mbmpo.mbmpo_torch_policy import MBMPOTorchPolicy
from ray.rllib.agents.mbmpo.model_ensemble import DynamicsEnsembleCustomModel
from ray.rllib.agents.mbmpo.utils import calculate_gae_advantages, MBMPOExploration
from ray.rllib.agents.trainer import Trainer
from ray.rllib.env.env_context import EnvContext
from ray.rllib.env.wrappers.model_vector_env import model_vector_env
from ray.rllib.evaluation.metrics import (
collect_episodes,
collect_metrics,
get_learner_stats,
)
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.execution.common import (
STEPS_SAMPLED_COUNTER,
STEPS_TRAINED_COUNTER,
STEPS_TRAINED_THIS_ITER_COUNTER,
_get_shared_metrics,
)
from ray.rllib.execution.metric_ops import CollectMetrics
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO
from ray.rllib.utils.sgd import standardized
from ray.rllib.utils.torch_utils import convert_to_torch_tensor
from ray.rllib.utils.typing import EnvType, TrainerConfigDict
from ray.util.iter import from_actors, LocalIterator
logger = logging.getLogger(__name__)
# fmt: off
# __sphinx_doc_begin__
# Adds the following updates to the (base) `Trainer` config in
# rllib/agents/trainer.py (`COMMON_CONFIG` dict).
DEFAULT_CONFIG = with_common_config({
# If true, use the Generalized Advantage Estimator (GAE)
# with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
"use_gae": True,
# GAE(lambda) parameter.
"lambda": 1.0,
# Initial coefficient for KL divergence.
"kl_coeff": 0.0005,
# Size of batches collected from each worker.
"rollout_fragment_length": 200,
# Do create an actual env on the local worker (worker-idx=0).
"create_env_on_driver": True,
# Step size of SGD.
"lr": 1e-3,
# Coefficient of the value function loss.
"vf_loss_coeff": 0.5,
# Coefficient of the entropy regularizer.
"entropy_coeff": 0.0,
# PPO clip parameter.
"clip_param": 0.5,
# Clip param for the value function. Note that this is sensitive to the
# scale of the rewards. If your expected V is large, increase this.
"vf_clip_param": 10.0,
# If specified, clip the global norm of gradients by this amount.
"grad_clip": None,
# Target value for KL divergence.
"kl_target": 0.01,
# Whether to rollout "complete_episodes" or "truncate_episodes".
"batch_mode": "complete_episodes",
# Which observation filter to apply to the observation.
"observation_filter": "NoFilter",
# Number of Inner adaptation steps for the MAML algorithm.
"inner_adaptation_steps": 1,
# Number of MAML steps per meta-update iteration (PPO steps).
"maml_optimizer_steps": 8,
# Inner adaptation step size.
"inner_lr": 1e-3,
# Horizon of the environment (200 in MB-MPO paper).
"horizon": 200,
# Dynamics ensemble hyperparameters.
"dynamics_model": {
"custom_model": DynamicsEnsembleCustomModel,
# Number of Transition-Dynamics (TD) models in the ensemble.
"ensemble_size": 5,
# Hidden layers for each model in the TD-model ensemble.
"fcnet_hiddens": [512, 512, 512],
# Model learning rate.
"lr": 1e-3,
# Max number of training epochs per MBMPO iter.
"train_epochs": 500,
# Model batch size.
"batch_size": 500,
# Training/validation split.
"valid_split_ratio": 0.2,
# Normalize data (obs, action, and deltas).
"normalize_data": True,
},
# Exploration for MB-MPO is based on StochasticSampling, but uses 8000
# random timesteps up-front for worker=0.
"exploration_config": {
"type": MBMPOExploration,
"random_timesteps": 8000,
},
# Workers sample from dynamics models, not from actual envs.
"custom_vector_env": model_vector_env,
# How many iterations through MAML per MBMPO iteration.
"num_maml_steps": 10,
# Deprecated keys:
# Share layers for value function. If you set this to True, it's important
# to tune vf_loss_coeff.
# Use config.model.vf_share_layers instead.
"vf_share_layers": DEPRECATED_VALUE,
})
# __sphinx_doc_end__
# fmt: on
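# --- Illustrative sketch (not part of RLlib): DEFAULT_CONFIG is a plain dict,
# so experiments typically copy it and override a few keys. The values below
# are arbitrary placeholders, not tuned recommendations.
def _example_mbmpo_config():
    config = DEFAULT_CONFIG.copy()
    config.update({
        "inner_adaptation_steps": 2,  # two inner-loop gradient steps
        "num_maml_steps": 5,  # fewer MAML iterations per MB-MPO iteration
        # Replace (rather than mutate) the nested dict to keep the defaults intact.
        "dynamics_model": dict(DEFAULT_CONFIG["dynamics_model"], ensemble_size=3),
    })
    return config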
# Select Metric Keys for MAML Stats Tracing
METRICS_KEYS = ["episode_reward_mean", "episode_reward_min", "episode_reward_max"]
class MetaUpdate:
def __init__(self, workers, num_steps, maml_steps, metric_gen):
"""Computes the MetaUpdate step in MAML.
Adapted for MBMPO for multiple MAML Iterations.
Args:
workers (WorkerSet): Set of Workers
num_steps (int): Number of meta-update steps per MAML Iteration
maml_steps (int): MAML Iterations per MBMPO Iteration
metric_gen (Iterator): Generates metrics dictionary
Returns:
metrics (dict): MBMPO metrics for logging.
"""
self.workers = workers
self.num_steps = num_steps
self.step_counter = 0
self.maml_optimizer_steps = maml_steps
self.metric_gen = metric_gen
self.metrics = {}
def __call__(self, data_tuple):
"""Args:
data_tuple (tuple): 1st element is samples collected from MAML
Inner adaptation steps and 2nd element is accumulated metrics
"""
# Metaupdate Step.
print("Meta-Update Step")
samples = data_tuple[0]
adapt_metrics_dict = data_tuple[1]
self.postprocess_metrics(
adapt_metrics_dict, prefix="MAMLIter{}".format(self.step_counter)
)
# MAML Meta-update.
fetches = None
for i in range(self.maml_optimizer_steps):
fetches = self.workers.local_worker().learn_on_batch(samples)
learner_stats = get_learner_stats(fetches)
# Update KLs.
def update(pi, pi_id):
assert "inner_kl" not in learner_stats, (
"inner_kl should be nested under policy id key",
learner_stats,
)
if pi_id in learner_stats:
assert "inner_kl" in learner_stats[pi_id], (learner_stats, pi_id)
pi.update_kls(learner_stats[pi_id]["inner_kl"])
else:
logger.warning("No data for {}, not updating kl".format(pi_id))
self.workers.local_worker().foreach_policy_to_train(update)
# Modify Reporting Metrics.
metrics = _get_shared_metrics()
metrics.info[LEARNER_INFO] = fetches
metrics.counters[STEPS_TRAINED_THIS_ITER_COUNTER] = samples.count
metrics.counters[STEPS_TRAINED_COUNTER] += samples.count
if self.step_counter == self.num_steps - 1:
td_metric = self.workers.local_worker().foreach_policy(fit_dynamics)[0]
# Sync workers with meta policy.
self.workers.sync_weights()
# Sync TD Models with workers.
sync_ensemble(self.workers)
sync_stats(self.workers)
metrics.counters[STEPS_SAMPLED_COUNTER] = td_metric[STEPS_SAMPLED_COUNTER]
# Modify to CollectMetrics.
res = self.metric_gen.__call__(None)
res.update(self.metrics)
self.step_counter = 0
print("MB-MPO Iteration Completed")
return [res]
else:
print("MAML Iteration {} Completed".format(self.step_counter))
self.step_counter += 1
# Sync workers with meta policy
print("Syncing Weights with Workers")
self.workers.sync_weights()
return []
def postprocess_metrics(self, metrics, prefix=""):
"""Appends prefix to current metrics
Args:
metrics (dict): Dictionary of current metrics
prefix (str): Prefix string to be appended
"""
for key in metrics.keys():
self.metrics[prefix + "_" + key] = metrics[key]
def post_process_metrics(prefix, workers, metrics):
"""Update current dataset metrics and filter out specific keys.
Args:
prefix (str): Prefix string to be appended
workers (WorkerSet): Set of workers
metrics (dict): Current metrics dictionary
"""
res = collect_metrics(remote_workers=workers.remote_workers())
for key in METRICS_KEYS:
metrics[prefix + "_" + key] = res[key]
return metrics
def inner_adaptation(workers: WorkerSet, samples: List[SampleBatch]):
"""Performs one gradient descend step on each remote worker.
Args:
workers (WorkerSet): The WorkerSet of the Trainer.
samples (List[SampleBatch]): The list of SampleBatches to perform
a training step on (one for each remote worker).
"""
for i, e in enumerate(workers.remote_workers()):
e.learn_on_batch.remote(samples[i])
def fit_dynamics(policy, pid):
return policy.dynamics_model.fit()
def sync_ensemble(workers: WorkerSet) -> None:
"""Syncs dynamics ensemble weights from driver (main) to workers.
Args:
workers (WorkerSet): Set of workers, including driver (main).
"""
def get_ensemble_weights(worker):
policy_map = worker.policy_map
policies = policy_map.keys()
def policy_ensemble_weights(policy):
model = policy.dynamics_model
return {k: v.cpu().detach().numpy() for k, v in model.state_dict().items()}
return {
pid: policy_ensemble_weights(policy)
for pid, policy in policy_map.items()
if pid in policies
}
def set_ensemble_weights(policy, pid, weights):
weights = weights[pid]
weights = convert_to_torch_tensor(weights, device=policy.device)
model = policy.dynamics_model
model.load_state_dict(weights)
if workers.remote_workers():
weights = ray.put(get_ensemble_weights(workers.local_worker()))
set_func = ray.put(set_ensemble_weights)
for e in workers.remote_workers():
e.foreach_policy.remote(set_func, weights=weights)
def sync_stats(workers: WorkerSet) -> None:
def get_normalizations(worker):
policy = worker.policy_map[DEFAULT_POLICY_ID]
return policy.dynamics_model.normalizations
def set_normalizations(policy, pid, normalizations):
policy.dynamics_model.set_norms(normalizations)
if workers.remote_workers():
normalization_dict = ray.put(get_normalizations(workers.local_worker()))
set_func = ray.put(set_normalizations)
for e in workers.remote_workers():
e.foreach_policy.remote(set_func, normalizations=normalization_dict)
def post_process_samples(samples, config: TrainerConfigDict):
    # Instead of using a NN for the value function, we use regression.
split_lst = []
for sample in samples:
indexes = np.asarray(sample["dones"]).nonzero()[0]
indexes = indexes + 1
reward_list = np.split(sample["rewards"], indexes)[:-1]
observation_list = np.split(sample["obs"], indexes)[:-1]
paths = []
for i in range(0, len(reward_list)):
paths.append(
{"rewards": reward_list[i], "observations": observation_list[i]}
)
paths = calculate_gae_advantages(paths, config["gamma"], config["lambda"])
advantages = np.concatenate([path["advantages"] for path in paths])
sample["advantages"] = standardized(advantages)
split_lst.append(sample.count)
return samples, split_lst
class MBMPOTrainer(Trainer):
"""Model-Based Meta Policy Optimization (MB-MPO) Trainer.
This file defines the distributed Trainer class for model-based meta
policy optimization.
See `mbmpo_[tf|torch]_policy.py` for the definition of the policy loss.
Detailed documentation:
https://docs.ray.io/en/master/rllib-algorithms.html#mbmpo
"""
@classmethod
@override(Trainer)
def get_default_config(cls) -> TrainerConfigDict:
return DEFAULT_CONFIG
@override(Trainer)
def validate_config(self, config: TrainerConfigDict) -> None:
# Call super's validation method.
super().validate_config(config)
if config["num_gpus"] > 1:
raise ValueError("`num_gpus` > 1 not yet supported for MB-MPO!")
if config["framework"] != "torch":
logger.warning(
"MB-MPO only supported in PyTorch so far! Switching to "
"`framework=torch`."
)
config["framework"] = "torch"
if config["inner_adaptation_steps"] <= 0:
raise ValueError("Inner adaptation steps must be >=1!")
if config["maml_optimizer_steps"] <= 0:
raise ValueError("PPO steps for meta-update needs to be >=0!")
if config["entropy_coeff"] < 0:
raise ValueError("`entropy_coeff` must be >=0.0!")
if config["batch_mode"] != "complete_episodes":
raise ValueError("`batch_mode=truncate_episodes` not supported!")
if config["num_workers"] <= 0:
raise ValueError("Must have at least 1 worker/task.")
if config["create_env_on_driver"] is False:
raise ValueError(
"Must have an actual Env created on the driver "
"(local) worker! Set `create_env_on_driver` to True."
)
@override(Trainer)
def get_default_policy_class(self, config: TrainerConfigDict) -> Type[Policy]:
return MBMPOTorchPolicy
@staticmethod
@override(Trainer)
def execution_plan(
workers: WorkerSet, config: TrainerConfigDict, **kwargs
) -> LocalIterator[dict]:
assert (
len(kwargs) == 0
), "MBMPO execution_plan does NOT take any additional parameters"
# Train TD Models on the driver.
workers.local_worker().foreach_policy(fit_dynamics)
# Sync driver's policy with workers.
workers.sync_weights()
# Sync TD Models and normalization stats with workers
sync_ensemble(workers)
sync_stats(workers)
# Dropping metrics from the first iteration
_, _ = collect_episodes(
workers.local_worker(), workers.remote_workers(), [], timeout_seconds=9999
)
# Metrics Collector.
metric_collect = CollectMetrics(
workers,
min_history=0,
timeout_seconds=config["metrics_episode_collection_timeout_s"],
)
num_inner_steps = config["inner_adaptation_steps"]
def inner_adaptation_steps(itr):
buf = []
split = []
metrics = {}
for samples in itr:
print("Collecting Samples, Inner Adaptation {}".format(len(split)))
# Processing Samples (Standardize Advantages)
samples, split_lst = post_process_samples(samples, config)
buf.extend(samples)
split.append(split_lst)
adapt_iter = len(split) - 1
prefix = "DynaTrajInner_" + str(adapt_iter)
metrics = post_process_metrics(prefix, workers, metrics)
if len(split) > num_inner_steps:
out = SampleBatch.concat_samples(buf)
out["split"] = np.array(split)
buf = []
split = []
yield out, metrics
metrics = {}
else:
inner_adaptation(workers, samples)
# Iterator for Inner Adaptation Data gathering (from pre->post
# adaptation).
rollouts = from_actors(workers.remote_workers())
rollouts = rollouts.batch_across_shards()
rollouts = rollouts.transform(inner_adaptation_steps)
# Meta update step with outer combine loop for multiple MAML
# iterations.
train_op = rollouts.combine(
MetaUpdate(
workers,
config["num_maml_steps"],
config["maml_optimizer_steps"],
metric_collect,
)
)
return train_op
@staticmethod
@override(Trainer)
def validate_env(env: EnvType, env_context: EnvContext) -> None:
"""Validates the local_worker's env object (after creation).
Args:
env: The env object to check (for worker=0 only).
env_context: The env context used for the instantiation of
the local worker's env (worker=0).
Raises:
ValueError: In case something is wrong with the config.
"""
if not hasattr(env, "reward") or not callable(env.reward):
raise ValueError(
f"Env {env} doest not have a `reward()` method, needed for "
"MB-MPO! This `reward()` method should return "
)
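# --- Illustrative sketch (not part of RLlib): validate_env() above only checks
# that the env exposes a callable `reward()`. A minimal stand-in satisfying that
# check is shown below; the (obs, action, next_obs) signature mirrors the MB-MPO
# example envs but should be treated as an assumption here, and a real env must
# of course also implement the usual gym.Env interface.
class _ExampleRewardEnv:
    def reward(self, obs, action, next_obs):
        # Recompute rewards purely from transition data so that rollouts
        # sampled from the learned dynamics models can be scored.
        return np.zeros(len(obs))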
|
|
""" Test functions for fftpack.basic module
"""
from __future__ import division, absolute_import, print_function
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
from numpy.random import rand
import numpy as np
from concurrent import futures
import os
import scipy.fftpack
import numpy.fft
try:
import scipy.fft as scipy_fft
has_scipy_fft = True
except ImportError:
scipy_fft = {}
has_scipy_fft = False
from .common import Benchmark
try:
import pyfftw.interfaces.numpy_fft as pyfftw_fft
import pyfftw
pyfftw.interfaces.cache.enable()
has_pyfftw = True
except ImportError:
pyfftw_fft = {}
has_pyfftw = False
class PyfftwBackend:
"""Backend for pyfftw"""
__ua_domain__ = 'numpy.scipy.fft'
@staticmethod
def __ua_function__(method, args, kwargs):
kwargs.pop('overwrite_x', None)
fn = getattr(pyfftw_fft, method.__name__, None)
return (NotImplemented if fn is None
else fn(*args, **kwargs))
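def _example_pyfftw_backend_usage():
    # Illustrative sketch (not part of the benchmark suite): a uarray backend
    # such as PyfftwBackend is activated either for a scoped region via
    # scipy.fft.set_backend, or process-wide via scipy.fft.set_global_backend
    # (as done in FftBackends.setup below).
    if not (has_scipy_fft and has_pyfftw):
        raise NotImplementedError
    import scipy.fft
    with scipy.fft.set_backend(PyfftwBackend, only=True):
        return scipy.fft.fft(random([1024]))  # dispatched to pyfftw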
def random(size):
return rand(*size)
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)/n
return y
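# --- Illustrative sketch (not part of the benchmark suite): the O(n**2) direct
# transforms above follow the DFT definition and should agree with the FFT
# implementations being benchmarked.
def _check_direct_dft(n=16):
    x = random([n]) + 1j * random([n])
    assert np.allclose(direct_dft(x), numpy.fft.fft(x))
    assert np.allclose(direct_idft(x), numpy.fft.ifft(x))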
def get_module(mod_name):
module_map = {
'scipy.fftpack': scipy.fftpack,
'scipy.fft': scipy_fft,
'numpy.fft': numpy.fft
}
if not has_scipy_fft and mod_name == 'scipy.fft':
raise NotImplementedError
return module_map[mod_name]
class Fft(Benchmark):
params = [
[100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
['real', 'cmplx'],
['scipy.fftpack', 'scipy.fft', 'numpy.fft']
]
param_names = ['size', 'type', 'module']
def setup(self, size, cmplx, module):
if cmplx == 'cmplx':
self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
else:
self.x = random([size]).astype(double)
module = get_module(module)
self.fft = getattr(module, 'fft')
self.ifft = getattr(module, 'ifft')
def time_fft(self, size, cmplx, module):
self.fft(self.x)
def time_ifft(self, size, cmplx, module):
self.ifft(self.x)
class NextFastLen(Benchmark):
params = [
[12, 13, # small ones
1021, 1024, # 2 ** 10 and a prime
16381, 16384, # 2 ** 14 and a prime
262139, 262144, # 2 ** 17 and a prime
999983, 1048576, # 2 ** 20 and a prime
],
]
param_names = ['size']
def setup(self, size):
if not has_scipy_fft:
raise NotImplementedError
def time_next_fast_len(self, size):
scipy_fft.next_fast_len.__wrapped__(size)
def time_next_fast_len_cached(self, size):
scipy_fft.next_fast_len(size)
class RFft(Benchmark):
params = [
[100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
['scipy.fftpack', 'scipy.fft', 'numpy.fft']
]
param_names = ['size', 'module']
def setup(self, size, module):
self.x = random([size]).astype(double)
module = get_module(module)
self.rfft = getattr(module, 'rfft')
self.irfft = getattr(module, 'irfft')
self.y = self.rfft(self.x)
def time_rfft(self, size, module):
self.rfft(self.x)
def time_irfft(self, size, module):
self.irfft(self.y)
class RealTransforms1D(Benchmark):
params = [
[75, 100, 135, 256, 313, 512, 675, 1024, 2025, 2048],
['I', 'II', 'III', 'IV'],
['scipy.fftpack', 'scipy.fft']
]
param_names = ['size', 'type', 'module']
def setup(self, size, type, module):
module = get_module(module)
self.dct = getattr(module, 'dct')
self.dst = getattr(module, 'dst')
self.type = {'I':1, 'II':2, 'III':3, 'IV':4}[type]
# The "logical" transform size should be smooth, which for dct/dst
# type 1 is offset by -1/+1 respectively
if self.type == 1:
size += 1
self.x = random([size]).astype(double)
if self.type == 1:
self.x_dst = self.x[:-2].copy()
def time_dct(self, size, type, module):
self.dct(self.x, self.type)
def time_dst(self, size, type, module):
x = self.x if self.type != 1 else self.x_dst
self.dst(x, self.type)
class Fftn(Benchmark):
params = [
["100x100", "313x100", "1000x100", "256x256", "512x512"],
['real', 'cmplx'],
['scipy.fftpack', 'scipy.fft', 'numpy.fft']
]
param_names = ['size', 'type', 'module']
def setup(self, size, cmplx, module):
size = list(map(int, size.split("x")))
if cmplx != 'cmplx':
self.x = random(size).astype(double)
else:
self.x = random(size).astype(cdouble)+random(size).astype(cdouble)*1j
self.fftn = getattr(get_module(module), 'fftn')
def time_fftn(self, size, cmplx, module):
self.fftn(self.x)
class RealTransformsND(Benchmark):
params = [
['75x75', '100x100', '135x135', '313x363', '1000x100', '256x256'],
['I', 'II', 'III', 'IV'],
['scipy.fftpack', 'scipy.fft']
]
param_names = ['size', 'type', 'module']
def setup(self, size, type, module):
self.dctn = getattr(get_module(module), 'dctn')
self.dstn = getattr(get_module(module), 'dstn')
self.type = {'I':1, 'II':2, 'III':3, 'IV':4}[type]
# The "logical" transform size should be smooth, which for dct/dst
# type 1 is offset by -1/+1 respectively
size = list(map(int, size.split('x')))
if self.type == 1:
size = (s + 1 for s in size)
self.x = random(size).astype(double)
if self.type == 1:
self.x_dst = self.x[:-2,:-2].copy()
def time_dctn(self, size, type, module):
self.dctn(self.x, self.type)
def time_dstn(self, size, type, module):
x = self.x if self.type != 1 else self.x_dst
self.dstn(x, self.type)
class FftBackends(Benchmark):
params = [
[100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
['real', 'cmplx'],
['pocketfft', 'pyfftw', 'numpy', 'direct']
]
param_names = ['size', 'type', 'backend']
def setup(self, size, cmplx, backend):
import scipy.fft
if cmplx == 'cmplx':
self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
else:
self.x = random([size]).astype(double)
self.fft = scipy.fft.fft
self.ifft = scipy.fft.ifft
if backend == 'pocketfft':
scipy.fft.set_global_backend('scipy')
elif backend == 'pyfftw':
if not has_pyfftw:
raise NotImplementedError
scipy.fft.set_global_backend(PyfftwBackend)
elif backend == 'numpy':
from scipy.fft._debug_backends import NumPyBackend
scipy.fft.set_global_backend(NumPyBackend)
elif backend == 'direct':
import scipy.fft._pocketfft
self.fft = scipy.fft._pocketfft.fft
self.ifft = scipy.fft._pocketfft.ifft
def time_fft(self, size, cmplx, module):
self.fft(self.x)
def time_ifft(self, size, cmplx, module):
self.ifft(self.x)
class FftnBackends(Benchmark):
params = [
["100x100", "313x100", "1000x100", "256x256", "512x512"],
['real', 'cmplx'],
['pocketfft', 'pyfftw', 'numpy', 'direct']
]
param_names = ['size', 'type', 'backend']
def setup(self, size, cmplx, backend):
import scipy.fft
size = list(map(int, size.split("x")))
if cmplx == 'cmplx':
self.x = random(size).astype(double)+random(size).astype(double)*1j
else:
self.x = random(size).astype(double)
self.fftn = scipy.fft.fftn
self.ifftn = scipy.fft.ifftn
if backend == 'pocketfft':
scipy.fft.set_global_backend('scipy')
elif backend == 'pyfftw':
if not has_pyfftw:
raise NotImplementedError
scipy.fft.set_global_backend(PyfftwBackend)
elif backend == 'numpy':
from scipy.fft._debug_backends import NumPyBackend
scipy.fft.set_global_backend(NumPyBackend)
elif backend == 'direct':
import scipy.fft._pocketfft
self.fftn = scipy.fft._pocketfft.fftn
self.ifftn = scipy.fft._pocketfft.ifftn
def time_fft(self, size, cmplx, module):
self.fftn(self.x)
def time_ifft(self, size, cmplx, module):
self.ifftn(self.x)
class FftThreading(Benchmark):
params = [
['100x100', '1000x100', '256x256', '512x512'],
[1, 8, 32, 100],
['workers', 'threading']
]
param_names = ['size', 'num_transforms', 'method']
def setup(self, size, num_transforms, method):
if not has_scipy_fft:
raise NotImplementedError
size = list(map(int, size.split("x")))
self.xs = [(random(size)+1j*random(size)).astype(np.complex128)
for _ in range(num_transforms)]
if method == 'threading':
self.pool = futures.ThreadPoolExecutor(os.cpu_count())
def map_thread(self, func):
f = []
for x in self.xs:
f.append(self.pool.submit(func, x))
futures.wait(f)
def time_fft(self, size, num_transforms, method):
if method == 'threading':
self.map_thread(scipy_fft.fft)
else:
for x in self.xs:
scipy_fft.fft(x, workers=-1)
def time_fftn(self, size, num_transforms, method):
if method == 'threading':
self.map_thread(scipy_fft.fftn)
else:
for x in self.xs:
scipy_fft.fftn(x, workers=-1)
|
|
"""Resource pools."""
__all__ = [
'ProcessActorPool',
'TimeoutPool',
]
import array
import collections
import contextlib
import dataclasses
import functools
import heapq
import inspect
import itertools
import logging
import multiprocessing
import multiprocessing.connection
import multiprocessing.reduction
import os
import socket
import threading
import time
import types
import weakref
from typing import Any, Dict, Tuple
from . import collections as g1_collections # pylint: disable=reimported
from .assertions import ASSERT
LOG = logging.getLogger(__name__)
class TimeoutPool:
"""Rudimentary timeout-based resource pool.
A pool that releases resources unused after a timeout.
NOTE: This class is not thread-safe.
"""
@dataclasses.dataclass(frozen=True)
class Stats:
num_allocations: int
num_concurrent_resources: int
max_concurrent_resources: int
def __init__(
self,
pool_size,
allocate,
release,
timeout=300, # 5 minutes.
):
# Store pairs of (resource, returned_at), sorted by returned_at
# in ascending order.
self._pool = collections.deque()
self._pool_size = pool_size
self._allocate = allocate
self._release = release
self._timeout = timeout
self._num_allocations = 0
self._num_concurrent_resources = 0
self._max_concurrent_resources = 0
def get_stats(self):
return self.Stats(
num_allocations=self._num_allocations,
num_concurrent_resources=self._num_concurrent_resources,
max_concurrent_resources=self._max_concurrent_resources,
)
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
@contextlib.contextmanager
def using(self):
resource = self.get()
try:
yield resource
finally:
self.return_(resource)
def get(self):
"""Get a resource from the pool or allocate new one when empty.
This does not block nor raise when the pool is empty (if we want
to implement rate limit, we could do that?).
"""
to_allocate = not self._pool
if to_allocate:
resource = self._allocate()
self._num_allocations += 1
self._num_concurrent_resources += 1
max_concurrent_resources = max(
self._num_concurrent_resources, self._max_concurrent_resources
)
else:
# Return the most recently released resource so that the
            # less recently released resources may grow older and then
            # be released eventually.
resource = self._pool.pop()[0]
max_concurrent_resources = self._max_concurrent_resources
try:
self.cleanup()
except Exception:
if to_allocate:
self._num_allocations -= 1
self._num_concurrent_resources -= 1
self._release(resource)
raise
self._max_concurrent_resources = max_concurrent_resources
return resource
def return_(self, resource):
"""Return the resource to the pool.
        The pool releases resources that have exceeded the timeout, and
        also releases the oldest resources when the pool is over capacity.
"""
now = time.monotonic()
self._pool.append((resource, now))
self._cleanup(now)
def cleanup(self):
"""Release resources that exceed the timeout.
You may call this periodically to release old resources so that
        the pool does not always sit at its high-water mark. Note that
        get/return_ call this for you, so if the program uses the pool
frequently, you do not need to call cleanup periodically.
"""
self._cleanup(time.monotonic())
def _cleanup(self, now):
deadline = now - self._timeout
while self._pool:
if (
len(self._pool) > self._pool_size
or self._pool[0][1] < deadline
):
self._release_least_recently_released_resource()
else:
break
def close(self):
"""Release all resources in the pool."""
while self._pool:
self._release_least_recently_released_resource()
def _release_least_recently_released_resource(self):
self._num_concurrent_resources -= 1
self._release(self._pool.popleft()[0])
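# --- Illustrative sketch (not part of this module): pooling TCP connections
# with TimeoutPool. The host, port, and timeout below are hypothetical
# placeholder values, not recommendations.
def _example_timeout_pool():
    pool = TimeoutPool(
        pool_size=4,
        allocate=lambda: socket.create_connection(('example.com', 80)),
        release=lambda conn: conn.close(),
        timeout=60,
    )
    with pool:
        with pool.using() as conn:
            conn.sendall(b'HEAD / HTTP/1.0\r\n\r\n')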
class ProcessActorPool:
"""Process-actor pool.
stdlib's multiprocessing.pool.Pool is modeled after the executor
where workers are stateless. ProcessActorPool manages a pool of
stateful process-actors.
If an actor is not returned to the pool and is garbage collected,
the associated process and other resources will be automatically
returned to the pool or released.
NOTE: This class is not thread-safe.
"""
@dataclasses.dataclass(frozen=True)
class Stats:
num_spawns: int
num_concurrent_processes: int
max_concurrent_processes: int
current_highest_uses: int
_COUNTER = itertools.count(1).__next__
@dataclasses.dataclass(order=True)
class _Entry:
process: multiprocessing.Process = dataclasses.field(compare=False)
conn: multiprocessing.connection.Connection = \
dataclasses.field(compare=False)
negative_num_uses: int
def __init__(self, pool_size, max_uses_per_actor=None, context=None):
# Store processes, sorted by num_uses in descending order.
self._pool = []
self._pool_size = pool_size
# Store id(stub) -> entry. We store id(stub) to avoid creating
# a strong reference to the stub.
self._stub_ids_in_use = {}
self._max_uses_per_actor = max_uses_per_actor
self._context = context or multiprocessing.get_context()
self._num_spawns = 0
self._num_concurrent_processes = 0
self._max_concurrent_processes = 0
def get_stats(self):
if self._pool:
current_highest_uses = -self._pool[0].negative_num_uses
else:
current_highest_uses = 0
for entry in self._stub_ids_in_use.values():
num_uses = -entry.negative_num_uses
if num_uses > current_highest_uses:
current_highest_uses = num_uses
return self.Stats(
num_spawns=self._num_spawns,
num_concurrent_processes=self._num_concurrent_processes,
max_concurrent_processes=self._max_concurrent_processes,
current_highest_uses=current_highest_uses,
)
def __enter__(self):
return self
def __exit__(self, exc_type, *_):
self.close(graceful=not exc_type)
@contextlib.contextmanager
def using(self, referent):
stub = self.get(referent)
try:
yield stub
finally:
self.return_(stub)
def get(self, referent):
"""Get a stub from the pool or allocate new one when empty.
This does not block nor raise when the pool is empty (if we want
to implement rate limit, we could do that?).
"""
to_spawn = not self._pool
if to_spawn:
entry = self._spawn()
self._num_spawns += 1
self._num_concurrent_processes += 1
max_concurrent_processes = max(
self._num_concurrent_processes, self._max_concurrent_processes
)
else:
            # Return the most often used process so that it will be
# released sooner (when max_uses_per_actor is set).
entry = heapq.heappop(self._pool)
max_concurrent_processes = self._max_concurrent_processes
try:
stub = _Stub(type(referent), entry.process, entry.conn)
stub_id = id(stub)
# Although this stub_id can be the same as another already
# collected stub's id (since id is just object's address),
# it is very unlikely that this id conflict will happen when
# the entry is still in the self._stub_ids_in_use dict as it
# requires all these to happen:
#
# * The old stub is collected.
# * The old stub's finalizer has not been called yet (is
# this even possible?).
# * The new stub is allocated, at the same address.
#
            # But there is no harm in asserting that this will never happen.
ASSERT.setitem(self._stub_ids_in_use, stub_id, entry)
_BoundMethod('_adopt', entry.conn)(referent)
self._cleanup()
except Exception:
if to_spawn:
self._num_spawns -= 1
# self._num_concurrent_processes is decreased in
# self._release.
self._stub_ids_in_use.pop(stub_id)
self._release(entry)
raise
# TODO: self._return_id is non-reentrant, and thus is not safe
# in a finalize callback. How do we fix this?
weakref.finalize(stub, self._return_id, stub_id)
entry.negative_num_uses -= 1
self._max_concurrent_processes = max_concurrent_processes
return stub
def return_(self, stub):
"""Return the stub to the pool.
        The pool will release actors that exceed the
``max_uses_per_actor``, or when the pool is full.
"""
return self._return_id(id(stub))
def _return_id(self, stub_id):
entry = self._stub_ids_in_use.pop(stub_id, None)
if entry is None:
return
try:
_BoundMethod('_disadopt', entry.conn)()
except Exception:
self._release(entry)
raise
heapq.heappush(self._pool, entry)
self._cleanup()
def _spawn(self):
conn, conn_actor = self._context.Pipe()
try:
name = 'pactor-%02d' % self._COUNTER()
entry = self._Entry(
process=self._context.Process(
name=name,
target=_ProcessActor(name, conn_actor),
),
conn=conn,
negative_num_uses=0,
)
entry.process.start()
# Block until process actor has received conn_actor; then we
# may close conn_actor.
_BoundMethod('_adopt', conn)(None)
except Exception:
conn.close()
raise
finally:
conn_actor.close()
return entry
def _release(self, entry):
self._num_concurrent_processes -= 1
try:
_conn_send(entry.conn, None)
entry.process.join(timeout=1)
if entry.process.exitcode is None:
LOG.warning(
'process actor does not quit: pid=%d', entry.process.pid
)
entry.process.kill()
entry.process.join(timeout=1)
if entry.process.exitcode is None:
raise RuntimeError(
'process actor cannot be killed: pid=%d' %
entry.process.pid
)
if entry.process.exitcode != 0:
# Sadly SIGTERM also causes exitcode != 0.
LOG.warning(
'process actor err out: pid=%d exitcode=%d',
entry.process.pid,
entry.process.exitcode,
)
            # A process can only be closed after it exits.
entry.process.close()
finally:
entry.conn.close()
def _cleanup(self):
while self._pool:
if (
len(self._pool) > self._pool_size or (
self._max_uses_per_actor is not None and
-self._pool[0].negative_num_uses > self._max_uses_per_actor
)
):
self._release(heapq.heappop(self._pool))
else:
break
# Check crashed actors.
i = 0
last = len(self._pool) - 1
while i <= last:
if self._pool[i].process.exitcode is not None:
self._pool[i], self._pool[last] = \
self._pool[last], self._pool[i]
last -= 1
else:
i += 1
        # Entries after index ``last`` were swapped to the tail and have
        # already exited; release them.
        if last < len(self._pool) - 1:
            to_release = self._pool[last + 1:]
            del self._pool[last + 1:]
heapq.heapify(self._pool)
for entry in to_release:
try:
self._release(entry)
except Exception as exc:
LOG.error('cleanup: unable to release process: %r', exc)
def close(self, graceful=True):
entries = list(self._pool)
self._pool.clear()
if graceful:
for entry in entries:
try:
self._release(entry)
except Exception as exc:
LOG.error('close: unable to release process: %r', exc)
ASSERT.empty(self._stub_ids_in_use)
else:
entries.extend(self._stub_ids_in_use.values())
self._stub_ids_in_use.clear()
self._num_concurrent_processes -= len(entries)
for entry in entries:
entry.process.kill()
for entry in entries:
entry.process.join(timeout=1)
if entry.process.exitcode is None:
LOG.error(
'close: process actor cannot be killed: pid=%d',
entry.process.pid
)
else:
                    # A process can only be closed after it exits.
entry.process.close()
entry.conn.close()
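# --- Illustrative sketch (not part of this module): running methods of a
# picklable, stateful object (here collections.Counter) inside a pooled actor
# process. Method calls are forwarded through the stub's ``m`` attribute.
def _example_process_actor_pool():
    with ProcessActorPool(pool_size=2) as pool:
        with pool.using(collections.Counter()) as stub:
            stub.m.update(['a', 'b', 'a'])  # executed in the actor process
            return stub.m.most_common(1)  # [('a', 2)]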
@dataclasses.dataclass(frozen=True)
class _Call:
method: str
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
# It seems like you cannot call sendmsg with empty buffers.
_SEND_FDS_DUMMY = b'0'
class _Stub:
def __init__(self, referent_type, process, conn):
self._conn = conn
self._submit = _BoundMethod('_submit', conn)
self._apply = _BoundMethod('_apply', conn)
self.m = _Methods(referent_type, process, conn)
def send_fds(self, fds):
ASSERT.not_empty(fds)
_conn_send(self._conn, _Call('_send_fds', (len(fds), ), {}))
sock = socket.socket(fileno=self._conn.fileno())
try:
_send_fds(sock, [_SEND_FDS_DUMMY], fds)
finally:
sock.detach()
remote_fds, exc = _conn_recv(self._conn)
if exc is not None:
raise exc
ASSERT.equal(len(remote_fds), len(fds))
return remote_fds
def submit(self, func, *args, **kwargs):
return self._submit(func, args, kwargs)
def apply(self, func, *args, **kwargs):
return self._apply(func, args, kwargs)
class _Methods:
def __init__(self, referent_type, process, conn):
self._referent_type = referent_type
self._process = process
self._bound_methods = g1_collections.LoadingDict(
functools.partial(_BoundMethod, conn=conn)
)
def __getattr__(self, name):
ASSERT.none(self._process.exitcode)
attr = getattr(self._referent_type, name, None)
bound_method = self._bound_methods[ASSERT.not_startswith(name, '_')]
if attr is None or isinstance(attr, property):
# Instance attribute or property.
return bound_method()
else:
# Static/class/instance method.
return bound_method
class _BoundMethod:
def __init__(self, name, conn):
self._name = name
self._conn = conn
def __call__(self, *args, **kwargs):
_conn_send(self._conn, _Call(self._name, args, kwargs))
result, exc = _conn_recv(self._conn)
if exc is not None:
raise exc
return result
class _ProcessActor:
# TODO: Get this from g1.apps.loggers?
_LOG_FORMAT = (
'%(asctime)s %(threadName)s %(levelname)s %(name)s: %(message)s'
)
def __init__(self, name, conn):
self._name = name
self._conn = conn
self._referent = None
def __call__(self):
self._process_init()
try:
while True:
try:
call = _conn_recv(self._conn)
except (EOFError, OSError, KeyboardInterrupt) as exc:
LOG.warning('actor input closed early: %r', exc)
break
if call is None: # Normal exit.
break
self._handle(call)
del call
except BaseException:
            # The actor only exits when either self._conn is closed or
            # call is None. We treat everything else as a crash, even a
            # BaseException like SystemExit.
LOG.exception('actor crashed')
raise
finally:
self._process_cleanup()
def _process_init(self):
threading.current_thread().name = self._name
logging.basicConfig(level=logging.INFO, format=self._LOG_FORMAT)
LOG.info('start: pid=%d', os.getpid())
def _process_cleanup(self):
LOG.info('exit: pid=%d', os.getpid())
self._conn.close()
# NOTE:
#
# * When handling exceptions, remember to strip off the stack trace
# before sending it back (although I think pickle does this for
# you?).
#
    # * Because recv_bytes is blocking, you have to be very, very careful
    #   not to block the actor's caller indefinitely while it waits for the
    #   actor's response. One particular example is pickle.dumps, which
    #   fails in many cases, and this is why we call ForkingPickler.dumps
# explicitly.
def _handle(self, call):
# First, check actor methods.
if call.method == '_adopt':
self._handle_adopt(call)
elif call.method == '_disadopt':
self._handle_disadopt(call)
elif call.method == '_send_fds':
self._handle_send_fds(call)
elif call.method == '_submit':
self._handle_submit(call)
# Then, check referent methods.
elif self._referent is None:
self._send_exc(AssertionError('expect referent not None'))
elif call.method == '_apply':
self._handle_apply(call)
elif call.method.startswith('_'):
self._send_exc(
AssertionError('expect public method: %s' % call.method)
)
else:
self._handle_method(call)
def _send_result(self, result):
self._conn.send_bytes(self._pickle_pair((result, None)))
def _send_exc(self, exc):
self._conn.send_bytes(
self._pickle_pair((None, exc.with_traceback(None)))
)
@staticmethod
def _pickle_pair(pair):
try:
return multiprocessing.reduction.ForkingPickler.dumps(pair)
except Exception as exc:
LOG.error('pickle error: pair=%r exc=%r', pair, exc)
return multiprocessing.reduction.ForkingPickler.dumps(
(None, exc.with_traceback(None))
)
def _handle_adopt(self, call):
self._referent = call.args[0]
self._send_result(None)
def _handle_disadopt(self, call):
del call # Unused.
self._referent = None
self._send_result(None)
def _handle_send_fds(self, call):
num_fds = call.args[0]
sock = socket.socket(fileno=self._conn.fileno())
try:
msg, fds, _, _ = _recv_fds(sock, len(_SEND_FDS_DUMMY), num_fds)
except Exception as exc:
self._send_exc(AssertionError('recv_fds error: %r' % exc))
return
finally:
sock.detach()
if msg != _SEND_FDS_DUMMY:
self._send_exc(
AssertionError(
'expect dummy message %r, not %r' % (_SEND_FDS_DUMMY, msg)
)
)
return
self._send_result(fds)
def _handle_submit(self, call):
try:
func, args, kwargs = call.args
result = func(*args, **kwargs)
except BaseException as exc:
self._send_exc(exc)
else:
self._send_result(result)
def _handle_apply(self, call):
try:
func, args, kwargs = call.args
result = func(self._referent, *args, **kwargs)
except BaseException as exc:
self._send_exc(exc)
else:
self._send_result(result)
def _handle_method(self, call):
try:
method = getattr(type(self._referent), call.method, None)
bound_method = getattr(self._referent, call.method)
if method is None or isinstance(method, property):
# Instance attribute or property.
result = bound_method
elif isinstance(method, types.MethodType):
# Class method.
result = method(*call.args, **call.kwargs)
elif inspect.isgeneratorfunction(bound_method):
# Replace a generator with a list because generator is
# not pickle-able.
result = list(bound_method(*call.args, **call.kwargs))
else:
# Static method or instance method.
result = bound_method(*call.args, **call.kwargs)
except BaseException as exc:
self._send_exc(exc)
else:
self._send_result(result)
def _conn_send(conn, obj):
conn.send_bytes(multiprocessing.reduction.ForkingPickler.dumps(obj))
def _conn_recv(conn):
return multiprocessing.reduction.ForkingPickler.loads(conn.recv_bytes())
# TODO: Use stdlib's send_fds when upgrade to Python 3.9.
def _send_fds(sock, buffers, fds, flags=0, address=None):
return sock.sendmsg(
buffers,
[(socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array('i', fds))],
flags,
address,
)
# TODO: Use stdlib's recv_fds when upgrade to Python 3.9.
def _recv_fds(sock, bufsize, maxfds, flags=0):
fds = array.array('i')
msg, ancdata, flags, addr = sock.recvmsg(
bufsize,
socket.CMSG_LEN(maxfds * fds.itemsize),
flags,
)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS:
fds.frombytes(
cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]
)
return msg, list(fds), flags, addr
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn for sequential input.
NOTE: This API is a work in progress and will likely be changing frequently.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.layers import serialization as layer_serialization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
@keras_export('keras.experimental.SequenceFeatures')
class SequenceFeatures(fc._BaseFeaturesLayer):
"""A layer for sequence input.
All `feature_columns` must be sequence dense columns with the same
`sequence_length`. The output of this method can be fed into sequence
networks, such as RNN.
The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ from
batch to batch.
If multiple `feature_columns` are given with `Di` `num_elements` each, their
outputs are concatenated. So, the final `Tensor` has shape
`[batch_size, T, D0 + D1 + ... + Dn]`.
Example:
```python
# Behavior of some cells or feature columns may depend on whether we are in
# training or inference mode, e.g. applying dropout.
training = True
rating = sequence_numeric_column('rating')
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [rating, watches_embedding]
sequence_input_layer = SequenceFeatures(columns)
features = tf.io.parse_example(...,
features=make_parse_example_spec(columns))
sequence_input, sequence_length = sequence_input_layer(
features, training=training)
sequence_length_mask = tf.sequence_mask(sequence_length)
  rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
  rnn_layer = tf.keras.layers.RNN(rnn_cell, return_state=True)
  outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask,
                             training=training)
```
"""
def __init__(
self,
feature_columns,
trainable=True,
name=None,
**kwargs):
""""Constructs a SequenceFeatures layer.
Args:
feature_columns: An iterable of dense sequence columns. Valid columns are
- `embedding_column` that wraps a `sequence_categorical_column_with_*`
- `sequence_numeric_column`.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the SequenceFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: If any of the `feature_columns` is not a
`SequenceDenseColumn`.
"""
super(SequenceFeatures, self).__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
expected_column_type=fc.SequenceDenseColumn,
**kwargs)
@property
def _is_feature_layer(self):
return True
def _target_shape(self, input_shape, total_elements):
return (input_shape[0], input_shape[1], total_elements)
def call(self, features, training=None):
"""Returns sequence input corresponding to the `feature_columns`.
Args:
features: A dict mapping keys to tensors.
      training: Python boolean or None, indicating whether the layer is being
run in training mode. This argument is passed to the call method of any
`FeatureColumn` that takes a `training` argument. For example, if a
`FeatureColumn` performed dropout, the column could expose a `training`
argument to control whether the dropout should be applied. If `None`,
defaults to `tf.keras.backend.learning_phase()`.
Returns:
An `(input_layer, sequence_length)` tuple where:
- input_layer: A float `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ
from batch to batch. `D` is the sum of `num_elements` for all
`feature_columns`.
- sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence
length for each example.
Raises:
ValueError: If features are not a dictionary.
"""
if not isinstance(features, dict):
raise ValueError('We expected a dictionary here. Instead we got: ',
features)
if training is None:
training = backend.learning_phase()
transformation_cache = fc.FeatureTransformationCache(features)
output_tensors = []
sequence_lengths = []
for column in self._feature_columns:
with ops.name_scope(column.name):
try:
dense_tensor, sequence_length = column.get_sequence_dense_tensor(
transformation_cache, self._state_manager, training=training)
except TypeError:
dense_tensor, sequence_length = column.get_sequence_dense_tensor(
transformation_cache, self._state_manager)
# Flattens the final dimension to produce a 3D Tensor.
output_tensors.append(self._process_dense_tensor(column, dense_tensor))
sequence_lengths.append(sequence_length)
# Check and process sequence lengths.
fc._verify_static_batch_size_equality(sequence_lengths,
self._feature_columns)
sequence_length = _assert_all_equal_and_return(sequence_lengths)
return self._verify_and_concat_tensors(output_tensors), sequence_length
layer_serialization.inject_feature_column_v1_objects(
'SequenceFeatures', SequenceFeatures)
layer_serialization.inject_feature_column_v2_objects(
'SequenceFeatures', SequenceFeatures)
def concatenate_context_input(context_input, sequence_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
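# Shape walk-through for concatenate_context_input (illustrative only):
#   context_input:  [batch_size, d1]                 e.g. [2, 3]
#   sequence_input: [batch_size, padded_length, d0]  e.g. [2, 4, 5]
#   tiled context:  [2, 4, 3]  (expand_dims on axis 1, tiled padded_length times)
#   result:         [batch_size, padded_length, d0 + d1] = [2, 4, 8]
# The same computation expressed with NumPy for intuition; this is a sketch,
# not the TF code path above.
def _demo_concatenate_context_input_shapes():
  import numpy as np
  context = np.ones((2, 3), dtype=np.float32)
  sequence = np.ones((2, 4, 5), dtype=np.float32)
  tiled = np.tile(context[:, np.newaxis, :], (1, 4, 1))
  combined = np.concatenate([sequence, tiled], axis=2)
  assert combined.shape == (2, 4, 8)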
@tf_export('feature_column.sequence_categorical_column_with_identity')
def sequence_categorical_column_with_identity(
key, num_buckets, default_value=None):
"""Returns a feature column that represents sequences of integers.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [watches_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
  rnn_layer = tf.keras.layers.RNN(rnn_cell, return_state=True)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
num_buckets: Range of inputs. Namely, inputs are expected to be in the
range `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
`[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_identity(
key=key,
num_buckets=num_buckets,
default_value=default_value))
@tf_export('feature_column.sequence_categorical_column_with_hash_bucket')
def sequence_categorical_column_with_hash_bucket(
key, hash_bucket_size, dtype=dtypes.string):
"""A sequence of categorical terms where ids are set by hashing.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
tokens = sequence_categorical_column_with_hash_bucket(
'tokens', hash_bucket_size=1000)
tokens_embedding = embedding_column(tokens, dimension=10)
columns = [tokens_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
  rnn_layer = tf.keras.layers.RNN(rnn_cell, return_state=True)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_hash_bucket(
key=key,
hash_bucket_size=hash_bucket_size,
dtype=dtype))
@tf_export('feature_column.sequence_categorical_column_with_vocabulary_file')
def sequence_categorical_column_with_vocabulary_file(
key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0,
default_value=None, dtype=dtypes.string):
"""A sequence of categorical terms where ids use a vocabulary file.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
states = sequence_categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
states_embedding = embedding_column(states, dimension=10)
columns = [states_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
  rnn_layer = tf.keras.layers.RNN(rnn_cell, return_state=True)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if less, later values are
      ignored. If `None`, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_vocabulary_file(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
dtype=dtype))
@tf_export('feature_column.sequence_categorical_column_with_vocabulary_list')
def sequence_categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A sequence of categorical terms where ids use an in-memory list.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
colors = sequence_categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
colors_embedding = embedding_column(colors, dimension=3)
columns = [colors_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
  rnn_layer = tf.keras.layers.RNN(rnn_cell, return_state=True)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_vocabulary_list(
key=key,
vocabulary_list=vocabulary_list,
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets))
@tf_export('feature_column.sequence_numeric_column')
def sequence_numeric_column(
key,
shape=(1,),
default_value=0.,
dtype=dtypes.float32,
normalizer_fn=None):
"""Returns a feature column that represents sequences of numeric data.
Example:
```python
temperature = sequence_numeric_column('temperature')
columns = [temperature]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
  rnn_layer = tf.keras.layers.RNN(rnn_cell, return_state=True)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input features.
shape: The shape of the input data per sequence id. E.g. if `shape=(2,)`,
each example must contain `2 * sequence_length` values.
default_value: A single value compatible with `dtype` that is used for
padding the sparse data into a dense `Tensor`.
dtype: The type of values.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
      The normalizer function takes the input `Tensor` as its argument and
      returns the output `Tensor` (e.g. `lambda x: (x - 3.0) / 4.2`). Note that
      even though the most common use case of this function is normalization,
      it can be used for any kind of TensorFlow transformation.
Returns:
A `SequenceNumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int.
ValueError: if any dimension in shape is not a positive integer.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = fc._check_shape(shape=shape, key=key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
return SequenceNumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with ops.name_scope(name, 'assert_all_equal', values=tensors):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))
with ops.control_dependencies(assert_equal_ops):
return array_ops.identity(tensors[0])
class SequenceNumericColumn(
fc.SequenceDenseColumn,
collections.namedtuple(
'SequenceNumericColumn',
('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
"""Represents sequences of numeric data."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
In this case, we apply the `normalizer_fn` to the input tensor.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Normalized input tensor.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return input_tensor
@property
def variable_shape(self):
"""Returns a `TensorShape` representing the shape of sequence input."""
return tensor_shape.TensorShape(self.shape)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `TensorSequenceLengthPair`.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
sp_tensor = transformation_cache.get(self, state_manager)
dense_tensor = sparse_ops.sparse_tensor_to_dense(
sp_tensor, default_value=self.default_value)
# Reshape into [batch_size, T, variable_shape].
dense_shape = array_ops.concat(
[array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape],
axis=0)
dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)
# Get the number of timesteps per example
# For the 2D case, the raw values are grouped according to num_elements;
# for the 3D case, the grouping happens in the third dimension, and
# sequence length is not affected.
if sp_tensor.shape.ndims == 2:
num_elements = self.variable_shape.num_elements()
else:
num_elements = 1
seq_length = fc_utils.sequence_length_from_sparse_tensor(
sp_tensor, num_elements=num_elements)
return fc.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=seq_length)
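  # Worked example of the reshape above (illustrative): with `shape=(2,)` and a
  # batch of two examples whose flattened sparse values are
  #   example 0: [1., 2., 3., 4.]   (two timesteps of two elements each)
  #   example 1: [5., 6.]           (one timestep)
  # the padded dense tensor has shape [2, 4], the reshape yields [2, 2, 2], and
  # sequence_length_from_sparse_tensor(..., num_elements=2) returns [2, 1].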
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
fc._check_config_keys(config, cls._fields)
kwargs = fc._standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
# pylint: enable=protected-access
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 02 18:20:40 2015
@author: Konstantin
"""
from scipy import optimize
import numpy as np
import configparser, os, csv
from fabric.api import env, execute, task, get
import cuisine
import vm_control
import pandas
import operator
import pickle
import scipy.stats as stats
#class Parameter:
# def __init__(self, value):
# self.value = value
#
# def set_val(self, value):
# self.value = value
#
# def __call__(self):
# return self.value
#
#def fit(function, parameters, y, x=None):
# def f(params):
# i = 0
# for p in parameters:
# p.set_val(params[i])
# i += 1
# return y - function(x)
# if x is None: x = arange(y.shape[0])
# p = [param() for param in parameters]
# optimize.leastsq(f, p)
#
#mu = Parameter(7)
#sigma = Parameter(3)
#height = Parameter(5)
#
#def gaussian(x):
# return height() * exp(-((x-mu())/sigma())**2)
#
#def exponential(x):
# return lamda * exp(-((x-mu())/sigma())**2)
#
# pylint: disable=E1101
@task
def upload_file(remote_location, local_location, sudo=False):
"""
Fabric task to upload a file to a VM.
"""
cuisine.file_upload(remote_location, local_location, sudo=sudo)
cuisine.file_ensure(remote_location)
@task
def collect_data_files(vm_list):
i = 1
for vm in vm_list:
get('/root/response_time.csv.'+vm,
'response_time_'+str(i)+'.csv')
i += 1
def sum_sq(x, x_bar):
return (x - x_bar)**2
def log_num(x, x_bar):
return np.log(x / x_bar) + ((x - x_bar)/x_bar)
def log_den(x, x_bar):
return np.log(x / x_bar)
def calculate_prediction_data():
    data = pickle.load(open('predictions', 'rb'))
Y_bar = np.mean(data['measured'].copy())
y_var = np.apply_along_axis(log_den,
0,
data['measured'],
Y_bar)
y_var2 = np.apply_along_axis(sum_sq,
0,
data['measured'],
Y_bar)
y_var = y_var.cumsum()[-1]
y_var2 = y_var2.cumsum()[-1]
R_squared_list = []
regres_list = []
i = 0
for el in data.columns:
if i == 3:
# break
            print(y_var2)
# Y_bar_z = np.mean(data[el].copy())
regres_var = np.apply_along_axis(sum_sq,
0,
data[el],
Y_bar)
regres_var = regres_var.cumsum()[-1]
regres_list.append(regres_var)
R_squared = y_var2/regres_var
if R_squared > 1.0:
R_squared = 1.0/R_squared
print "======%s" % R_squared
R_squared_list.append(R_squared)
elif i == 4:
# break
            print(y_var2)
# Y_bar_z = np.mean(data[el].copy())
regres_var = np.apply_along_axis(sum_sq,
0,
data[el],
Y_bar)
regres_var = regres_var.cumsum()[-1]
regres_list.append(regres_var)
R_squared = y_var2/regres_var
if R_squared > 1.0:
R_squared = 1.0/R_squared
print "======%s" % R_squared
R_squared_list.append(R_squared)
else:
            print(y_var)
# Y_bar_z = np.mean(data[el].copy())
regres_var = np.apply_along_axis(log_num,
0,
data[el],
Y_bar)
regres_var = regres_var.cumsum()[-1]
regres_list.append(regres_var)
R_squared = 1 - abs(y_var/regres_var)
print "======%s" % R_squared
R_squared_list.append(R_squared)
i += 1
    print(R_squared_list)
    print(regres_list)
    print(y_var)
R_squared_list = np.array(R_squared_list, dtype='float64')
# R_squared_list = pandas.DataFrame(R_squared_list)
    print(data)
data.loc[len(data)] = R_squared_list
# data = np.hstack((data,R_squared_list))
    pickle.dump(data, open('predictions2', 'wb'))
np.savetxt('predictions2.csv', data, fmt='%.4f', delimiter=';')
    with open('predictions2.csv', 'a', newline='') as f:
writer = csv.writer(f, delimiter=';')
writer.writerow(R_squared_list)
writer.writerow(data.columns)
def write_prediction_data(data,ratings):
# f_rates = read_data_from_files()
# f_rates = sorted(f_rates)
# data = np.array(f_rates,dtype='float64')
# data = data/10000
n = len(data)
    print(n)
rates = sorted(ratings.keys())
for el in rates:
preds = get_predictions(ratings, el, n)
data = np.vstack((data, preds))
data = data.transpose()
data = pandas.DataFrame(data=data, dtype='float64')
np.savetxt('predictions.csv', data, fmt='%.4f', delimiter=';')
names = ['measured']
[names.append(el) for el in rates]
    print(names)
    data.columns = names
    pickle.dump(data, open('predictions', 'wb'))
# np.savetxt('predictions.csv', arr, fmt='%.4f', delimiter=';')
def read_data_from_files():
f_rates = []
    with open('f_rates.csv', 'r', newline='') as f:
reader = csv.reader(f, delimiter=';', quotechar='|')
f_rates = [row[0] for row in reader]
return f_rates
def fit_model(data, model_name):
n = len(data)
if(model_name == 'expon'):
params = stats.expon.fit(data)
std_model = stats.expon.rvs(size=n,
loc=params[0],
scale=params[1])
elif(model_name == 'dweibull'):
params = stats.dweibull.fit(data)
std_model = stats.dweibull.rvs(params[0],
loc=params[1],
scale=params[2],
size=n)
elif(model_name == 'norm'):
params = stats.norm.fit(data)
std_model = stats.norm.rvs(loc=params[0],
scale=params[1],
size=n)
elif(model_name == 'lognorm'):
params = stats.lognorm.fit(data)
std_model = stats.lognorm.rvs(params[0],
loc=params[1],
scale=params[2],
size=n)
std_model.sort()
test_vals = stats.ks_2samp(data, std_model)
result = [test for test in test_vals]
result.append(params)
result.append(std_model)
return result
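# Usage sketch (not part of the original script): fit one candidate model to
# synthetic exponential data and inspect the result layout returned by
# fit_model, i.e. [ks_statistic, p_value, fitted_params, fitted_sample].
def _demo_fit_model():
    sample = stats.expon.rvs(size=500, scale=2.0)
    sample.sort()
    ks_stat, p_value, params, fitted = fit_model(sample, 'expon')
    print('KS statistic: %.4f, p-value: %.4f, params: %s'
          % (ks_stat, p_value, params))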
def rank_models(ratings):
sorted_by_stat = sorted(ratings.items(),
key=operator.itemgetter(1))
return [element[0] for element in sorted_by_stat]
def write_results(ranking, ratings):
    with open('fitted_models.csv', 'w', newline='') as f:
writer=csv.writer(f,delimiter=';')
for dist in ranking:
writer.writerow([dist,ratings[dist][2],
ratings[dist][0],ratings[dist][1]
])
def weibull(x, c):
return (c / 2 * abs(x)**(c-1) * np.exp(-abs(x)**c))
def norm(x):
return (np.exp(-x**2/2)/np.sqrt(2*np.pi))
def lognorm(x, s):
return (1 / (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2))
def get_predictions(ratings, model_name, size):
if(model_name == 'expon'):
preds = stats.expon.rvs(ratings['expon'][2][0],
ratings['expon'][2][1],
size=size)
elif(model_name == 'dweibull'):
preds = stats.dweibull.rvs(ratings['dweibull'][2][0],
ratings['dweibull'][2][1],
ratings['dweibull'][2][2],
size=size)
elif(model_name == 'norm'):
preds = stats.norm.rvs(ratings['norm'][2][0],
ratings['norm'][2][1],
size=size)
elif(model_name == 'lognorm'):
preds = stats.lognorm.rvs(ratings['lognorm'][2][0],
ratings['lognorm'][2][1],
ratings['lognorm'][2][2],
size=size)
preds = np.apply_along_axis(abs, 0, preds)
preds = np.apply_along_axis(sorted, 0, preds)
return preds
def fit_models():
#if __name__ == "__main__":
f_rates = read_data_from_files()
f_rates = sorted(f_rates)
    print(f_rates)
data = np.array(f_rates, dtype='float64')
data = data/1500
n = len(data)
data.sort()
ratings = {'expon': fit_model(data,'expon'),
'dweibull': fit_model(data, 'dweibull'),
'norm': fit_model(data, 'norm'),
'lognorm': fit_model(data, 'lognorm'),
}
ranking = rank_models(ratings)
    print(ranking)
    write_results(ranking, ratings)
    print(ranking[0])
    space = np.linspace(0.01, 1.0, num=100)
    preds = get_predictions(ratings, ranking[0], n)
    print(preds)
write_prediction_data(data, ratings)
calculate_prediction_data()
if __name__ == "__main__":
fit_models()
|
|
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest # noqa
from six import b as b_
from webtest import TestApp
from pecan import expose, make_app
from pecan.secure import secure, unlocked, SecureController
from pecan.tests import PecanTestCase
try:
set()
except NameError:
from sets import Set as set
class TestSecure(PecanTestCase):
def test_simple_secure(self):
authorized = False
class SecretController(SecureController):
@expose()
def index(self):
return 'Index'
@expose()
@unlocked
def allowed(self):
return 'Allowed!'
@classmethod
def check_permissions(cls):
return authorized
class RootController(object):
@expose()
def index(self):
return 'Hello, World!'
@expose()
@secure(lambda: False)
def locked(self):
return 'No dice!'
@expose()
@secure(lambda: True)
def unlocked(self):
return 'Sure thing'
secret = SecretController()
app = TestApp(make_app(
RootController(),
debug=True,
static_root='tests/static'
))
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello, World!')
response = app.get('/unlocked')
assert response.status_int == 200
assert response.body == b_('Sure thing')
response = app.get('/locked', expect_errors=True)
assert response.status_int == 401
response = app.get('/secret/', expect_errors=True)
assert response.status_int == 401
response = app.get('/secret/allowed')
assert response.status_int == 200
assert response.body == b_('Allowed!')
def test_unlocked_attribute(self):
class AuthorizedSubController(object):
@expose()
def index(self):
return 'Index'
@expose()
def allowed(self):
return 'Allowed!'
class SecretController(SecureController):
@expose()
def index(self):
return 'Index'
@expose()
@unlocked
def allowed(self):
return 'Allowed!'
authorized = unlocked(AuthorizedSubController())
class RootController(object):
@expose()
def index(self):
return 'Hello, World!'
@expose()
@secure(lambda: False)
def locked(self):
return 'No dice!'
@expose()
@secure(lambda: True)
def unlocked(self):
return 'Sure thing'
secret = SecretController()
app = TestApp(make_app(
RootController(),
debug=True,
static_root='tests/static'
))
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello, World!')
response = app.get('/unlocked')
assert response.status_int == 200
assert response.body == b_('Sure thing')
response = app.get('/locked', expect_errors=True)
assert response.status_int == 401
response = app.get('/secret/', expect_errors=True)
assert response.status_int == 401
response = app.get('/secret/allowed')
assert response.status_int == 200
assert response.body == b_('Allowed!')
response = app.get('/secret/authorized/')
assert response.status_int == 200
assert response.body == b_('Index')
response = app.get('/secret/authorized/allowed')
assert response.status_int == 200
assert response.body == b_('Allowed!')
def test_secure_attribute(self):
authorized = False
class SubController(object):
@expose()
def index(self):
return 'Hello from sub!'
class RootController(object):
@expose()
def index(self):
return 'Hello from root!'
sub = secure(SubController(), lambda: authorized)
app = TestApp(make_app(RootController()))
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello from root!')
response = app.get('/sub/', expect_errors=True)
assert response.status_int == 401
authorized = True
response = app.get('/sub/')
assert response.status_int == 200
assert response.body == b_('Hello from sub!')
def test_secured_generic_controller(self):
authorized = False
class RootController(object):
@classmethod
def check_permissions(cls):
return authorized
@expose(generic=True)
def index(self):
return 'Index'
@secure('check_permissions')
@index.when(method='POST')
def index_post(self):
return 'I should not be allowed'
@secure('check_permissions')
@expose(generic=True)
def secret(self):
return 'I should not be allowed'
app = TestApp(make_app(
RootController(),
debug=True,
static_root='tests/static'
))
response = app.get('/')
assert response.status_int == 200
response = app.post('/', expect_errors=True)
assert response.status_int == 401
response = app.get('/secret/', expect_errors=True)
assert response.status_int == 401
def test_secured_generic_controller_lambda(self):
authorized = False
class RootController(object):
@expose(generic=True)
def index(self):
return 'Index'
@secure(lambda: authorized)
@index.when(method='POST')
def index_post(self):
return 'I should not be allowed'
@secure(lambda: authorized)
@expose(generic=True)
def secret(self):
return 'I should not be allowed'
app = TestApp(make_app(
RootController(),
debug=True,
static_root='tests/static'
))
response = app.get('/')
assert response.status_int == 200
response = app.post('/', expect_errors=True)
assert response.status_int == 401
response = app.get('/secret/', expect_errors=True)
assert response.status_int == 401
def test_secured_generic_controller_secure_attribute(self):
authorized = False
class SecureController(object):
@expose(generic=True)
def index(self):
return 'I should not be allowed'
@index.when(method='POST')
def index_post(self):
return 'I should not be allowed'
@expose(generic=True)
def secret(self):
return 'I should not be allowed'
class RootController(object):
sub = secure(SecureController(), lambda: authorized)
app = TestApp(make_app(
RootController(),
debug=True,
static_root='tests/static'
))
response = app.get('/sub/', expect_errors=True)
assert response.status_int == 401
response = app.post('/sub/', expect_errors=True)
assert response.status_int == 401
response = app.get('/sub/secret/', expect_errors=True)
assert response.status_int == 401
def test_secured_generic_controller_secure_attribute_with_unlocked(self):
class RootController(SecureController):
@unlocked
@expose(generic=True)
def index(self):
return 'Unlocked!'
@unlocked
@index.when(method='POST')
def index_post(self):
return 'Unlocked!'
@expose(generic=True)
def secret(self):
return 'I should not be allowed'
app = TestApp(make_app(
RootController(),
debug=True,
static_root='tests/static'
))
response = app.get('/')
assert response.status_int == 200
response = app.post('/')
assert response.status_int == 200
response = app.get('/secret/', expect_errors=True)
assert response.status_int == 401
def test_state_attribute(self):
from pecan.secure import Any, Protected
assert repr(Any) == '<SecureState Any>'
assert bool(Any) is False
assert repr(Protected) == '<SecureState Protected>'
assert bool(Protected) is True
def test_secure_obj_only_failure(self):
class Foo(object):
pass
try:
secure(Foo())
except Exception as e:
assert isinstance(e, TypeError)
class TestObjectPathSecurity(PecanTestCase):
def setUp(self):
super(TestObjectPathSecurity, self).setUp()
permissions_checked = set()
class DeepSecretController(SecureController):
authorized = False
@expose()
@unlocked
def _lookup(self, someID, *remainder):
if someID == 'notfound':
return None
return SubController(someID), remainder
@expose()
def index(self):
return 'Deep Secret'
@classmethod
def check_permissions(cls):
permissions_checked.add('deepsecret')
return cls.authorized
class SubController(object):
def __init__(self, myID):
self.myID = myID
@expose()
def index(self):
return 'Index %s' % self.myID
deepsecret = DeepSecretController()
class SecretController(SecureController):
authorized = False
independent_authorization = False
@expose()
def _lookup(self, someID, *remainder):
if someID == 'notfound':
return None
elif someID == 'lookup_wrapped':
return self.wrapped, remainder
return SubController(someID), remainder
@secure('independent_check_permissions')
@expose()
def independent(self):
return 'Independent Security'
wrapped = secure(
SubController('wrapped'), 'independent_check_permissions'
)
@classmethod
def check_permissions(cls):
permissions_checked.add('secretcontroller')
return cls.authorized
@classmethod
def independent_check_permissions(cls):
permissions_checked.add('independent')
return cls.independent_authorization
class NotSecretController(object):
@expose()
def _lookup(self, someID, *remainder):
if someID == 'notfound':
return None
return SubController(someID), remainder
unlocked = unlocked(SubController('unlocked'))
class RootController(object):
secret = SecretController()
notsecret = NotSecretController()
self.deepsecret_cls = DeepSecretController
self.secret_cls = SecretController
self.permissions_checked = permissions_checked
self.app = TestApp(make_app(
RootController(),
debug=True,
static_root='tests/static'
))
def tearDown(self):
self.permissions_checked.clear()
self.secret_cls.authorized = False
self.deepsecret_cls.authorized = False
def test_sub_of_both_not_secret(self):
response = self.app.get('/notsecret/hi/')
assert response.status_int == 200
assert response.body == b_('Index hi')
def test_protected_lookup(self):
response = self.app.get('/secret/hi/', expect_errors=True)
assert response.status_int == 401
self.secret_cls.authorized = True
response = self.app.get('/secret/hi/')
assert response.status_int == 200
assert response.body == b_('Index hi')
assert 'secretcontroller' in self.permissions_checked
def test_secured_notfound_lookup(self):
response = self.app.get('/secret/notfound/', expect_errors=True)
assert response.status_int == 404
def test_secret_through_lookup(self):
response = self.app.get(
'/notsecret/hi/deepsecret/', expect_errors=True
)
assert response.status_int == 401
def test_layered_protection(self):
response = self.app.get('/secret/hi/deepsecret/', expect_errors=True)
assert response.status_int == 401
assert 'secretcontroller' in self.permissions_checked
self.secret_cls.authorized = True
response = self.app.get('/secret/hi/deepsecret/', expect_errors=True)
assert response.status_int == 401
assert 'secretcontroller' in self.permissions_checked
assert 'deepsecret' in self.permissions_checked
self.deepsecret_cls.authorized = True
response = self.app.get('/secret/hi/deepsecret/')
assert response.status_int == 200
assert response.body == b_('Deep Secret')
assert 'secretcontroller' in self.permissions_checked
assert 'deepsecret' in self.permissions_checked
def test_cyclical_protection(self):
self.secret_cls.authorized = True
self.deepsecret_cls.authorized = True
response = self.app.get('/secret/1/deepsecret/2/deepsecret/')
assert response.status_int == 200
assert response.body == b_('Deep Secret')
assert 'secretcontroller' in self.permissions_checked
assert 'deepsecret' in self.permissions_checked
def test_unlocked_lookup(self):
response = self.app.get('/notsecret/1/deepsecret/2/')
assert response.status_int == 200
assert response.body == b_('Index 2')
assert 'deepsecret' not in self.permissions_checked
response = self.app.get(
'/notsecret/1/deepsecret/notfound/', expect_errors=True
)
assert response.status_int == 404
assert 'deepsecret' not in self.permissions_checked
def test_mixed_protection(self):
self.secret_cls.authorized = True
response = self.app.get(
'/secret/1/deepsecret/notfound/', expect_errors=True
)
assert response.status_int == 404
assert 'secretcontroller' in self.permissions_checked
assert 'deepsecret' not in self.permissions_checked
def test_independent_check_failure(self):
response = self.app.get('/secret/independent/', expect_errors=True)
assert response.status_int == 401
assert len(self.permissions_checked) == 1
assert 'independent' in self.permissions_checked
def test_independent_check_success(self):
self.secret_cls.independent_authorization = True
response = self.app.get('/secret/independent')
assert response.status_int == 200
assert response.body == b_('Independent Security')
assert len(self.permissions_checked) == 1
assert 'independent' in self.permissions_checked
def test_wrapped_attribute_failure(self):
self.secret_cls.independent_authorization = False
response = self.app.get('/secret/wrapped/', expect_errors=True)
assert response.status_int == 401
assert len(self.permissions_checked) == 1
assert 'independent' in self.permissions_checked
def test_wrapped_attribute_success(self):
self.secret_cls.independent_authorization = True
response = self.app.get('/secret/wrapped/')
assert response.status_int == 200
assert response.body == b_('Index wrapped')
assert len(self.permissions_checked) == 1
assert 'independent' in self.permissions_checked
def test_lookup_to_wrapped_attribute_on_self(self):
self.secret_cls.authorized = True
self.secret_cls.independent_authorization = True
response = self.app.get('/secret/lookup_wrapped/')
assert response.status_int == 200
assert response.body == b_('Index wrapped')
assert len(self.permissions_checked) == 2
assert 'independent' in self.permissions_checked
assert 'secretcontroller' in self.permissions_checked
def test_unlocked_attribute_in_insecure(self):
response = self.app.get('/notsecret/unlocked/')
assert response.status_int == 200
assert response.body == b_('Index unlocked')
class SecureControllerSharedPermissionsRegression(PecanTestCase):
"""Regression tests for https://github.com/dreamhost/pecan/issues/131"""
def setUp(self):
super(SecureControllerSharedPermissionsRegression, self).setUp()
class Parent(object):
@expose()
def index(self):
return 'hello'
class UnsecuredChild(Parent):
pass
class SecureChild(Parent, SecureController):
@classmethod
def check_permissions(cls):
return False
class RootController(object):
secured = SecureChild()
unsecured = UnsecuredChild()
self.app = TestApp(make_app(RootController()))
def test_inherited_security(self):
assert self.app.get('/secured/', status=401).status_int == 401
assert self.app.get('/unsecured/').status_int == 200
|
|
"""
LogParser class
"""
import re
from datetime import datetime
class LogParser(object):
""" Handles parsing of all logs from the Mobile API.
Attributes:
parser_info: info required by the parser that is specific to each OS
"""
parser_info = {
'Android': {
'logLineRegex': re.compile(
'(.+?)\\s+(\\d+)\\s+(\\d+) ([IWVEDAF]) (.*?): ((?:.*\\n*)*)'),
'datetimeFormat': ['%Y-%m-%d %H:%M:%S.%f'],
'filterLineRegex': re.compile('-* beginning of'),
'groupNums': {
'time': 1,
'processId': 2,
'threadId': 3,
'logType': 4,
'tag': 5,
'text': 6,
},
'logTypes': {
'I': 'Info',
'W': 'Warning',
'V': 'Verbose',
'E': 'Error',
'D': 'Debug',
'A': 'WTF',
'F': 'Fatal',
},
},
'iOS': {
'logLineRegex': re.compile('(.*) (.*)\\[(\\d+):(\\d+)\\] (.*)'),
'datetimeFormat': [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S.%f%z',
'%Y-%m-%d %H:%M:%S %z'
],
'exceptionRegex': re.compile('(.*?)-{2,} BEGIN UNHANDLED EXCEPTION'),
'groupNums': {
'time': 1,
'tag': 2,
'processId': 3,
'threadId': 4,
'text': 5,
},
},
}
@staticmethod
def parse(raw_log_lines, os_type):
""" Parses a log message from the Mobile API. Note, this is a Generator
function.
Args:
raw_log_lines (string): the raw log lines
os_type (string): the OS type from which the logs came ("iOS" or
"Android")
Generates:
a single log entry dictionary every time the function is called
"""
# If there are no log lines, don't do anything.
if not raw_log_lines or raw_log_lines.isspace():
return []
# Grab the regexes from the config.
filter_regex = LogParser.parser_info[os_type]\
.get('filterLineRegex', None)
exception_regex = LogParser.parser_info[os_type]\
.get('exceptionRegex', None)
current_log = None
in_unhandled_exception = False
for line in raw_log_lines.splitlines():
# Skip lines that are not log lines. There may be cases when these
# appear in a log line that is not at the beginning of the raw
# data.
if filter_regex and filter_regex.match(line):
continue
# Check to see if an iOS unhandled exception is starting.
if exception_regex:
exception_groups = exception_regex.match(line)
if exception_groups:
if current_log:
yield current_log
in_unhandled_exception = True
exception_time_string = exception_groups.group(1)
current_log = {
'time': LogParser._parse_datetime(exception_time_string,
os_type),
'logType': 'Error',
'tag': '',
'text': line,
}
continue
# If we are in an unhandled exception, just add the line to the
# current log.
if in_unhandled_exception:
current_log['text'] += '\n%s' % line
else:
# Check if current log has the same time as the previous log
# parsed.
new_log = LogParser.parse_raw_log(line, os_type)
if current_log and (current_log['time'] == new_log['time'] and
current_log['logType'] == new_log['logType']):
# If part of the same event, add the log's text to the
# previous parsed log.
current_log['text'] += '\n%s' % new_log['text']
else:
if current_log:
yield current_log
current_log = LogParser.parse_entries(new_log)
# Send any leftover unhandled exception logs.
if in_unhandled_exception or current_log:
yield current_log
@staticmethod
def parse_entries(log_entry):
""" Returns the elements that the web interface shows of a log.
Args:
log_entry: the logEntry to return including processId and threadId
Returns:
dict: the message data to be sent to the web browser (no processId
nor threadId)
"""
return {
'time': log_entry['time'],
'logType': log_entry['logType'],
'tag': log_entry['tag'],
'text': log_entry['text'],
}
@staticmethod
def parse_raw_log(log_data, os_type):
""" Parse a raw log line.
Args:
log_data: the raw log line
os_type (string): the OS type from which the logs came ("iOS" or
"Android")
Returns:
dict: the log entry from the log line
"""
log_line_regex = LogParser.parser_info[os_type]['logLineRegex']
parsed_log = log_line_regex.match(log_data)
group_from_log = LogParser._group_from_log
# Parse the Time
time_field = group_from_log(parsed_log, 'time', os_type)
log_time = LogParser._parse_datetime(time_field, os_type)
# Determine the log type if the OS supports it.
log_type = None
if 'logTypes' in LogParser.parser_info[os_type]:
type_group = group_from_log(parsed_log, 'logType', os_type)
log_type = LogParser.parser_info[os_type]['logTypes'][type_group]
return {
'time': log_time,
'processId': group_from_log(parsed_log, 'processId', os_type),
'threadId': group_from_log(parsed_log, 'threadId', os_type),
'logType': log_type,
'tag': group_from_log(parsed_log, 'tag', os_type),
'text': group_from_log(parsed_log, 'text', os_type),
}
@staticmethod
def convert_line_to_html(log_entry):
""" Takes a parsed_line and converts it to HTML.
Args:
log_entry: the log entry dictionary
Returns:
string: formatted HTML
"""
row_classes = {
'Error': 'danger',
'Warning': 'warning',
}
return '''
<tr class="%s">
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
</tr>''' % (
row_classes.get(log_entry['logType'], ''),
str(log_entry['time']),
log_entry['tag'],
log_entry.get('logType', ''),
log_entry['text'],
)
@staticmethod
def convert_to_html(log_entries):
""" Takes a parsed block and converts it to HTML.
Args:
log entries (list): a list of log entries
Returns:
string: formatted HTML
"""
return ''.join(LogParser.convert_line_to_html(line)
for line in log_entries)
@staticmethod
def _group_from_log(parsed_log, group_name, os_type):
""" Gets a group from a parsed log.
Args:
parsed_log: regex results from parsing the log line
group_name: the name of the group to get
os_type (string): the OS type from which the logs came ("iOS" or
"Android")
Returns:
string: the value of the group
"""
group_nums = LogParser.parser_info[os_type]['groupNums']
if group_name not in group_nums:
return None
return parsed_log.group(group_nums[group_name])
@staticmethod
def _parse_datetime(date_string, os_type):
""" Parses a datetime string into a datetime Python object.
Args:
date_string: the date string to parse
os_type (string): the OS type from which the logs came ("iOS" or
"Android")
Returns:
datetime: the parsed datetime object
"""
# On Android, we have to add the year to the string so that it parses
# correctly.
if os_type == 'Android':
current_year = datetime.now().year
date_string = '%s-%s' % (str(current_year), date_string)
# Try to parse the datetime using all of the formats provided.
datetime_formats = LogParser.parser_info[os_type]['datetimeFormat']
for datetime_format in datetime_formats:
try:
date_time = datetime.strptime(date_string, datetime_format)\
.replace(tzinfo=None)
return date_time
            except ValueError:
                # This format didn't work; fall through and try the next one.
                continue
return None
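# Usage sketch (not part of the original module), assuming Android logcat
# "threadtime"-style lines that match the regex configured above; the sample
# lines are invented for illustration.
def _demo_parse_android_log():
    raw = ('--------- beginning of main\n'
           '03-02 18:20:40.409  1795  1825 I WifiService: acquireWifiLock\n'
           '03-02 18:20:40.409  1795  1825 I WifiService: releaseWifiLock\n'
           '03-02 18:20:41.128  2301  2301 E ActivityManager: crash in process\n')
    # The first two entries share a timestamp and log type, so they are merged
    # into a single entry; the error line becomes a second entry.
    for entry in LogParser.parse(raw, 'Android'):
        print(entry['time'], entry['logType'], entry['tag'], entry['text'])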
|
|
# MIT License
# Copyright (c) 2016 Stefan Zapf, Christopher Kraushaar
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Load libraries
import math
import matplotlib.pyplot as plt
import numpy as np
import sys
import csv
import warnings
import argparse
warnings.filterwarnings("ignore")
def get_angles(orbit, number_of_datapoints, ax):
start = (2 * math.pi) / 11 * (orbit - 1)
stop = start + (math.pi / 2)
if orbit > 0:
ax.text(0, orbit - 0.1, "{0:.1f}".format(1 - float(orbit) / 10), verticalalignment="top",
horizontalalignment="center", color="lightgray")
return np.linspace(start, stop, number_of_datapoints, endpoint=True)
def get_y(angle, radius):
return math.sin(angle) * radius
def get_x(angle, radius):
return math.cos(angle) * radius
def label_to_idx(labels, label):
center_idx_bool = labels == label
return np.asscalar(np.where(center_idx_bool)[0]), center_idx_bool
def transform_to_correlation_dist(data):
y_corr = np.corrcoef(data.T)
# we just need the magnitude of the correlation and don't care whether it's positive or not
abs_corr = np.abs(y_corr)
return np.nan_to_num(abs_corr)
def transform_to_positive_corrs(data, sun_idx):
y_corr = np.corrcoef(data.T)
positive = y_corr[sun_idx]
positive = positive >= 0
return positive
def solar_corr(data, labels, center, orbits=10, show_window=True, image_path="solar.png",
save_png=True, title="Solar Correlation Map"):
labels = np.array(labels)
center_idx, center_idx_bool = label_to_idx(labels, center)
plot_idx = 23
all_idx = np.logical_not(center_idx_bool)
positive = transform_to_positive_corrs(data, center_idx)
corr_dist = transform_to_correlation_dist(data)
sun_corr_dist = corr_dist[center_idx]
colors = np.linspace(0, 1, num=len(sun_corr_dist))
cordinate_to_correlation = {}
step = 1.0 / orbits
last_orbit = 0.1
fig = plt.gcf()
fig.set_size_inches(20, 20)
labels_idx = np.array([center_idx])
color_map = plt.get_cmap("Paired")
color_map_circle = plt.get_cmap("Accent")
ax = fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_position([0.01, 0.01, 0.98, 0.98]) # set a new position
# place sun:
plt.scatter(0, 0, color=color_map(colors[center_idx]), s=600, label=labels[center_idx])
ax.text(0.2, 0.2, str(labels[center_idx]), verticalalignment="bottom", horizontalalignment='left', color="gray")
for orbit in range(1, orbits + 1):
new_orbit = step * orbit + 0.1
idx = (sun_corr_dist >= (1 - last_orbit)) & (sun_corr_dist > (1 - new_orbit)) & all_idx
idx_int = np.where(idx)[0]
corr_dists = []
for index in idx_int:
corr_dists = np.append(corr_dists, corr_dist[center_idx][index])
sort_args = np.argsort(corr_dists)
idx_int = idx_int[sort_args]
labels_idx = np.append(labels_idx, idx_int)
planets = sum(idx)
angles = get_angles(orbit, planets, ax)
# place planets
while np.any(idx):
remaining = sum(idx)
current_planet = planets - remaining
current_idx = idx_int[current_planet]
angle = angles[current_planet]
x = get_x(angle, orbit)
y = get_y(angle, orbit)
# current_idx = idx_int[current_planet]
color = colors[current_idx]
plt.scatter(x, y, color=color_map(color), s=250, label=labels[current_idx])
cordinate_to_correlation[(x, y)] = {"is_planet": True, "corr_sun": corr_dist[center_idx, current_idx] }
planet_idx = current_idx
idx[planet_idx] = False
all_idx[planet_idx] = False
planet_corr = corr_dist[planet_idx]
# ax.text(x-0.35, y+0.2, "{0:.3f}".format(planet_corr[center_idx]))
col = "#03C03C" if positive[planet_idx] else "#FF6961"
if orbit == orbits:
col = "grey"
ax.text(x + 0.15, y + 0.15, str(labels[planet_idx]), verticalalignment="bottom", horizontalalignment='left',
color=col)
moon_idx = (planet_corr >= 0.8) & all_idx
moon_idx_int = np.where(moon_idx)[0]
moons = sum(moon_idx)
moon_angles = get_angles(0, moons, ax)
# add orbit around planet if it has moons
if np.any(moon_idx):
moon_orbit = 0.5
circle = plt.Circle((x, y), moon_orbit, color='lightgrey', alpha=0.8, fill=True, zorder=0)
fig.gca().add_artist(circle)
while np.any(moon_idx):
remaining_moons = sum(moon_idx)
current_moon = moons - remaining_moons
current_moon_idx = moon_idx_int[current_moon]
angle = moon_angles[current_moon]
m_x = get_x(angle, moon_orbit) + x
m_y = get_y(angle, moon_orbit) + y
color = colors[current_moon_idx]
plt.scatter(m_x, m_y, color=color_map(color), s=100, label=labels[current_moon_idx])
cordinate_to_correlation[(m_x, m_y)] = {"is_planet": False, "corr_sun": corr_dist[center_idx][current_moon_idx]}
col = "#03C03C" if positive[current_moon_idx] else "#FF6961"
if orbit == orbits:
col = "grey"
ax.text(m_x + 0.15, m_y + 0.05, str(labels[current_moon_idx]), verticalalignment="bottom",
horizontalalignment='left', color=col)
moon_idx[current_moon_idx] = False
idx[current_moon_idx] = False
all_idx[current_moon_idx] = False
last_orbit = new_orbit
circle = plt.Circle((0, 0), orbit, color=color_map_circle(1 - ((orbit - 1) * step)), fill=False, zorder=0)
fig.gca().add_artist(circle)
labels_pos = np.array(labels)[labels_idx]
recs = []
ax = plt.gca()
ax.axis("equal")
plt.axis([-10, 10, -10, 10])
plt.suptitle(title, fontsize=16)
plt.subplots_adjust(top=0.95)
if save_png:
plt.savefig(image_path)
if show_window:
# only require mplcursors if we need an interactive plot
import mplcursors
# cooordinate_to_correlation[(sel.target.x, sel.target.y)]["corr_sun"])
cursors = mplcursors.cursor(hover=True)
@cursors.connect("add")
def _(sel):
sel.annotation.set(position=(15, -15))
# Note: Needs to be set separately due to matplotlib/matplotlib#8956.
sel.annotation.get_bbox_patch().set(fc="lightgrey")
sel.annotation.arrow_patch.set(arrowstyle="simple", fc="white", alpha=0)
sel.annotation.set_text("Correlation to sun \n{}".format(cordinate_to_correlation[ (sel.target[0],sel.target[1])]["corr_sun"]))
plt.show()
def main(input_csv, sun, image_path, title):
# Load data
data = np.genfromtxt(input_csv, delimiter=",", skip_header=1)
labels = csv.DictReader(open(input_csv), skipinitialspace=True).fieldnames
solar_corr(data, labels, sun, image_path=image_path, title=title)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create a solar correlation map')
parser.add_argument("csv_file", type=str)
parser.add_argument("sun_variable", type=str)
parser.add_argument("image_file_name", type=str, nargs="?", default="solar.png")
parser.add_argument("--title", nargs="?", default=None)
args = parser.parse_args()
if args.title is None:
args.title = "Solar Correlation Map for '{}' ".format(args.sun_variable)
main(args.csv_file, args.sun_variable, args.image_file_name, args.title)
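# Usage sketch (not part of the original script): render a map for a small
# synthetic dataset without opening an interactive window. Column names and
# data are invented for illustration; assumes a NumPy version that still
# provides np.asscalar, which label_to_idx above relies on.
def _demo_solar_corr():
    rng = np.random.RandomState(0)
    x = rng.randn(200)
    data = np.column_stack([x, x + 0.1 * rng.randn(200), rng.randn(200)])
    labels = ['target', 'correlated', 'noise']
    solar_corr(data, labels, 'target', show_window=False,
               image_path='demo_solar.png', title='Demo Solar Correlation Map')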
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, PeriodIndex,
Timedelta)
from pandas.compat import u, StringIO
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.base import (FrozenList, FrozenNDArray, PandasDelegate,
NoNewAttributesMixin)
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.util.testing import (assertRaisesRegexp, assertIsInstance)
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container) # noqa
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container) # noqa
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the
# Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assertIsInstance(result, klass)
self.assertEqual(result, expected)
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assertIsInstance(self.container.view(), FrozenNDArray)
self.assertFalse(isinstance(
self.container.view(np.ndarray), FrozenNDArray))
self.assertIsNot(self.container.view(), self.container)
self.assert_numpy_array_equal(self.container, original)
# shallow copy should be the same too
assertIsInstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container):
container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
self.assert_numpy_array_equal(original, vals)
self.assertIsNot(original, vals)
vals[0] = n
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
class TestPandasDelegate(tm.TestCase):
def setUp(self):
pass
    def test_invalid_delegation(self):
        # these show that in order for the delegation to work
        # the _delegate_* methods need to be overridden to not raise a TypeError
class Delegator(object):
_properties = ['foo']
_methods = ['bar']
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate):
def __init__(self, obj):
self.obj = obj
Delegate._add_delegate_accessors(delegate=Delegator,
accessors=Delegator._properties,
typ='property')
Delegate._add_delegate_accessors(delegate=Delegator,
accessors=Delegator._methods,
typ='method')
delegate = Delegate(Delegator())
def f():
delegate.foo
self.assertRaises(TypeError, f)
def f():
delegate.foo = 5
self.assertRaises(TypeError, f)
def f():
delegate.foo()
self.assertRaises(TypeError, f)
class Ops(tm.TestCase):
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and
(obj.is_boolean() or not obj._can_hold_na)):
# don't test boolean / int64 index
return False
return True
def setUp(self):
self.bool_index = tm.makeBoolIndex(10, name='a')
self.int_index = tm.makeIntIndex(10, name='a')
self.float_index = tm.makeFloatIndex(10, name='a')
self.dt_index = tm.makeDateIndex(10, name='a')
self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(
tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10, name='a')
self.string_index = tm.makeStringIndex(10, name='a')
self.unicode_index = tm.makeUnicodeIndex(10, name='a')
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index, name='a')
self.float_series = Series(arr, index=self.float_index, name='a')
self.dt_series = Series(arr, index=self.dt_index, name='a')
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name='a')
self.string_series = Series(arr, index=self.string_index, name='a')
types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',
'unicode']
fmts = ["{0}_{1}".format(t, f)
for t in types for f in ['index', 'series']]
self.objs = [getattr(self, f)
for f in fmts if getattr(self, f, None) is not None]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(
getattr(o.index, op), index=o.index, name='a')
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
                # these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(expected,
np.ndarray):
self.assert_numpy_array_equal(result, expected)
else:
self.assertEqual(result, expected)
            # freq raises AttributeError on an Int64Index because it's not
            # defined; we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
self.assertRaises(TypeError, lambda: getattr(o, op))
else:
self.assertRaises(AttributeError,
lambda: getattr(o, op))
def test_binary_ops_docs(self):
from pandas import DataFrame, Panel
op_map = {'add': '+',
'sub': '-',
'mul': '*',
'mod': '%',
'pow': '**',
'truediv': '/',
'floordiv': '//'}
for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv',
'floordiv']:
for klass in [Series, DataFrame, Panel]:
operand1 = klass.__name__.lower()
operand2 = 'other'
op = op_map[op_name]
expected_str = ' '.join([operand1, op, operand2])
self.assertTrue(expected_str in getattr(klass,
op_name).__doc__)
# reverse version of the binary ops
expected_str = ' '.join([operand2, op, operand1])
self.assertTrue(expected_str in getattr(klass, 'r' +
op_name).__doc__)
class TestIndexOps(Ops):
def setUp(self):
super(TestIndexOps, self).setUp()
self.is_valid_objs = [o for o in self.objs if o._allow_index_ops]
self.not_valid_objs = [o for o in self.objs if not o._allow_index_ops]
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
self.assertFalse(result.iat[0])
self.assertFalse(result.iat[1])
# noinspection PyComparisonWithNone
result = o != None # noqa
self.assertTrue(result.iat[0])
self.assertTrue(result.iat[1])
result = None == o # noqa
self.assertFalse(result.iat[0])
self.assertFalse(result.iat[1])
# this fails for numpy < 1.9
# and oddly for *some* platforms
# result = None != o # noqa
# self.assertTrue(result.iat[0])
# self.assertTrue(result.iat[1])
result = None > o
self.assertFalse(result.iat[0])
self.assertFalse(result.iat[1])
result = o < None
self.assertFalse(result.iat[0])
self.assertFalse(result.iat[1])
def test_ndarray_compat_properties(self):
for o in self.objs:
# check that we work
for p in ['shape', 'dtype', 'flags', 'T', 'strides', 'itemsize',
'nbytes']:
self.assertIsNotNone(getattr(o, p, None))
self.assertTrue(hasattr(o, 'base'))
# if we have a datetimelike dtype then needs a view to work
# but the user is responsible for that
try:
self.assertIsNotNone(o.data)
except ValueError:
pass
self.assertRaises(ValueError, o.item) # len > 1
self.assertEqual(o.ndim, 1)
self.assertEqual(o.size, len(o))
self.assertEqual(Index([1]).item(), 1)
self.assertEqual(Series([1]).item(), 1)
def test_ops(self):
for op in ['max', 'min']:
for o in self.objs:
result = getattr(o, op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(ordinal=getattr(o.values, op)(),
freq=o.freq)
try:
self.assertEqual(result, expected)
except TypeError:
# comparing tz-aware series with np.array results in
# TypeError
expected = expected.astype('M8[ns]').astype('int64')
self.assertEqual(result.value, expected)
def test_nanops(self):
# GH 7261
for op in ['max', 'min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
self.assertEqual(getattr(obj, op)(), 2.0)
obj = klass([np.nan])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
# argmin/max
obj = Index(np.arange(5, dtype='int64'))
self.assertEqual(obj.argmin(), 0)
self.assertEqual(obj.argmax(), 4)
obj = Index([np.nan, 1, np.nan, 2])
self.assertEqual(obj.argmin(), 1)
self.assertEqual(obj.argmax(), 3)
obj = Index([np.nan])
self.assertEqual(obj.argmin(), -1)
self.assertEqual(obj.argmax(), -1)
obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2),
pd.NaT])
self.assertEqual(obj.argmin(), 1)
self.assertEqual(obj.argmax(), 2)
obj = Index([pd.NaT])
self.assertEqual(obj.argmin(), -1)
self.assertEqual(obj.argmax(), -1)
def test_value_counts_unique_nunique(self):
for o in self.objs:
klass = type(o)
values = o.values
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
# resets name from Index
expected_index = pd.Index(o[::-1])
expected_index.name = None
# attach name to klass
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
elif isinstance(o, DatetimeIndex):
# resets name from Index
expected_index = pd.Index(o[::-1])
expected_index.name = None
# attach name to klass
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
# don't test boolean
elif isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = pd.Index(values[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
else:
expected_index = pd.Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
o = klass(
np.repeat(values, range(1,
len(o) + 1)), index=idx, name='a')
expected_s = Series(
range(10, 0, -
1), index=expected_index, dtype='int64', name='a')
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
self.assertTrue(result.index.name is None)
self.assertEqual(result.name, 'a')
result = o.unique()
if isinstance(o, (DatetimeIndex, PeriodIndex)):
self.assertTrue(isinstance(result, o.__class__))
self.assertEqual(result.name, o.name)
self.assertEqual(result.freq, o.freq)
self.assert_numpy_array_equal(result, values)
self.assertEqual(o.nunique(), len(np.unique(o.values)))
for null_obj in [np.nan, None]:
for o in self.objs:
klass = type(o)
values = o.values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if com.is_datetimetz(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = pd.tslib.iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.tslib.iNaT
values = o.values
elif o.values.dtype == 'datetime64[ns]' or isinstance(
o, PeriodIndex):
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq
# ambiguous
# resets name from Index
expected_index = pd.Index(o, name=None)
# attach name to klass
o = klass(
np.repeat(values, range(
1, len(o) + 1)), freq=o.freq, name='a')
elif isinstance(o, Index):
expected_index = pd.Index(values, name=None)
o = klass(
np.repeat(values, range(1, len(o) + 1)), name='a')
else:
expected_index = pd.Index(values, name=None)
idx = np.repeat(o.index.values, range(1, len(o) + 1))
o = klass(
np.repeat(values, range(
1, len(o) + 1)), index=idx, name='a')
expected_s_na = Series(list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype='int64', name='a')
expected_s = Series(list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype='int64', name='a')
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
self.assertTrue(result_s_na.index.name is None)
self.assertEqual(result_s_na.name, 'a')
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
self.assertTrue(result_s.index.name is None)
self.assertEqual(result_s.name, 'a')
                # numpy_array_equal cannot compare arrays that include nan
result = o.unique()
self.assert_numpy_array_equal(result[1:], values[2:])
if isinstance(o, (DatetimeIndex, PeriodIndex)):
self.assertTrue(result.asi8[0] == pd.tslib.iNaT)
else:
self.assertTrue(pd.isnull(result[0]))
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(dropna=False), 9)
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.unique(s_values))
self.assertEqual(s.nunique(), 4)
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
# bins
self.assertRaises(TypeError,
lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({0.998: 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({0.998: 1.0})
tm.assert_series_equal(res1n, exp1n)
self.assert_numpy_array_equal(s1.unique(), np.array([1, 2, 3]))
self.assertEqual(s1.nunique(), 3)
res4 = s1.value_counts(bins=4)
exp4 = Series({0.998: 2,
1.5: 1,
2.0: 0,
2.5: 1}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series(
{0.998: 0.5,
1.5: 0.25,
2.0: 0.0,
2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan, 'd', 'd', 'a', 'a',
'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array(
['a', 'b', np.nan, 'd'], dtype='O'))
self.assertEqual(s.nunique(), 3)
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected,
check_index_type=False)
self.assert_numpy_array_equal(s.unique(), np.array([]))
self.assertEqual(s.nunique(), 0)
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM',
'xxyyzz20100101EGG', 'xxyyww20090101EGG',
'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3],
names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
s.name = None
idx = pd.to_datetime(
['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z',
'2009-01-01 00:00:00X'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z',
'2009-01-01 00:00:00Z',
'2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, DatetimeIndex):
expected = DatetimeIndex(expected)
self.assertTrue(s.unique().equals(expected))
else:
self.assert_numpy_array_equal(s.unique(), expected)
self.assertEqual(s.nunique(), 3)
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
self.assertEqual(result.index.dtype, 'datetime64[ns]')
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
self.assertEqual(unique.dtype, 'datetime64[ns]')
# numpy_array_equal cannot compare pd.NaT
self.assert_numpy_array_equal(unique[:3], expected)
self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') ==
pd.tslib.iNaT)
self.assertEqual(s.nunique(), 3)
self.assertEqual(s.nunique(dropna=False), 4)
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name='dt')
result = td.value_counts()
expected_s = Series([6], index=[Timedelta('1day')], name='dt')
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(['1 days'])
if isinstance(td, TimedeltaIndex):
self.assertTrue(td.unique().equals(expected))
else:
self.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name='dt')
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for o in self.objs:
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8)
exp_uniques = o
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)))
exp_uniques = o
labels, uniques = o.factorize()
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.values)
self.assert_numpy_array_equal(uniques, expected)
else:
self.assertTrue(uniques.equals(exp_uniques))
for o in self.objs:
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
labels, uniques = n.factorize(sort=True)
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.values)
self.assert_numpy_array_equal(uniques, expected)
else:
self.assertTrue(uniques.equals(o))
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4])
labels, uniques = n.factorize(sort=False)
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(np.concatenate([o.values[5:10], o.values[:5]
]))
self.assert_numpy_array_equal(uniques, expected)
else:
expected = o[5:].append(o[:5])
self.assertTrue(uniques.equals(expected))
def test_duplicated_drop_duplicates(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name='a')
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
self.assertFalse(result is original)
# has_duplicates
self.assertFalse(original.has_duplicates)
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True],
dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
result = idx.drop_duplicates(keep='last')
tm.assert_index_equal(result, idx[~expected])
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
duplicated = idx.duplicated(take_last=True)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
with tm.assert_produces_warning(FutureWarning):
result = idx.drop_duplicates(take_last=True)
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with tm.assertRaisesRegexp(
TypeError, "drop_duplicates\(\) got an unexpected "
"keyword argument"):
idx.drop_duplicates(inplace=True)
else:
expected = Series([False] * len(original),
index=original.index, name='a')
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
self.assertFalse(result is original)
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name='a')
expected = Series([False] * len(original) + [True, True],
index=idx, name='a')
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep='last'), expected)
tm.assert_series_equal(s.drop_duplicates(keep='last'),
s[~np.array(base)])
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
s.duplicated(take_last=True), expected)
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(s.drop_duplicates(take_last=True),
s[~np.array(base)])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(s.drop_duplicates(keep=False),
s[~np.array(base)])
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_fillna(self):
        # GH 11343
        # though Index.fillna and Series.fillna have separate implementations,
        # test here to confirm they work the same way
def get_fill_value(obj):
if isinstance(obj, pd.tseries.base.DatetimeIndexOpsMixin):
return obj.asobject.values[0]
else:
return obj.values[0]
for o in self.objs:
klass = type(o)
values = o.values
# values will not be changed
result = o.fillna(get_fill_value(o))
if isinstance(o, Index):
self.assert_index_equal(o, result)
else:
self.assert_series_equal(o, result)
# check shallow_copied
self.assertFalse(o is result)
for null_obj in [np.nan, None]:
for o in self.objs:
klass = type(o)
values = o.values.copy()
if not self._allow_na_ops(o):
continue
# value for filling
fill_value = get_fill_value(o)
# special assign to the numpy array
if o.values.dtype == 'datetime64[ns]' or isinstance(
o, PeriodIndex):
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq
# ambiguous
expected = [fill_value.ordinal] * 2 + list(values[2:])
expected = klass(ordinal=expected, freq=o.freq)
o = klass(ordinal=values, freq=o.freq)
else:
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected)
o = klass(values)
result = o.fillna(fill_value)
if isinstance(o, Index):
self.assert_index_equal(result, expected)
else:
self.assert_series_equal(result, expected)
# check shallow_copied
self.assertFalse(o is result)
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if (com.is_object_dtype(o) or (isinstance(o, Series) and
com.is_object_dtype(o.index))):
# if there are objects, only deep will pick them up
self.assertTrue(res_deep > res)
else:
self.assertEqual(res, res_deep)
if isinstance(o, Series):
self.assertEqual(
(o.memory_usage(index=False) +
o.index.memory_usage()),
o.memory_usage(index=True)
)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
self.assertTrue(0 <= index <= len(o))
index = np.searchsorted(o, max(o), sorter=range(len(o)))
self.assertTrue(0 <= index <= len(o))
class TestFloat64HashTable(tm.TestCase):
def test_lookup_nan(self):
from pandas.hashtable import Float64HashTable
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = Float64HashTable()
m.map_locations(xs)
self.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs)))
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(obj.transpose(), obj)
else:
tm.assert_series_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
tm.assertRaisesRegexp(ValueError, self.errmsg,
obj.transpose, 1)
tm.assertRaisesRegexp(ValueError, self.errmsg,
obj.transpose, axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(np.transpose(obj), obj)
else:
tm.assert_series_equal(np.transpose(obj), obj)
tm.assertRaisesRegexp(ValueError, self.errmsg,
np.transpose, obj, axes=1)
class TestNoNewAttributesMixin(tm.TestCase):
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
self.assertFalse(hasattr(t, "__frozen"))
t.a = "test"
self.assertEqual(t.a, "test")
t._freeze()
# self.assertTrue("__frozen" not in dir(t))
self.assertIs(getattr(t, "__frozen"), True)
def f():
t.b = "test"
self.assertRaises(AttributeError, f)
self.assertFalse(hasattr(t, "b"))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
|
|
import Queue
import time
import os.path
from cStringIO import StringIO
import email.utils
import mock
import netlib.utils
import netlib.tutils
from netlib import odict
from netlib.http.semantics import CONTENT_MISSING, HDR_FORM_URLENCODED, HDR_FORM_MULTIPART
from libmproxy import filt, protocol, controller, utils, tnetstring, flow
from libmproxy.protocol import http_wrappers
from libmproxy.protocol.primitives import Error, Flow
from libmproxy.protocol.http import decoded
from libmproxy.proxy.config import HostMatcher
from libmproxy.proxy import ProxyConfig
from libmproxy.proxy.server import DummyServer
from libmproxy.proxy.connection import ClientConnection
import tutils
def test_app_registry():
ar = flow.AppRegistry()
ar.add("foo", "domain", 80)
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.host = "domain"
r.port = 80
assert ar.get(r)
r.port = 81
assert not ar.get(r)
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.host = "domain2"
r.port = 80
assert not ar.get(r)
r.headers["host"] = ["domain"]
assert ar.get(r)
class TestStickyCookieState:
def _response(self, cookie, host):
s = flow.StickyCookieState(filt.parse(".*"))
f = tutils.tflow(req=netlib.tutils.treq(host=host, port=80), resp=True)
f.response.headers["Set-Cookie"] = [cookie]
s.handle_response(f)
return s, f
def test_domain_match(self):
s = flow.StickyCookieState(filt.parse(".*"))
assert s.domain_match("www.google.com", ".google.com")
assert s.domain_match("google.com", ".google.com")
def test_handle_response(self):
c = "SSID=mooo; domain=.google.com, FOO=bar; Domain=.google.com; Path=/; "\
"Expires=Wed, 13-Jan-2021 22:23:01 GMT; Secure; "
s, f = self._response(c, "host")
assert not s.jar.keys()
s, f = self._response(c, "www.google.com")
assert s.jar.keys()
s, f = self._response("SSID=mooo", "www.google.com")
assert s.jar.keys()[0] == ('www.google.com', 80, '/')
def test_handle_request(self):
s, f = self._response("SSID=mooo", "www.google.com")
assert "cookie" not in f.request.headers
s.handle_request(f)
assert "cookie" in f.request.headers
class TestStickyAuthState:
def test_handle_response(self):
s = flow.StickyAuthState(filt.parse(".*"))
f = tutils.tflow(resp=True)
f.request.headers["authorization"] = ["foo"]
s.handle_request(f)
assert "address" in s.hosts
f = tutils.tflow(resp=True)
s.handle_request(f)
assert f.request.headers["authorization"] == ["foo"]
class TestClientPlaybackState:
def test_tick(self):
first = tutils.tflow()
s = flow.State()
fm = flow.FlowMaster(None, s)
fm.start_client_playback([first, tutils.tflow()], True)
c = fm.client_playback
c.testing = True
assert not c.done()
assert not s.flow_count()
assert c.count() == 2
c.tick(fm)
assert s.flow_count()
assert c.count() == 1
c.tick(fm)
assert c.count() == 1
c.clear(c.current)
c.tick(fm)
assert c.count() == 0
c.clear(c.current)
assert c.done()
q = Queue.Queue()
fm.state.clear()
fm.tick(q, timeout=0)
fm.stop_client_playback()
assert not fm.client_playback
class TestServerPlaybackState:
def test_hash(self):
s = flow.ServerPlaybackState(
None,
[],
False,
False,
None,
False,
None,
False)
r = tutils.tflow()
r2 = tutils.tflow()
assert s._hash(r)
assert s._hash(r) == s._hash(r2)
r.request.headers["foo"] = ["bar"]
assert s._hash(r) == s._hash(r2)
r.request.path = "voing"
assert s._hash(r) != s._hash(r2)
r.request.path = "path?blank_value"
r2.request.path = "path?"
assert s._hash(r) != s._hash(r2)
def test_headers(self):
s = flow.ServerPlaybackState(
["foo"],
[],
False,
False,
None,
False,
None,
False)
r = tutils.tflow(resp=True)
r.request.headers["foo"] = ["bar"]
r2 = tutils.tflow(resp=True)
assert not s._hash(r) == s._hash(r2)
r2.request.headers["foo"] = ["bar"]
assert s._hash(r) == s._hash(r2)
r2.request.headers["oink"] = ["bar"]
assert s._hash(r) == s._hash(r2)
r = tutils.tflow(resp=True)
r2 = tutils.tflow(resp=True)
assert s._hash(r) == s._hash(r2)
def test_load(self):
r = tutils.tflow(resp=True)
r.request.headers["key"] = ["one"]
r2 = tutils.tflow(resp=True)
r2.request.headers["key"] = ["two"]
s = flow.ServerPlaybackState(
None, [
r, r2], False, False, None, False, None, False)
assert s.count() == 2
assert len(s.fmap.keys()) == 1
n = s.next_flow(r)
assert n.request.headers["key"] == ["one"]
assert s.count() == 1
n = s.next_flow(r)
assert n.request.headers["key"] == ["two"]
assert s.count() == 0
assert not s.next_flow(r)
def test_load_with_nopop(self):
r = tutils.tflow(resp=True)
r.request.headers["key"] = ["one"]
r2 = tutils.tflow(resp=True)
r2.request.headers["key"] = ["two"]
s = flow.ServerPlaybackState(
None, [
r, r2], False, True, None, False, None, False)
assert s.count() == 2
s.next_flow(r)
assert s.count() == 2
def test_ignore_params(self):
s = flow.ServerPlaybackState(
None, [], False, False, [
"param1", "param2"], False, None, False)
r = tutils.tflow(resp=True)
r.request.path = "/test?param1=1"
r2 = tutils.tflow(resp=True)
r2.request.path = "/test"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param1=2"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param2=1"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param3=2"
assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_params(self):
s = flow.ServerPlaybackState(
None, [], False, False, None, False, [
"param1", "param2"], False)
r = tutils.tflow(resp=True)
r.request.headers[
"Content-Type"] = ["application/x-www-form-urlencoded"]
r.request.content = "paramx=x¶m1=1"
r2 = tutils.tflow(resp=True)
r2.request.headers[
"Content-Type"] = ["application/x-www-form-urlencoded"]
r2.request.content = "paramx=x¶m1=1"
# same parameters
assert s._hash(r) == s._hash(r2)
# ignored parameters !=
r2.request.content = "paramx=x¶m1=2"
assert s._hash(r) == s._hash(r2)
# missing parameter
r2.request.content = "paramx=x"
assert s._hash(r) == s._hash(r2)
# ignorable parameter added
r2.request.content = "paramx=x¶m1=2"
assert s._hash(r) == s._hash(r2)
# not ignorable parameter changed
r2.request.content = "paramx=y¶m1=1"
assert not s._hash(r) == s._hash(r2)
# not ignorable parameter missing
r2.request.content = "param1=1"
assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_params_other_content_type(self):
s = flow.ServerPlaybackState(
None, [], False, False, None, False, [
"param1", "param2"], False)
r = tutils.tflow(resp=True)
r.request.headers["Content-Type"] = ["application/json"]
r.request.content = '{"param1":"1"}'
r2 = tutils.tflow(resp=True)
r2.request.headers["Content-Type"] = ["application/json"]
r2.request.content = '{"param1":"1"}'
# same content
assert s._hash(r) == s._hash(r2)
        # distinct content (note only x-www-form-urlencoded payload is analysed)
r2.request.content = '{"param1":"2"}'
assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_wins_over_params(self):
# NOTE: parameters are mutually exclusive in options
s = flow.ServerPlaybackState(
None, [], False, False, None, True, [
"param1", "param2"], False)
r = tutils.tflow(resp=True)
r.request.headers[
"Content-Type"] = ["application/x-www-form-urlencoded"]
r.request.content = "paramx=y"
r2 = tutils.tflow(resp=True)
r2.request.headers[
"Content-Type"] = ["application/x-www-form-urlencoded"]
r2.request.content = "paramx=x"
# same parameters
assert s._hash(r) == s._hash(r2)
def test_ignore_content(self):
s = flow.ServerPlaybackState(
None,
[],
False,
False,
None,
False,
None,
False)
r = tutils.tflow(resp=True)
r2 = tutils.tflow(resp=True)
r.request.content = "foo"
r2.request.content = "foo"
assert s._hash(r) == s._hash(r2)
r2.request.content = "bar"
assert not s._hash(r) == s._hash(r2)
# now ignoring content
s = flow.ServerPlaybackState(
None,
[],
False,
False,
None,
True,
None,
False)
r = tutils.tflow(resp=True)
r2 = tutils.tflow(resp=True)
r.request.content = "foo"
r2.request.content = "foo"
assert s._hash(r) == s._hash(r2)
r2.request.content = "bar"
assert s._hash(r) == s._hash(r2)
r2.request.content = ""
assert s._hash(r) == s._hash(r2)
r2.request.content = None
assert s._hash(r) == s._hash(r2)
def test_ignore_host(self):
s = flow.ServerPlaybackState(
None,
[],
False,
False,
None,
False,
None,
True)
r = tutils.tflow(resp=True)
r2 = tutils.tflow(resp=True)
r.request.host = "address"
r2.request.host = "address"
assert s._hash(r) == s._hash(r2)
r2.request.host = "wrong_address"
assert s._hash(r) == s._hash(r2)
class TestFlow:
def test_copy(self):
f = tutils.tflow(resp=True)
a0 = f.get_state()
f2 = f.copy()
a = f.get_state()
b = f2.get_state()
del a["id"]
del b["id"]
assert a == b
assert not f == f2
assert not f is f2
assert f.request.get_state() == f2.request.get_state()
assert not f.request is f2.request
assert f.request.headers == f2.request.headers
assert not f.request.headers is f2.request.headers
assert f.response.get_state() == f2.response.get_state()
assert not f.response is f2.response
f = tutils.tflow(err=True)
f2 = f.copy()
assert not f is f2
assert not f.request is f2.request
assert f.request.headers == f2.request.headers
assert not f.request.headers is f2.request.headers
assert f.error.get_state() == f2.error.get_state()
assert not f.error is f2.error
def test_match(self):
f = tutils.tflow(resp=True)
assert not f.match("~b test")
assert f.match(None)
assert not f.match("~b test")
f = tutils.tflow(err=True)
assert f.match("~e")
tutils.raises(ValueError, f.match, "~")
def test_backup(self):
f = tutils.tflow()
f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
f.request.content = "foo"
assert not f.modified()
f.backup()
f.request.content = "bar"
assert f.modified()
f.revert()
assert f.request.content == "foo"
def test_backup_idempotence(self):
f = tutils.tflow(resp=True)
f.backup()
f.revert()
f.backup()
f.revert()
def test_getset_state(self):
f = tutils.tflow(resp=True)
state = f.get_state()
assert f.get_state() == protocol.http.HTTPFlow.from_state(
state).get_state()
f.response = None
f.error = Error("error")
state = f.get_state()
assert f.get_state() == protocol.http.HTTPFlow.from_state(
state).get_state()
f2 = f.copy()
f2.id = f.id # copy creates a different uuid
assert f.get_state() == f2.get_state()
assert not f == f2
f2.error = Error("e2")
assert not f == f2
f.load_state(f2.get_state())
assert f.get_state() == f2.get_state()
def test_kill(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
f = tutils.tflow()
f.intercept(mock.Mock())
assert not f.reply.acked
f.kill(fm)
assert f.reply.acked
def test_killall(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
f = tutils.tflow()
fm.handle_request(f)
f = tutils.tflow()
fm.handle_request(f)
for i in s.view:
assert not i.reply.acked
s.killall(fm)
for i in s.view:
assert i.reply.acked
def test_accept_intercept(self):
f = tutils.tflow()
f.intercept(mock.Mock())
assert not f.reply.acked
f.accept_intercept(mock.Mock())
assert f.reply.acked
def test_replace_unicode(self):
f = tutils.tflow(resp=True)
f.response.content = "\xc2foo"
f.replace("foo", u"bar")
def test_replace(self):
f = tutils.tflow(resp=True)
f.request.headers["foo"] = ["foo"]
f.request.content = "afoob"
f.response.headers["foo"] = ["foo"]
f.response.content = "afoob"
assert f.replace("foo", "bar") == 6
assert f.request.headers["bar"] == ["bar"]
assert f.request.content == "abarb"
assert f.response.headers["bar"] == ["bar"]
assert f.response.content == "abarb"
def test_replace_encoded(self):
f = tutils.tflow(resp=True)
f.request.content = "afoob"
f.request.encode("gzip")
f.response.content = "afoob"
f.response.encode("gzip")
f.replace("foo", "bar")
assert f.request.content != "abarb"
f.request.decode()
assert f.request.content == "abarb"
assert f.response.content != "abarb"
f.response.decode()
assert f.response.content == "abarb"
class TestState:
def test_backup(self):
c = flow.State()
f = tutils.tflow()
c.add_flow(f)
f.backup()
c.revert(f)
def test_flow(self):
"""
normal flow:
connect -> request -> response
"""
c = flow.State()
f = tutils.tflow()
c.add_flow(f)
assert f
assert c.flow_count() == 1
assert c.active_flow_count() == 1
newf = tutils.tflow()
assert c.add_flow(newf)
assert c.active_flow_count() == 2
f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
assert c.update_flow(f)
assert c.flow_count() == 2
assert c.active_flow_count() == 1
_ = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
assert not c.update_flow(None)
assert c.active_flow_count() == 1
newf.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
assert c.update_flow(newf)
assert c.active_flow_count() == 0
def test_err(self):
c = flow.State()
f = tutils.tflow()
c.add_flow(f)
f.error = Error("message")
assert c.update_flow(f)
c = flow.State()
f = tutils.tflow()
c.add_flow(f)
c.set_limit("~e")
assert not c.view
f.error = tutils.terr()
assert c.update_flow(f)
assert c.view
def test_set_limit(self):
c = flow.State()
f = tutils.tflow()
assert len(c.view) == 0
c.add_flow(f)
assert len(c.view) == 1
c.set_limit("~s")
assert c.limit_txt == "~s"
assert len(c.view) == 0
f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
c.update_flow(f)
assert len(c.view) == 1
c.set_limit(None)
assert len(c.view) == 1
f = tutils.tflow()
c.add_flow(f)
assert len(c.view) == 2
c.set_limit("~q")
assert len(c.view) == 1
c.set_limit("~s")
assert len(c.view) == 1
assert "Invalid" in c.set_limit("~")
def test_set_intercept(self):
c = flow.State()
assert not c.set_intercept("~q")
assert c.intercept_txt == "~q"
assert "Invalid" in c.set_intercept("~")
assert not c.set_intercept(None)
assert c.intercept_txt is None
def _add_request(self, state):
f = tutils.tflow()
state.add_flow(f)
return f
def _add_response(self, state):
f = tutils.tflow()
state.add_flow(f)
f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
state.update_flow(f)
def _add_error(self, state):
f = tutils.tflow(err=True)
state.add_flow(f)
def test_clear(self):
c = flow.State()
f = self._add_request(c)
f.intercepted = True
c.clear()
assert c.flow_count() == 0
def test_dump_flows(self):
c = flow.State()
self._add_request(c)
self._add_response(c)
self._add_request(c)
self._add_response(c)
self._add_request(c)
self._add_response(c)
self._add_error(c)
flows = c.view[:]
c.clear()
c.load_flows(flows)
assert isinstance(c.flows[0], Flow)
def test_accept_all(self):
c = flow.State()
self._add_request(c)
self._add_response(c)
self._add_request(c)
c.accept_all(mock.Mock())
class TestSerialize:
def _treader(self):
sio = StringIO()
w = flow.FlowWriter(sio)
for i in range(3):
f = tutils.tflow(resp=True)
w.add(f)
for i in range(3):
f = tutils.tflow(err=True)
w.add(f)
sio.seek(0)
return flow.FlowReader(sio)
def test_roundtrip(self):
sio = StringIO()
f = tutils.tflow()
f.request.content = "".join(chr(i) for i in range(255))
w = flow.FlowWriter(sio)
w.add(f)
sio.seek(0)
r = flow.FlowReader(sio)
l = list(r.stream())
assert len(l) == 1
f2 = l[0]
assert f2.get_state() == f.get_state()
assert f2.request == f.request
def test_load_flows(self):
r = self._treader()
s = flow.State()
fm = flow.FlowMaster(None, s)
fm.load_flows(r)
assert len(s.flows) == 6
def test_load_flows_reverse(self):
r = self._treader()
s = flow.State()
conf = ProxyConfig(
mode="reverse",
upstream_server=[
True,
True,
"use-this-domain",
80])
fm = flow.FlowMaster(DummyServer(conf), s)
fm.load_flows(r)
assert s.flows[0].request.host == "use-this-domain"
def test_filter(self):
sio = StringIO()
fl = filt.parse("~c 200")
w = flow.FilteredFlowWriter(sio, fl)
f = tutils.tflow(resp=True)
f.response.code = 200
w.add(f)
f = tutils.tflow(resp=True)
f.response.code = 201
w.add(f)
sio.seek(0)
r = flow.FlowReader(sio)
assert len(list(r.stream()))
def test_error(self):
sio = StringIO()
sio.write("bogus")
sio.seek(0)
r = flow.FlowReader(sio)
tutils.raises(flow.FlowReadError, list, r.stream())
f = flow.FlowReadError("foo")
assert f.strerror == "foo"
def test_versioncheck(self):
f = tutils.tflow()
d = f.get_state()
d["version"] = (0, 0)
sio = StringIO()
tnetstring.dump(d, sio)
sio.seek(0)
r = flow.FlowReader(sio)
tutils.raises("version", list, r.stream())
class TestFlowMaster:
def test_load_script(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
assert not fm.load_script(tutils.test_data.path("scripts/a.py"))
assert not fm.load_script(tutils.test_data.path("scripts/a.py"))
assert not fm.unload_scripts()
assert fm.load_script("nonexistent")
assert "ValueError" in fm.load_script(
tutils.test_data.path("scripts/starterr.py"))
assert len(fm.scripts) == 0
def test_getset_ignore(self):
p = mock.Mock()
p.config.check_ignore = HostMatcher()
fm = flow.FlowMaster(p, flow.State())
assert not fm.get_ignore_filter()
fm.set_ignore_filter(["^apple\.com:", ":443$"])
assert fm.get_ignore_filter()
def test_replay(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
f = tutils.tflow(resp=True)
f.request.content = CONTENT_MISSING
assert "missing" in fm.replay_request(f)
f.intercepted = True
assert "intercepting" in fm.replay_request(f)
f.live = True
assert "live" in fm.replay_request(f, run_scripthooks=True)
def test_script_reqerr(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
assert not fm.load_script(tutils.test_data.path("scripts/reqerr.py"))
f = tutils.tflow()
fm.handle_clientconnect(f.client_conn)
assert fm.handle_request(f)
def test_script(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
assert not fm.load_script(tutils.test_data.path("scripts/all.py"))
f = tutils.tflow(resp=True)
fm.handle_clientconnect(f.client_conn)
assert fm.scripts[0].ns["log"][-1] == "clientconnect"
fm.handle_serverconnect(f.server_conn)
assert fm.scripts[0].ns["log"][-1] == "serverconnect"
fm.handle_request(f)
assert fm.scripts[0].ns["log"][-1] == "request"
fm.handle_response(f)
assert fm.scripts[0].ns["log"][-1] == "response"
# load second script
assert not fm.load_script(tutils.test_data.path("scripts/all.py"))
assert len(fm.scripts) == 2
fm.handle_clientdisconnect(f.server_conn)
assert fm.scripts[0].ns["log"][-1] == "clientdisconnect"
assert fm.scripts[1].ns["log"][-1] == "clientdisconnect"
# unload first script
fm.unload_scripts()
assert len(fm.scripts) == 0
assert not fm.load_script(tutils.test_data.path("scripts/all.py"))
f.error = tutils.terr()
fm.handle_error(f)
assert fm.scripts[0].ns["log"][-1] == "error"
def test_duplicate_flow(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
f = tutils.tflow(resp=True)
f = fm.load_flow(f)
assert s.flow_count() == 1
f2 = fm.duplicate_flow(f)
assert f2.response
assert s.flow_count() == 2
assert s.index(f2) == 1
def test_all(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
fm.anticache = True
fm.anticomp = True
f = tutils.tflow(req=None)
fm.handle_clientconnect(f.client_conn)
f.request = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
fm.handle_request(f)
assert s.flow_count() == 1
f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
fm.handle_response(f)
assert not fm.handle_response(None)
assert s.flow_count() == 1
fm.handle_clientdisconnect(f.client_conn)
f.error = Error("msg")
f.error.reply = controller.DummyReply()
fm.handle_error(f)
fm.load_script(tutils.test_data.path("scripts/a.py"))
fm.shutdown()
def test_client_playback(self):
s = flow.State()
f = tutils.tflow(resp=True)
pb = [tutils.tflow(resp=True), f]
fm = flow.FlowMaster(DummyServer(ProxyConfig()), s)
assert not fm.start_server_playback(
pb,
False,
[],
False,
False,
None,
False,
None,
False)
assert not fm.start_client_playback(pb, False)
fm.client_playback.testing = True
q = Queue.Queue()
assert not fm.state.flow_count()
fm.tick(q, 0)
assert fm.state.flow_count()
f.error = Error("error")
fm.handle_error(f)
def test_server_playback(self):
s = flow.State()
f = tutils.tflow()
f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp(f.request))
pb = [f]
fm = flow.FlowMaster(None, s)
fm.refresh_server_playback = True
assert not fm.do_server_playback(tutils.tflow())
fm.start_server_playback(
pb,
False,
[],
False,
False,
None,
False,
None,
False)
assert fm.do_server_playback(tutils.tflow())
fm.start_server_playback(
pb,
False,
[],
True,
False,
None,
False,
None,
False)
r = tutils.tflow()
r.request.content = "gibble"
assert not fm.do_server_playback(r)
assert fm.do_server_playback(tutils.tflow())
fm.start_server_playback(
pb,
False,
[],
True,
False,
None,
False,
None,
False)
q = Queue.Queue()
fm.tick(q, 0)
assert fm.should_exit.is_set()
fm.stop_server_playback()
assert not fm.server_playback
def test_server_playback_kill(self):
s = flow.State()
f = tutils.tflow()
f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp(f.request))
pb = [f]
fm = flow.FlowMaster(None, s)
fm.refresh_server_playback = True
fm.start_server_playback(
pb,
True,
[],
False,
False,
None,
False,
None,
False)
f = tutils.tflow()
f.request.host = "nonexistent"
fm.process_new_request(f)
assert "killed" in f.error.msg
def test_stickycookie(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
assert "Invalid" in fm.set_stickycookie("~h")
fm.set_stickycookie(".*")
assert fm.stickycookie_state
fm.set_stickycookie(None)
assert not fm.stickycookie_state
fm.set_stickycookie(".*")
f = tutils.tflow(resp=True)
f.response.headers["set-cookie"] = ["foo=bar"]
fm.handle_request(f)
fm.handle_response(f)
assert fm.stickycookie_state.jar
assert not "cookie" in f.request.headers
f = f.copy()
fm.handle_request(f)
assert f.request.headers["cookie"] == ["foo=bar"]
def test_stickyauth(self):
s = flow.State()
fm = flow.FlowMaster(None, s)
assert "Invalid" in fm.set_stickyauth("~h")
fm.set_stickyauth(".*")
assert fm.stickyauth_state
fm.set_stickyauth(None)
assert not fm.stickyauth_state
fm.set_stickyauth(".*")
f = tutils.tflow(resp=True)
f.request.headers["authorization"] = ["foo"]
fm.handle_request(f)
f = tutils.tflow(resp=True)
assert fm.stickyauth_state.hosts
assert not "authorization" in f.request.headers
fm.handle_request(f)
assert f.request.headers["authorization"] == ["foo"]
def test_stream(self):
with tutils.tmpdir() as tdir:
p = os.path.join(tdir, "foo")
def r():
r = flow.FlowReader(open(p, "rb"))
return list(r.stream())
s = flow.State()
fm = flow.FlowMaster(None, s)
f = tutils.tflow(resp=True)
fm.start_stream(file(p, "ab"), None)
fm.handle_request(f)
fm.handle_response(f)
fm.stop_stream()
assert r()[0].response
f = tutils.tflow()
fm.start_stream(file(p, "ab"), None)
fm.handle_request(f)
fm.shutdown()
assert not r()[1].response
class TestRequest:
def test_simple(self):
f = tutils.tflow()
r = f.request
u = r.url
r.url = u
tutils.raises(ValueError, setattr, r, "url", "")
assert r.url == u
r2 = r.copy()
assert r.get_state() == r2.get_state()
def test_get_url(self):
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
assert r.url == "http://address:22/path"
r.scheme = "https"
assert r.url == "https://address:22/path"
r.host = "host"
r.port = 42
assert r.url == "https://host:42/path"
r.host = "address"
r.port = 22
assert r.url == "https://address:22/path"
assert r.pretty_url(True) == "https://address:22/path"
r.headers["Host"] = ["foo.com"]
assert r.pretty_url(False) == "https://address:22/path"
assert r.pretty_url(True) == "https://foo.com:22/path"
def test_path_components(self):
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.path = "/"
assert r.get_path_components() == []
r.path = "/foo/bar"
assert r.get_path_components() == ["foo", "bar"]
q = odict.ODict()
q["test"] = ["123"]
r.set_query(q)
assert r.get_path_components() == ["foo", "bar"]
r.set_path_components([])
assert r.get_path_components() == []
r.set_path_components(["foo"])
assert r.get_path_components() == ["foo"]
r.set_path_components(["/oo"])
assert r.get_path_components() == ["/oo"]
assert "%2F" in r.path
def test_getset_form_urlencoded(self):
d = odict.ODict([("one", "two"), ("three", "four")])
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq(content=netlib.utils.urlencode(d.lst)))
r.headers["content-type"] = [HDR_FORM_URLENCODED]
assert r.get_form_urlencoded() == d
d = odict.ODict([("x", "y")])
r.set_form_urlencoded(d)
assert r.get_form_urlencoded() == d
r.headers["content-type"] = ["foo"]
assert not r.get_form_urlencoded()
def test_getset_query(self):
h = odict.ODictCaseless()
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.path = "/foo?x=y&a=b"
q = r.get_query()
assert q.lst == [("x", "y"), ("a", "b")]
r.path = "/"
q = r.get_query()
assert not q
r.path = "/?adsfa"
q = r.get_query()
assert q.lst == [("adsfa", "")]
r.path = "/foo?x=y&a=b"
assert r.get_query()
r.set_query(odict.ODict([]))
assert not r.get_query()
qv = odict.ODict([("a", "b"), ("c", "d")])
r.set_query(qv)
assert r.get_query() == qv
def test_anticache(self):
h = odict.ODictCaseless()
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.headers = h
h["if-modified-since"] = ["test"]
h["if-none-match"] = ["test"]
r.anticache()
assert not "if-modified-since" in r.headers
assert not "if-none-match" in r.headers
def test_replace(self):
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.path = "path/foo"
r.headers["Foo"] = ["fOo"]
r.content = "afoob"
assert r.replace("foo(?i)", "boo") == 4
assert r.path == "path/boo"
assert not "foo" in r.content
assert r.headers["boo"] == ["boo"]
def test_constrain_encoding(self):
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.headers["accept-encoding"] = ["gzip", "oink"]
r.constrain_encoding()
assert "oink" not in r.headers["accept-encoding"]
def test_decodeencode(self):
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.decode()
assert not r.headers["content-encoding"]
assert r.content == "falafel"
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.content = "falafel"
assert not r.decode()
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.encode("identity")
assert r.headers["content-encoding"] == ["identity"]
assert r.content == "falafel"
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.encode("gzip")
assert r.headers["content-encoding"] == ["gzip"]
assert r.content != "falafel"
r.decode()
assert not r.headers["content-encoding"]
assert r.content == "falafel"
def test_get_decoded_content(self):
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
r.content = None
r.headers["content-encoding"] = ["identity"]
assert r.get_decoded_content() == None
r.content = "falafel"
r.encode("gzip")
assert r.get_decoded_content() == "falafel"
def test_get_content_type(self):
h = odict.ODictCaseless()
h["Content-Type"] = ["text/plain"]
resp = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
resp.headers = h
assert resp.headers.get_first("content-type") == "text/plain"
class TestResponse:
def test_simple(self):
f = tutils.tflow(resp=True)
resp = f.response
resp2 = resp.copy()
assert resp2.get_state() == resp.get_state()
def test_refresh(self):
r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
n = time.time()
r.headers["date"] = [email.utils.formatdate(n)]
pre = r.headers["date"]
r.refresh(n)
assert pre == r.headers["date"]
r.refresh(n + 60)
d = email.utils.parsedate_tz(r.headers["date"][0])
d = email.utils.mktime_tz(d)
# Weird that this is not exact...
assert abs(60 - (d - n)) <= 1
r.headers[
"set-cookie"] = ["MOO=BAR; Expires=Tue, 08-Mar-2011 00:20:38 GMT; Path=foo.com; Secure"]
r.refresh()
def test_refresh_cookie(self):
r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
# Invalid expires format, sent to us by Reddit.
c = "rfoo=bar; Domain=reddit.com; expires=Thu, 31 Dec 2037 23:59:59 GMT; Path=/"
assert r._refresh_cookie(c, 60)
c = "MOO=BAR; Expires=Tue, 08-Mar-2011 00:20:38 GMT; Path=foo.com; Secure"
assert "00:21:38" in r._refresh_cookie(c, 60)
def test_replace(self):
r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["Foo"] = ["fOo"]
r.content = "afoob"
assert r.replace("foo(?i)", "boo") == 3
assert not "foo" in r.content
assert r.headers["boo"] == ["boo"]
def test_decodeencode(self):
r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
assert r.decode()
assert not r.headers["content-encoding"]
assert r.content == "falafel"
r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.encode("identity")
assert r.headers["content-encoding"] == ["identity"]
assert r.content == "falafel"
r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.encode("gzip")
assert r.headers["content-encoding"] == ["gzip"]
assert r.content != "falafel"
assert r.decode()
assert not r.headers["content-encoding"]
assert r.content == "falafel"
r.headers["content-encoding"] = ["gzip"]
assert not r.decode()
assert r.content == "falafel"
def test_get_content_type(self):
h = odict.ODictCaseless()
h["Content-Type"] = ["text/plain"]
resp = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
resp.headers = h
assert resp.headers.get_first("content-type") == "text/plain"
class TestError:
def test_getset_state(self):
e = Error("Error")
state = e.get_state()
assert Error.from_state(state).get_state() == e.get_state()
assert e.copy()
e2 = Error("bar")
assert not e == e2
e.load_state(e2.get_state())
assert e.get_state() == e2.get_state()
e3 = e.copy()
assert e3.get_state() == e.get_state()
class TestClientConnection:
def test_state(self):
c = tutils.tclient_conn()
assert ClientConnection.from_state(c.get_state()).get_state() ==\
c.get_state()
c2 = tutils.tclient_conn()
c2.address.address = (c2.address.host, 4242)
assert not c == c2
c2.timestamp_start = 42
c.load_state(c2.get_state())
assert c.timestamp_start == 42
c3 = c.copy()
assert c3.get_state() == c.get_state()
assert str(c)
def test_decoded():
r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
assert r.content == "content"
assert not r.headers["content-encoding"]
r.encode("gzip")
assert r.headers["content-encoding"]
assert r.content != "content"
with decoded(r):
assert not r.headers["content-encoding"]
assert r.content == "content"
assert r.headers["content-encoding"]
assert r.content != "content"
with decoded(r):
r.content = "foo"
assert r.content != "foo"
r.decode()
assert r.content == "foo"
def test_replacehooks():
h = flow.ReplaceHooks()
h.add("~q", "foo", "bar")
assert h.lst
h.set(
[
(".*", "one", "two"),
(".*", "three", "four"),
]
)
assert h.count() == 2
h.clear()
assert not h.lst
h.add("~q", "foo", "bar")
h.add("~s", "foo", "bar")
v = h.get_specs()
assert v == [('~q', 'foo', 'bar'), ('~s', 'foo', 'bar')]
assert h.count() == 2
h.clear()
assert h.count() == 0
f = tutils.tflow()
f.request.content = "foo"
h.add("~s", "foo", "bar")
h.run(f)
assert f.request.content == "foo"
f = tutils.tflow(resp=True)
f.request.content = "foo"
f.response.content = "foo"
h.run(f)
assert f.response.content == "bar"
assert f.request.content == "foo"
f = tutils.tflow()
h.clear()
h.add("~q", "foo", "bar")
f.request.content = "foo"
h.run(f)
assert f.request.content == "bar"
assert not h.add("~", "foo", "bar")
assert not h.add("foo", "*", "bar")
def test_setheaders():
h = flow.SetHeaders()
h.add("~q", "foo", "bar")
assert h.lst
h.set(
[
(".*", "one", "two"),
(".*", "three", "four"),
]
)
assert h.count() == 2
h.clear()
assert not h.lst
h.add("~q", "foo", "bar")
h.add("~s", "foo", "bar")
v = h.get_specs()
assert v == [('~q', 'foo', 'bar'), ('~s', 'foo', 'bar')]
assert h.count() == 2
h.clear()
assert h.count() == 0
f = tutils.tflow()
f.request.content = "foo"
h.add("~s", "foo", "bar")
h.run(f)
assert f.request.content == "foo"
h.clear()
h.add("~s", "one", "two")
h.add("~s", "one", "three")
f = tutils.tflow(resp=True)
f.request.headers["one"] = ["xxx"]
f.response.headers["one"] = ["xxx"]
h.run(f)
assert f.request.headers["one"] == ["xxx"]
assert f.response.headers["one"] == ["two", "three"]
h.clear()
h.add("~q", "one", "two")
h.add("~q", "one", "three")
f = tutils.tflow()
f.request.headers["one"] = ["xxx"]
h.run(f)
assert f.request.headers["one"] == ["two", "three"]
assert not h.add("~", "foo", "bar")
|
|
## @file
# This file is used to define common string related functions used in parsing process
#
# Copyright (c) 2007 - 2008, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import re
import DataType
import os.path
import string
import EdkLogger as EdkLogger
import GlobalData
from BuildToolError import *
from CommonDataClass.Exceptions import *
gHexVerPatt = re.compile('0x[a-f0-9]{4}[a-f0-9]{4}$', re.IGNORECASE)
gHumanReadableVerPatt = re.compile(r'([1-9][0-9]*|0)\.[0-9]{1,2}$')
## GetSplitValueList
#
# Get a value list from a string with multiple values split with SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be split
# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
# @retval list() A list of the split values
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
ValueList = []
Last = 0
Escaped = False
InString = False
for Index in range(0, len(String)):
Char = String[Index]
if not Escaped:
# Found a splitter not in a string, split it
if not InString and Char == SplitTag:
ValueList.append(String[Last:Index].strip())
Last = Index + 1
if MaxSplit > 0 and len(ValueList) >= MaxSplit:
break
if Char == '\\' and InString:
Escaped = True
elif Char == '"':
if not InString:
InString = True
else:
InString = False
else:
Escaped = False
if Last < len(String):
ValueList.append(String[Last:].strip())
elif Last == len(String):
ValueList.append('')
return ValueList
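def _example_GetSplitValueList():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module; it assumes DataType.TAB_VALUE_SPLIT == '|' (as the
    # 'AAA|BBB|CCC' example above implies). Splitting is suspended inside
    # double-quoted substrings and MaxSplit caps the number of splits performed.
    assert GetSplitValueList('AAA|"B|B"|CCC') == ['AAA', '"B|B"', 'CCC']
    assert GetSplitValueList('AAA|BBB|CCC', '|', 1) == ['AAA', 'BBB|CCC']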
## GetSplitList
#
# Get a value list from a string with multiple values split with SplitStr
# The default SplitStr is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be split
# @param SplitStr: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
# @retval list() A list of the split values
#
def GetSplitList(String, SplitStr=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
return map(lambda l: l.strip(), String.split(SplitStr, MaxSplit))
## MergeArches
#
# Find all arches of a key in the dict and add the new arch to the list
# If the key has no arch yet, set the arch directly
#
# @param Dict: The input value for Dict
# @param Key: The input value for Key
# @param Arch: The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
if Key in Dict.keys():
Dict[Key].append(Arch)
else:
Dict[Key] = Arch.split()
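def _example_MergeArches():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module: the first call creates the list, later calls append.
    ArchDict = {}
    MergeArches(ArchDict, 'PeiCore', 'IA32')
    MergeArches(ArchDict, 'PeiCore', 'X64')
    assert ArchDict == {'PeiCore': ['IA32', 'X64']}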
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map Defines[VarName] = PATH
# Return False if invalid format
#
# @param String: String with DEFINE statement
# @param Arch: Supported Arch
# @param Defines: Dict in which the parsed DEFINE is stored
#
# @retval 0 DEFINE statement found, and valid
# @retval -1 DEFINE statement found, but not valid
# @retval 1 DEFINE statement not found
#
def GenDefines(String, Arch, Defines):
if String.find(DataType.TAB_DEFINE + ' ') > -1:
List = String.replace(DataType.TAB_DEFINE + ' ', '').split(DataType.TAB_EQUAL_SPLIT)
if len(List) == 2:
Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
return 0
else:
return -1
return 1
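def _example_GenDefines():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module; it assumes DataType.TAB_DEFINE == 'DEFINE' and
    # DataType.TAB_EQUAL_SPLIT == '='.
    Defines = {}
    assert GenDefines('DEFINE GCC_PATH = /usr/bin/gcc', 'COMMON', Defines) == 0
    assert Defines == {('GCC_PATH', 'COMMON'): '/usr/bin/gcc'}
    assert GenDefines('no define here', 'COMMON', Defines) == 1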
## GenInclude
#
# Parse a string with format "!include <Filename>"
# Store the include file path in IncludeFiles (merged by Arch) and return True
# Return False if the format is invalid
#
# @param String: String with INCLUDE statement
# @param IncludeFiles: Dict in which the parsed include file is stored
# @param Arch: Supported Arch
#
# @retval True
# @retval False
#
def GenInclude(String, IncludeFiles, Arch):
if String.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1:
IncludeFile = CleanString(String[String.upper().find(DataType.TAB_INCLUDE.upper() + ' ') + len(DataType.TAB_INCLUDE + ' ') : ])
MergeArches(IncludeFiles, IncludeFile, Arch)
return True
else:
return False
## GetLibraryClassesWithModuleType
#
# Get Library Class definition when no module type defined
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Get library classes successfully
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
newKey = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), newKey[1]])
return True
## GetDynamics
#
# Get Dynamic Pcds
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Get Dynamic Pcds successfully
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
#
# Get SkuId Name List
#
SkuIdNameList = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]])
return True
## SplitModuleType
#
# Split ModuleType out of the section definition to get the key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [ 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
#
# @retval ReturnValue A list for module types
#
def SplitModuleType(Key):
KeyList = Key.split(DataType.TAB_SPLIT)
#
# Fill in for arch
#
KeyList.append('')
#
# Fill in for moduletype
#
KeyList.append('')
ReturnValue = []
KeyValue = KeyList[0]
if KeyList[1] != '':
KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]
ReturnValue.append(KeyValue)
ReturnValue.append(GetSplitValueList(KeyList[2]))
return ReturnValue
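def _example_SplitModuleType():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module; it assumes DataType.TAB_SPLIT == '.' and
    # DataType.TAB_VALUE_SPLIT == '|'.
    assert SplitModuleType('LibraryClasses.IA32.PEIM|DXE_DRIVER') == \
        ['LibraryClasses.IA32', ['PEIM', 'DXE_DRIVER']]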
## Replace macro in strings list
#
# This method replaces macros used in a given string list. The macros are
# given in a dictionary.
#
# @param StringList StringList to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement To decide whether to replace an undefined macro with ''
#
# @retval NewList A new string list whose macros are replaced
#
def ReplaceMacros(StringList, MacroDefinitions={}, SelfReplacement=False):
NewList = []
for String in StringList:
if type(String) == type(''):
NewList.append(ReplaceMacro(String, MacroDefinitions, SelfReplacement))
else:
NewList.append(String)
return NewList
## Replace macro in string
#
# This method replaces macros used in a given string. The macros are given in a
# dictionary.
#
# @param String String to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement To decide whether to replace an undefined macro with ''
#
# @retval string The string whose macros are replaced
#
def ReplaceMacro(String, MacroDefinitions={}, SelfReplacement=False, RaiseError=False):
LastString = String
while String and MacroDefinitions:
MacroUsed = GlobalData.gMacroRefPattern.findall(String)
# no macro found in String, stop replacing
if len(MacroUsed) == 0:
break
for Macro in MacroUsed:
if Macro not in MacroDefinitions:
if RaiseError:
raise SymbolNotFound("%s not defined" % Macro)
if SelfReplacement:
String = String.replace("$(%s)" % Macro, '')
continue
String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
# in case there's macro not defined
if String == LastString:
break
LastString = String
return String
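def _example_ReplaceMacro():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module; it assumes GlobalData.gMacroRefPattern captures the
    # macro name inside a $(NAME) reference.
    assert ReplaceMacro('$(WORKSPACE)/MdePkg', {'WORKSPACE': '/home/edk2'}) == '/home/edk2/MdePkg'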
## NormPath
#
# Create a normal path
# And replace DEFINE macros in the path
#
# @param Path: The input value for Path to be converted
# @param Defines: A set for DEFINE statement
#
# @retval Path Formatted path
#
def NormPath(Path, Defines={}):
IsRelativePath = False
if Path:
if Path[0] == '.':
IsRelativePath = True
#
# Replace with Define
#
if Defines:
Path = ReplaceMacro(Path, Defines)
#
# To local path format
#
Path = os.path.normpath(Path)
if IsRelativePath and Path[0] != '.':
Path = os.path.join('.', Path)
return Path
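def _example_NormPath():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module; the expected value is for a POSIX host, where
    # os.path.normpath keeps '/' as the separator.
    assert NormPath('./Foo//Bar') == './Foo/Bar'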
## CleanString
#
# Remove comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
#
# @retval string The cleaned string
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False, BuildOption=False):
#
# remove whitespace
#
Line = Line.strip();
#
# Replace Edk's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK_SPLIT, CommentCharacter)
#
# remove comments, but we should escape comment character in string
#
InString = False
CommentInString = False
for Index in range(0, len(Line)):
if Line[Index] == '"':
InString = not InString
elif Line[Index] == CommentCharacter and InString :
CommentInString = True
elif Line[Index] == CommentCharacter and not InString :
Line = Line[0: Index]
break
if CommentInString and BuildOption:
Line = Line.replace('"', '')
ChIndex = Line.find('#')
while ChIndex >= 0:
if GlobalData.gIsWindows:
if ChIndex == 0 or Line[ChIndex - 1] != '^':
Line = Line[0:ChIndex] + '^' + Line[ChIndex:]
ChIndex = Line.find('#', ChIndex + 2)
else:
ChIndex = Line.find('#', ChIndex + 1)
else:
if ChIndex == 0 or Line[ChIndex - 1] != '\\':
Line = Line[0:ChIndex] + '\\' + Line[ChIndex:]
ChIndex = Line.find('#', ChIndex + 2)
else:
ChIndex = Line.find('#', ChIndex + 1)
#
# remove whitespace again
#
Line = Line.strip();
return Line
## CleanString2
#
# Split statement with comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
#
# @retval Line, Comment The cleaned statement and the stripped comment
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip();
#
# Replace Edk's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK_SPLIT, CommentCharacter)
#
# separate comments and statements, but we should escape comment character in string
#
InString = False
CommentInString = False
Comment = ''
for Index in range(0, len(Line)):
if Line[Index] == '"':
InString = not InString
elif Line[Index] == CommentCharacter and InString:
CommentInString = True
elif Line[Index] == CommentCharacter and not InString:
Comment = Line[Index:].strip()
Line = Line[0:Index].strip()
break
return Line, Comment
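def _example_CleanString2():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module; it assumes DataType.TAB_COMMENT_SPLIT == '#'.
    assert CleanString2('gTokenSpaceGuid.PcdFoo|TRUE  # enable Foo') == \
        ('gTokenSpaceGuid.PcdFoo|TRUE', '# enable Foo')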
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
# The result is saved to KeyValues
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Successfully executed
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.split('\n')
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues += [Line]
return True
## GetDefineValue
#
# Parse a DEFINE statement to get defined value
# DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of DEFINE statement
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval string The defined value
#
def GetDefineValue(String, Key, CommentCharacter):
String = CleanString(String)
return String[String.find(Key + ' ') + len(Key + ' ') : ]
## GetHexVerValue
#
# Get a Hex Version Value
#
# @param VerString: The version string to be parsed
#
#
# @retval: If VerString is incorrectly formatted, return "None" which will break the build.
# If VerString is correctly formatted, return a Hex value of the Version Number (0xmmmmnnnn)
# where mmmm is the major number and nnnn is the adjusted minor number.
#
def GetHexVerValue(VerString):
VerString = CleanString(VerString)
if gHumanReadableVerPatt.match(VerString):
ValueList = VerString.split('.')
Major = ValueList[0]
Minor = ValueList[1]
if len(Minor) == 1:
Minor += '0'
DeciValue = (int(Major) << 16) + int(Minor);
return "0x%08x" % DeciValue
elif gHexVerPatt.match(VerString):
return VerString
else:
return None
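def _example_GetHexVerValue():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module: '1.1' is read as major 1, minor 10 (single-digit
    # minors are padded with a trailing zero), so both decimal forms below map to
    # the same hex value.
    assert GetHexVerValue('1.1') == '0x0001000a'
    assert GetHexVerValue('1.10') == '0x0001000a'
    assert GetHexVerValue('0x00010002') == '0x00010002'
    assert GetHexVerValue('banana') is None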
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get value of each definition line
# Key1 = Value1
# Key2 = Value2
# The result is saved to Dictionary
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, used to decide if the value has multiple parts
# @param ValueSplitCharacter: Value split char, used to split multiple values. Key1 = Value1|Value2, '|' is the value split char
#
# @retval True Successfully executed
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
Lines = Lines.split('\n')
Keys = []
Value = ''
DefineValues = ['']
SpecValues = ['']
for Line in Lines:
#
# Handle DEFINE and SPEC
#
if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
if '' in DefineValues:
DefineValues.remove('')
DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
continue
if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
if '' in SpecValues:
SpecValues.remove('')
SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
continue
#
# Handle Others
#
LineList = Line.split(KeySplitCharacter, 1)
if len(LineList) >= 2:
Key = LineList[0].split()
if len(Key) == 1 and Key[0][0] != CommentCharacter:
#
# Remove comments and white spaces
#
LineList[1] = CleanString(LineList[1], CommentCharacter)
if ValueSplitFlag:
Value = map(string.strip, LineList[1].split(ValueSplitCharacter))
else:
Value = CleanString(LineList[1], CommentCharacter).splitlines()
if Key[0] in Dictionary:
if Key[0] not in Keys:
Dictionary[Key[0]] = Value
Keys.append(Key[0])
else:
Dictionary[Key[0]].extend(Value)
else:
Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
if DefineValues == []:
DefineValues = ['']
if SpecValues == []:
SpecValues = ['']
Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
return True
## PreCheck
#
# Do pre-check for a file before it is parsed
# Check $()
# Check []
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Used for error report
#
def PreCheck(FileName, FileContent, SupSectionTag):
LineNo = 0
IsFailed = False
NewFileContent = ''
for Line in FileContent.splitlines():
LineNo = LineNo + 1
#
# Clean current line
#
Line = CleanString(Line)
#
# Remove commented line
#
        if Line.find(DataType.TAB_COMMENT_SPLIT) == 0:
Line = ''
#
# Check $()
#
if Line.find('$') > -1:
if Line.find('$(') < 0 or Line.find(')') < 0:
EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
#
# Check []
#
if Line.find('[') > -1 or Line.find(']') > -1:
#
# Only get one '[' or one ']'
#
if not (Line.find('[') > -1 and Line.find(']') > -1):
EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
#
# Regenerate FileContent
#
NewFileContent = NewFileContent + Line + '\r\n'
if IsFailed:
EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
return NewFileContent
## CheckFileType
#
# Check if the Filename has the expected extension ExtName
# Return True if it does
# Raise an error if it does not
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Ext name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file to be checked
#
# @retval True The file type is correct
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo= -1):
if CheckFilename != '' and CheckFilename != None:
(Root, Ext) = os.path.splitext(CheckFilename)
if Ext.upper() != ExtName.upper():
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = "Invalid %s. '%s' is found, but '%s' file is needed" % (SectionName, CheckFilename, ExtName)
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo,
File=ContainerFilename, RaiseError=EdkLogger.IsRaiseError)
return True
## CheckFileExist
#
# Check if the file exists
# Return the full path if it exists
# Raise an error if it does not exist
#
# @param CheckFilename: Name of the file to be checked
# @param WorkspaceDir: Current workspace dir
# @param ContainerFilename: The container file which describes the file to be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file to be checked
#
# @retval The file full path if the file exists
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo= -1):
CheckFile = ''
if CheckFilename != '' and CheckFilename != None:
CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
if not os.path.isfile(CheckFile):
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = "Can't find file '%s' defined in section '%s'" % (CheckFile, SectionName)
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg,
File=ContainerFilename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
return CheckFile
## GetLineNo
#
# Find the index of a line in a file
#
# @param FileContent: Search scope
# @param Line: Search key
#
# @retval int Index of the line
# @retval -1 The line is not found
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
LineList = FileContent.splitlines()
for Index in range(len(LineList)):
if LineList[Index].find(Line) > -1:
#
# Ignore statement in comment
#
if IsIgnoreComment:
if LineList[Index].strip()[0] == DataType.TAB_COMMENT_SPLIT:
continue
return Index + 1
return -1
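def _example_GetLineNo():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module; it assumes DataType.TAB_COMMENT_SPLIT == '#'. Line
    # numbers are 1-based and commented occurrences are skipped by default.
    Content = '[Defines]\n# DEFINE FOO = 1\nDEFINE FOO = 1\n'
    assert GetLineNo(Content, 'DEFINE FOO') == 3
    assert GetLineNo(Content, 'MISSING') == -1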
## RaiseParserError
#
# Raise a parser error
#
# @param Line: String which has error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format
#
def RaiseParserError(Line, Section, File, Format='', LineNo= -1):
if LineNo == -1:
LineNo = GetLineNo(open(os.path.normpath(File), 'r').read(), Line)
ErrorMsg = "Invalid statement '%s' is found in section '%s'" % (Line, Section)
if Format != '':
Format = "Correct format is " + Format
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, ExtraData=Format, RaiseError=EdkLogger.IsRaiseError)
## WorkspaceFile
#
# Return a full path with workspace dir
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
# @retval string A full path
#
def WorkspaceFile(WorkspaceDir, Filename):
return os.path.join(NormPath(WorkspaceDir), NormPath(Filename))
## SplitString
#
# Remove the '"' characters at the start and end of the string
#
# @param String: The string to be processed
#
# @retval String: The string with the surrounding '"' removed
#
def SplitString(String):
if String.startswith('\"'):
String = String[1:]
if String.endswith('\"'):
String = String[:-1]
return String
## Convert To Sql String
#
# 1. Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def ConvertToSqlString(StringList):
return map(lambda s: s.replace("'", "''") , StringList)
## Convert To Sql String
#
# 1. Replace "'" with "''" in the String
#
# @param String: A String to be converted
#
def ConvertToSqlString2(String):
return String.replace("'", "''")
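def _example_ConvertToSqlString():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module; list() makes the check independent of whether map()
    # returns a list (Python 2) or an iterator (Python 3).
    assert ConvertToSqlString2("it's") == "it''s"
    assert list(ConvertToSqlString(["it's", 'plain'])) == ["it''s", 'plain']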
#
# Remove comment block
#
def RemoveBlockComment(Lines):
IsFindBlockComment = False
IsFindBlockCode = False
ReservedLine = ''
NewLines = []
for Line in Lines:
Line = Line.strip()
#
# Remove comment block
#
if Line.find(DataType.TAB_COMMENT_EDK_START) > -1:
ReservedLine = GetSplitList(Line, DataType.TAB_COMMENT_EDK_START, 1)[0]
IsFindBlockComment = True
if Line.find(DataType.TAB_COMMENT_EDK_END) > -1:
Line = ReservedLine + GetSplitList(Line, DataType.TAB_COMMENT_EDK_END, 1)[1]
ReservedLine = ''
IsFindBlockComment = False
if IsFindBlockComment:
NewLines.append('')
continue
NewLines.append(Line)
return NewLines
#
# Get String of a List
#
def GetStringOfList(List, Split=' '):
if type(List) != type([]):
return List
Str = ''
for Item in List:
Str = Str + Item + Split
return Str.strip()
#
# Get HelpTextList from HelpTextClassList
#
def GetHelpTextList(HelpTextClassList):
List = []
if HelpTextClassList:
for HelpText in HelpTextClassList:
if HelpText.String.endswith('\n'):
HelpText.String = HelpText.String[0: len(HelpText.String) - len('\n')]
List.extend(HelpText.String.split('\n'))
return List
def StringToArray(String):
if isinstance(String, unicode):
        if len(String) == 0:
return "{0x00, 0x00}"
return "{%s, 0x00, 0x00}" % ", ".join(["0x%02x, 0x00" % ord(C) for C in String])
elif String.startswith('L"'):
if String == "L\"\"":
return "{0x00, 0x00}"
else:
return "{%s, 0x00, 0x00}" % ", ".join(["0x%02x, 0x00" % ord(C) for C in String[2:-1]])
elif String.startswith('"'):
if String == "\"\"":
return "{0x00,0x00}"
else:
StringLen = len(String[1:-1])
if StringLen % 2:
return "{%s, 0x00}" % ", ".join(["0x%02x" % ord(C) for C in String[1:-1]])
else:
return "{%s, 0x00,0x00}" % ", ".join(["0x%02x" % ord(C) for C in String[1:-1]])
elif String.startswith('{'):
StringLen = len(String.split(","))
if StringLen % 2:
return "{%s, 0x00}" % ", ".join([ C for C in String[1:-1].split(',')])
else:
return "{%s}" % ", ".join([ C for C in String[1:-1].split(',')])
else:
if len(String.split()) % 2:
return '{%s, 0}' % ', '.join(String.split())
else:
return '{%s, 0,0}' % ', '.join(String.split())
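def _example_StringToArray():
    # Added illustrative sketch, not part of the original EDK II sources and never
    # called by this module: ASCII literals become NUL-terminated byte arrays and
    # L"..." literals become NUL-terminated UCS-2 arrays.
    assert StringToArray('"ab"') == '{0x61, 0x62, 0x00,0x00}'
    assert StringToArray('L"a"') == '{0x61, 0x00, 0x00, 0x00}'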
def StringArrayLength(String):
if isinstance(String, unicode):
return (len(String) + 1) * 2 + 1;
elif String.startswith('L"'):
return (len(String) - 3 + 1) * 2
elif String.startswith('"'):
return (len(String) - 2 + 1)
else:
return len(String.split()) + 1
def RemoveDupOption(OptionString, Which="/I", Against=None):
OptionList = OptionString.split()
ValueList = []
if Against:
ValueList += Against
for Index in range(len(OptionList)):
Opt = OptionList[Index]
if not Opt.startswith(Which):
continue
if len(Opt) > len(Which):
Val = Opt[len(Which):]
else:
Val = ""
if Val in ValueList:
OptionList[Index] = ""
else:
ValueList.append(Val)
return " ".join(OptionList)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
pass
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics."""
from . import metrics as localmot_metrics
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
def testcase_no_confusion():
num_timesteps = 5
num_gt_ids = 2
num_tracker_ids = 2
# No overlap between pairs (0, 0) and (1, 1).
similarity = np.zeros([num_timesteps, num_gt_ids, num_tracker_ids])
similarity[:, 0, 1] = [0, 0, 0, 1, 1]
similarity[:, 1, 0] = [1, 1, 0, 0, 0]
gt_present = np.zeros([num_timesteps, num_gt_ids])
gt_present[:, 0] = [1, 1, 1, 1, 1]
gt_present[:, 1] = [1, 1, 1, 0, 0]
tracker_present = np.zeros([num_timesteps, num_tracker_ids])
tracker_present[:, 0] = [1, 1, 1, 1, 0]
tracker_present[:, 1] = [1, 1, 1, 1, 1]
expected = {
0: {
'num_frames': 1,
'gt_num_tracks': 8 / 5,
'pr_num_tracks': 9 / 5,
'gt_num_is_present': 8 / 5,
'pr_num_is_present': 9 / 5,
'track_tp': 4 / 5,
'ata': 4 / (0.5 * (8 + 9)),
'idtp': 4 / 5,
'idf1': 4 / (0.5 * (8 + 9)),
},
1: {
'num_frames': (2 + 3 + 3 + 3 + 2) / 5,
'gt_num_tracks': (2 + 2 + 2 + 2 + 1) / 5,
'pr_num_tracks': 2,
'gt_num_is_present': (4 + 6 + 5 + 4 + 2) / 5,
'pr_num_is_present': (4 + 6 + 6 + 5 + 3) / 5,
'track_tp': ((1) + (2 / 3) + (1 / 3 + 1 / 3) + (2 / 3) + (1)) / 5,
'ata': (((1) + (2 / 3) + (1 / 3 + 1 / 3) + (2 / 3) + (1)) /
(0.5 * ((2 + 2 + 2 + 2 + 1) +
(2 + 2 + 2 + 2 + 2)))),
'idtp': (2 + 2 + 2 + 2 + 2) / 5,
'idf1': ((2 + 2 + 2 + 2 + 2) /
(0.5 * ((4 + 6 + 5 + 4 + 2) + (4 + 6 + 6 + 5 + 3)))),
},
4: {
'num_frames': 5,
'gt_num_tracks': 2,
'pr_num_tracks': 2,
'gt_num_is_present': 8,
'pr_num_is_present': 9,
'track_tp': 2 / 5 + 2 / 4,
'ata': (2 / 5 + 2 / 4) / 2,
'idtp': 4,
'idf1': 4 / (0.5 * (8 + 9)),
},
}
data = _from_dense(
num_timesteps=num_timesteps,
num_gt_ids=num_gt_ids,
num_tracker_ids=num_tracker_ids,
gt_present=gt_present,
tracker_present=tracker_present,
similarity=similarity,
)
return data, expected
def testcase_with_confusion():
num_timesteps = 5
num_gt_ids = 2
num_tracker_ids = 2
similarity = np.zeros([num_timesteps, num_gt_ids, num_tracker_ids])
similarity[:, 0, 1] = [0, 0, 0, 1, 1]
similarity[:, 1, 0] = [1, 1, 0, 0, 0]
# Add some overlap between (0, 0) and (1, 1).
similarity[:, 0, 0] = [0, 0, 1, 0, 0]
similarity[:, 1, 1] = [0, 1, 0, 0, 0]
gt_present = np.zeros([num_timesteps, num_gt_ids])
gt_present[:, 0] = [1, 1, 1, 1, 1]
gt_present[:, 1] = [1, 1, 1, 0, 0]
tracker_present = np.zeros([num_timesteps, num_tracker_ids])
tracker_present[:, 0] = [1, 1, 1, 1, 0]
tracker_present[:, 1] = [1, 1, 1, 1, 1]
expected = {
0: {
'num_frames': 1,
'gt_num_tracks': 8 / 5,
'pr_num_tracks': 9 / 5,
'gt_num_is_present': 8 / 5,
'pr_num_is_present': 9 / 5,
'track_tp': 5 / 5,
'ata': 5 / (0.5 * (8 + 9)),
'idtp': 5 / 5,
'idf1': 5 / (0.5 * (8 + 9)),
},
4: {
'num_frames': 5,
'gt_num_tracks': 2,
'pr_num_tracks': 2,
'gt_num_is_present': 8,
'pr_num_is_present': 9,
'track_tp': 2 / 5 + 2 / 4,
'ata': (2 / 5 + 2 / 4) / 2,
'idtp': 4,
'idf1': 4 / (0.5 * (8 + 9)),
},
}
data = _from_dense(
num_timesteps=num_timesteps,
num_gt_ids=num_gt_ids,
num_tracker_ids=num_tracker_ids,
gt_present=gt_present,
tracker_present=tracker_present,
similarity=similarity,
)
return data, expected
def testcase_split_tracks():
num_timesteps = 5
num_gt_ids = 2
num_tracker_ids = 5
similarity = np.zeros([num_timesteps, num_gt_ids, num_tracker_ids])
# Split ground-truth 0 between tracks 0, 3.
similarity[:, 0, 0] = [1, 1, 0, 0, 0]
similarity[:, 0, 3] = [0, 0, 0, 1, 1]
# Split ground-truth 1 between tracks 1, 2, 4.
similarity[:, 1, 1] = [0, 0, 1, 1, 0]
similarity[:, 1, 2] = [0, 0, 0, 0, 1]
similarity[:, 1, 4] = [1, 1, 0, 0, 0]
gt_present = np.zeros([num_timesteps, num_gt_ids])
gt_present[:, 0] = [1, 1, 0, 1, 1]
gt_present[:, 1] = [1, 1, 1, 1, 1]
tracker_present = np.zeros([num_timesteps, num_tracker_ids])
tracker_present[:, 0] = [1, 1, 0, 0, 0]
tracker_present[:, 1] = [0, 0, 1, 1, 1]
tracker_present[:, 2] = [0, 0, 0, 0, 1]
tracker_present[:, 3] = [0, 0, 1, 1, 1]
tracker_present[:, 4] = [1, 1, 0, 0, 0]
expected = {
0: {
'num_frames': 1,
'gt_num_tracks': 9 / 5,
'pr_num_tracks': 11 / 5,
'gt_num_is_present': 9 / 5,
'pr_num_is_present': 11 / 5,
'track_tp': 9 / 5,
'ata': 9 / (0.5 * (9 + 11)),
'idtp': 9 / 5,
'idf1': 9 / (0.5 * (9 + 11)),
},
4: {
'num_frames': 5,
'gt_num_tracks': 2,
'pr_num_tracks': 5,
'gt_num_is_present': 9,
'pr_num_is_present': 11,
# For gt 0:
# (0, 0): 2 / 4
# (0, 3): 2 / 5
# For gt 1:
# (1, 1): 2 / 5
# (1, 2): 1 / 5
# (1, 4): 2 / 5
'track_tp': 2 / 4 + 2 / 5,
'ata': (2 / 4 + 2 / 5) / (0.5 * (2 + 5)),
# For gt 0:
# (0, 0): 2
# (0, 3): 2
# For gt 1:
# (1, 1): 2
# (1, 2): 1
# (1, 4): 2
'idtp': 4,
'idf1': 4 / (0.5 * (9 + 11)),
},
}
data = _from_dense(
num_timesteps=num_timesteps,
num_gt_ids=num_gt_ids,
num_tracker_ids=num_tracker_ids,
gt_present=gt_present,
tracker_present=tracker_present,
similarity=similarity,
)
return data, expected
def _from_dense(num_timesteps, num_gt_ids, num_tracker_ids, gt_present,
tracker_present, similarity):
gt_subset = [np.flatnonzero(gt_present[t, :]) for t in range(num_timesteps)]
tracker_subset = [
np.flatnonzero(tracker_present[t, :]) for t in range(num_timesteps)
]
similarity_subset = [
similarity[t][gt_subset[t], :][:, tracker_subset[t]]
for t in range(num_timesteps)
]
data = {
'num_timesteps': num_timesteps,
'num_gt_ids': num_gt_ids,
'num_tracker_ids': num_tracker_ids,
'num_gt_dets': np.sum(gt_present),
'num_tracker_dets': np.sum(tracker_present),
'gt_ids': gt_subset,
'tracker_ids': tracker_subset,
'similarity_scores': similarity_subset,
}
return data
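def _example_from_dense_shapes():
    # Added illustrative sketch, not part of the original file and never called by
    # the tests: in the no-confusion case only gt 0 and tracker 1 are present at
    # the final frame, so the per-frame subsets collapse to single ids there.
    data, _ = testcase_no_confusion()
    np.testing.assert_array_equal(data['gt_ids'][4], [0])
    np.testing.assert_array_equal(data['tracker_ids'][4], [1])
    assert data['similarity_scores'][4].shape == (1, 1)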
TESTCASE_BY_NAME = {
'no_confusion': testcase_no_confusion(),
'with_confusion': testcase_with_confusion(),
'split_tracks': testcase_split_tracks(),
}
class MetricsTest(parameterized.TestCase):
@parameterized.parameters(
('no_confusion',),
('with_confusion',),
('split_tracks',))
def test_metrics_integer_horizons(self, sequence_name):
data, expected = TESTCASE_BY_NAME[sequence_name]
horizons = list(expected.keys())
local_stats = localmot_metrics.local_stats(
num_frames=data['num_timesteps'],
gt_id_subset=data['gt_ids'],
pr_id_subset=data['tracker_ids'],
similarity=data['similarity_scores'],
horizons=horizons)
normalized = localmot_metrics.normalize(local_stats)
result = pd.concat([normalized, local_stats], axis=1)
for r in horizons:
for key, value in expected[r].items():
self.assertAlmostEqual(result.loc[r, key], value,
msg=f'different value for {key} at horizon {r}')
@parameterized.parameters(
('no_confusion',),
('with_confusion',),
('split_tracks',))
def test_metrics_large_horizon_equals_strict(self, sequence_name):
data, _ = TESTCASE_BY_NAME[sequence_name]
evaluator = localmot_metrics.StatsEvaluator(
num_frames=data['num_timesteps'],
gt_id_subset=data['gt_ids'],
pr_id_subset=data['tracker_ids'],
similarity=data['similarity_scores'])
local_stats = evaluator.local(data['num_timesteps'] - 1)
strict_stats = evaluator.strict()
pd.testing.assert_series_equal(local_stats, strict_stats, check_names=False)
@parameterized.product(
sequence_name=['no_confusion', 'with_confusion'],
with_diagnostics=[True, False])
def test_fields(self, sequence_name, with_diagnostics):
data, _ = TESTCASE_BY_NAME[sequence_name]
stats = localmot_metrics.local_stats(
num_frames=data['num_timesteps'],
gt_id_subset=data['gt_ids'],
pr_id_subset=data['tracker_ids'],
similarity=data['similarity_scores'],
horizons=[1, 2, 5],
with_diagnostics=with_diagnostics)
self.assertContainsSubset(localmot_metrics.FIELDS_STATS, stats.columns)
result = localmot_metrics.normalize(stats)
self.assertContainsSubset(localmot_metrics.FIELDS_METRICS, result.columns)
@parameterized.parameters(
('no_confusion',),
('with_confusion',),
('split_tracks',))
def test_metrics_inf_horizon(self, sequence_name):
data, _ = TESTCASE_BY_NAME[sequence_name]
max_horizon = data['num_timesteps'] - 1
local_stats = localmot_metrics.local_stats(
num_frames=data['num_timesteps'],
gt_id_subset=data['gt_ids'],
pr_id_subset=data['tracker_ids'],
similarity=data['similarity_scores'],
horizons=[max_horizon, np.inf])
pd.testing.assert_series_equal(local_stats.loc[np.inf],
local_stats.loc[max_horizon],
check_names=False)
@parameterized.product(
sequence_name=['no_confusion', 'with_confusion', 'split_tracks'],
horizon=[0, 1, 3, np.inf],
)
def test_decomposition_stats(self, sequence_name, horizon):
data, _ = TESTCASE_BY_NAME[sequence_name]
stats = localmot_metrics.local_stats(
num_frames=data['num_timesteps'],
gt_id_subset=data['gt_ids'],
pr_id_subset=data['tracker_ids'],
similarity=data['similarity_scores'],
horizons=[horizon],
with_diagnostics=True)
self.assertTrue(np.all(stats >= 0))
self.assertTrue(np.all(stats['track_tp_approx'] <= stats['track_tp']))
self.assertTrue(np.all(stats['track_tp_approx'] <= stats['gt_frac_opt']))
self.assertTrue(np.all(stats['track_tp_approx'] <= stats['pr_frac_opt']))
self.assertTrue(np.all(stats['gt_frac_opt'] <= stats['gt_frac_max']))
self.assertTrue(np.all(stats['pr_frac_opt'] <= stats['pr_frac_max']))
self.assertTrue(np.all(stats['gt_frac_max'] <= stats['gt_frac_det']))
self.assertTrue(np.all(stats['pr_frac_max'] <= stats['pr_frac_det']))
np.testing.assert_allclose(
stats['gt_num_tracks'], (stats['track_tp_approx'] +
stats['track_fn_cover'] +
stats['track_fn_union']))
np.testing.assert_allclose(
stats['pr_num_tracks'], (stats['track_tp_approx'] +
stats['track_fp_cover'] +
stats['track_fp_union']))
np.testing.assert_allclose(
stats['track_fn_cover'], (stats['track_fn_cover_det'] +
stats['track_fn_cover_ass']))
np.testing.assert_allclose(
stats['track_fp_cover'], (stats['track_fp_cover_det'] +
stats['track_fp_cover_ass']))
np.testing.assert_allclose(
stats['track_fn_cover_ass'], (stats['track_fn_cover_ass_indep'] +
stats['track_fn_cover_ass_joint']))
np.testing.assert_allclose(
stats['track_fp_cover_ass'], (stats['track_fp_cover_ass_indep'] +
stats['track_fp_cover_ass_joint']))
np.testing.assert_allclose(
stats['track_fn_union'], (stats['track_fn_union_det'] +
stats['track_fn_union_ass']))
np.testing.assert_allclose(
stats['track_fp_union'], (stats['track_fp_union_det'] +
stats['track_fp_union_ass']))
@parameterized.product(
sequence_name=['no_confusion', 'with_confusion', 'split_tracks'],
horizon=[0, 1, 3, np.inf],
)
def test_decomposition_metrics(self, sequence_name, horizon):
data, _ = TESTCASE_BY_NAME[sequence_name]
stats = localmot_metrics.local_stats(
num_frames=data['num_timesteps'],
gt_id_subset=data['gt_ids'],
pr_id_subset=data['tracker_ids'],
similarity=data['similarity_scores'],
horizons=[horizon],
with_diagnostics=True)
metrics = localmot_metrics.normalize(stats)
self.assertTrue(np.all(metrics >= 0))
self.assertTrue(np.all(metrics <= 1))
# Decomposition of ATR.
np.testing.assert_allclose(
1 - metrics['atr_approx'], (metrics['atr_error_cover'] +
metrics['atr_error_union']))
np.testing.assert_allclose(
metrics['atr_error_cover'], (metrics['atr_error_cover_det'] +
metrics['atr_error_cover_ass']))
np.testing.assert_allclose(
metrics['atr_error_cover_ass'], (metrics['atr_error_cover_ass_indep'] +
metrics['atr_error_cover_ass_joint']))
np.testing.assert_allclose(
metrics['atr_error_union'], (metrics['atr_error_union_det'] +
metrics['atr_error_union_ass']))
np.testing.assert_allclose(
1 - metrics['atr_approx'], (metrics['atr_error_det_fn'] +
metrics['atr_error_det_fp'] +
metrics['atr_error_ass_split'] +
metrics['atr_error_ass_merge']))
# Decomposition of ATP.
np.testing.assert_allclose(
1 - metrics['atp_approx'], (metrics['atp_error_cover'] +
metrics['atp_error_union']))
np.testing.assert_allclose(
metrics['atp_error_cover'], (metrics['atp_error_cover_det'] +
metrics['atp_error_cover_ass']))
np.testing.assert_allclose(
metrics['atp_error_cover_ass'], (metrics['atp_error_cover_ass_indep'] +
metrics['atp_error_cover_ass_joint']))
np.testing.assert_allclose(
metrics['atp_error_union'], (metrics['atp_error_union_det'] +
metrics['atp_error_union_ass']))
np.testing.assert_allclose(
1 - metrics['atp_approx'], (metrics['atp_error_det_fn'] +
metrics['atp_error_det_fp'] +
metrics['atp_error_ass_split'] +
metrics['atp_error_ass_merge']))
# Decomposition of ATA.
np.testing.assert_allclose(
1 - metrics['ata_approx'], (metrics['ata_error_det_fn'] +
metrics['ata_error_det_fp'] +
metrics['ata_error_ass_split'] +
metrics['ata_error_ass_merge']))
@parameterized.parameters(
('no_confusion',),
('with_confusion',),
('split_tracks',))
def test_normalize_pd_series(self, sequence_name):
data, expected = TESTCASE_BY_NAME[sequence_name]
horizons = list(expected.keys())
stats = localmot_metrics.local_stats(
num_frames=data['num_timesteps'],
gt_id_subset=data['gt_ids'],
pr_id_subset=data['tracker_ids'],
similarity=data['similarity_scores'],
horizons=horizons,
with_diagnostics=True)
dataframe = localmot_metrics.normalize(stats)
for r in horizons:
series = localmot_metrics.normalize(stats.loc[r])
self.assertIsInstance(series, pd.Series)
pd.testing.assert_series_equal(
series, dataframe.loc[r], check_names=False)
if __name__ == '__main__':
absltest.main()
|
|
"""Adapted from Nematode: https://github.com/demelin/nematode """
# TODO: Add an attention visualization component - very important (~easy)
""" Layer implementations. """
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
# ModuleNotFoundError is new in 3.6; older versions will throw SystemError
if sys.version_info < (3, 6):
ModuleNotFoundError = SystemError
try:
from . import tf_utils
except (ModuleNotFoundError, ImportError) as e:
import tf_utils
def matmul_nd(nd_tensor, matrix):
""" Performs matrix multiplication for n-dimensional inputs. """
tensor_shape = tf_utils.get_shape_list(nd_tensor)
matrix_shape = tf_utils.get_shape_list(matrix)
initial_tensor_dims = tensor_shape[:-1]
flat_first_dim = tf.reduce_prod(input_tensor=initial_tensor_dims)
tensor_2d = tf.reshape(nd_tensor, [flat_first_dim, tensor_shape[-1]])
result_2d = tf.matmul(tensor_2d, matrix)
result_3d = tf.reshape(result_2d, initial_tensor_dims + [matrix_shape[-1]])
return result_3d
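# Added illustrative note (not from the original file): matmul_nd contracts the
# last axis of the n-dimensional tensor with the first axis of the matrix, e.g. an
# input of shape [batch, time, 8] multiplied by an [8, 16] matrix yields a result
# of shape [batch, time, 16].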
def get_right_context_mask(time_steps):
""" Generates the mask preventing the decoder from attending to unseen positions. """
# Generate mask that limits decoder self-attention up to and including the current position
attn_mask = tf.linalg.band_part(tf.ones([time_steps, time_steps]), -1, 0)
    # Expand mask to 4D so as to be compatible with attention weights
attn_mask = tf.expand_dims(tf.expand_dims(attn_mask, 0), 0)
# Illegal connections will be set to -inf when fed into the softmax function
# Padding for non-masked positions is applied to prevent NaNs
attn_mask = -1e9 * (1.0 - attn_mask)
return attn_mask
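# Added illustrative note (not from the original file): for time_steps == 3 the
# returned mask (ignoring the two leading singleton axes) is
#   [[0, -1e9, -1e9],
#    [0,    0, -1e9],
#    [0,    0,    0]]
# so each position can attend to itself and to earlier positions only.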
def get_positional_signal(time_steps, depth, float_dtype, min_timescale=1, max_timescale=10000):
""" Generates a series of sinusoid functions capable of expressing the relative and absolute position
of a token within a longer sequence. """
# Convert to floats
min_timescale = tf.cast(min_timescale, float_dtype)
max_timescale = tf.cast(max_timescale, float_dtype)
# Obtain timing signal via sinusoids
num_timescales = tf.cast(depth // 2, float_dtype)
log_timescale_increment = tf.math.log(max_timescale / min_timescale) / (num_timescales - tf.cast(1.0, float_dtype))
# Introduce an offset between individual timescales to obtain different frequencies
incremented_timescales = \
min_timescale * tf.exp(tf.range(num_timescales, dtype=float_dtype) * -log_timescale_increment)
# Assign the designated number of time-scales per token position
positions = tf.cast(tf.range(time_steps), float_dtype)
scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(incremented_timescales, 0)
positional_signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
# Pad the signal tensor, if needed
pad_size = depth % 2
if pad_size != 0:
        positional_signal = tf.pad(tensor=positional_signal, paddings=[[0, 0], [0, pad_size]])
# Reshape the signal to make it compatible with the target tensor
positional_signal = tf.reshape(positional_signal, [1, time_steps, depth])
return positional_signal
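# Added illustrative note (not from the original file): the returned tensor has
# shape [1, time_steps, depth]; the first depth // 2 channels hold sines and the
# remaining channels hold cosines of the scaled positions, so it can be added to a
# [batch, time_steps, depth] embedding tensor via broadcasting.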
class EmbeddingLayer(object):
""" Looks up embeddings for the specified token sequence in the learned embedding table; allows for easy weight
scaling and tying. """
def __init__(self, vocabulary_size, embedding_size, hidden_size, float_dtype, name):
# Set arguments
self.vocabulary_size = vocabulary_size
self.hidden_size = hidden_size
self.float_dtype = float_dtype
self.name = name
# Create embedding matrix and its transposes
with tf.compat.v1.variable_scope(self.name):
self.embedding_table = tf.compat.v1.get_variable(name='embedding_table',
shape=[vocabulary_size, embedding_size],
dtype=float_dtype,
initializer=glorot_uniform_initializer(),
trainable=True)
self.projection_matrix = tf.transpose(a=self.embedding_table, name='vocab_projection_matrix')
def embed(self, one_hot_inputs):
""" Embeds one-hot-vectors corresponding to input tokens. """
embeddings = tf.nn.embedding_lookup(params=self.embedding_table, ids=one_hot_inputs)
# Apply transformer-specific scaling
embeddings *= tf.sqrt(tf.cast(self.hidden_size, self.float_dtype))
return embeddings
def project(self, dec_out):
""" Projects the transformer decoder's output into the vocabulary space. """
projections = matmul_nd(dec_out, self.projection_matrix)
return projections
def get_embedding_table(self):
""" Recovers the learned embedding table. """
return self.embedding_table
def get_projection_matrix(self):
""" Recovers the pre-softmax projection matrix which is the inverse of the embedding table. """
return self.projection_matrix
def get_vocab_size(self):
""" Recovers the vocabulary size. """
return self.vocabulary_size
class LayerNormLayer(object):
""" Performs layer normalization by computing the mean and variance used for normalization from all of the
summed inputs to neurons in a layer. """
def __init__(self, dims_out, name=None, eps=1e-5):
if name is None:
name = 'layer_norm'
else:
name = '{:s}_layer_norm'.format(name)
with tf.compat.v1.variable_scope(name, values=[dims_out]):
self.offset = tf.compat.v1.get_variable(name='offset',
shape=[dims_out],
dtype=tf.float32,
initializer=tf.zeros_initializer())
self.scale = tf.compat.v1.get_variable(name='scale',
shape=[dims_out],
dtype=tf.float32,
initializer=tf.compat.v1.ones_initializer())
self.eps = tf.constant(eps)
def forward(self, inputs):
layer_mean, layer_var = tf.nn.moments(x=inputs, axes=-1, keepdims=True)
normalized = tf.add(
tf.multiply(self.scale, tf.math.divide(tf.subtract(inputs, layer_mean),
tf.sqrt(tf.add(layer_var, self.eps)))),
self.offset)
return normalized
class RMSNormLayer(object):
""" Performs root mean square layer normalization by computing root mean square of a layer and normalizing by this, thus re-scaling the layer.
In contrast to layer normalization, no mean re-centering is performed, making this computationally more efficient."""
def __init__(self, dims_out, name=None, eps=1e-5):
if name is None:
name = 'rms_norm'
else:
name = '{:s}_rms_norm'.format(name)
with tf.compat.v1.variable_scope(name, values=[dims_out]):
self.scale = tf.compat.v1.get_variable(name='scale',
shape=[dims_out],
dtype=tf.float32,
initializer=tf.compat.v1.ones_initializer())
self.eps = tf.constant(eps)
def forward(self, inputs):
meansquare = tf.reduce_mean(inputs**2, axis=-1, keepdims=True)
normalized = self.scale * inputs * tf.math.rsqrt(meansquare + self.eps)
return normalized
class ProcessingLayer(object):
""" Optionally applies residual connections, layer normalization, or dropout. """
def __init__(self, out_size, use_layer_norm, dropout_rate, training, name):
# Set attributes
self.use_layer_norm = use_layer_norm
self.training = training
self.name = name
with tf.compat.v1.variable_scope(self.name):
# Initialize layer normalization, if specified
if use_layer_norm is not False and use_layer_norm is not None:
self.layer_norm = use_layer_norm(out_size)
if dropout_rate > 0:
self.dropout = tf.keras.layers.Dropout(rate=dropout_rate)
else:
self.dropout = None
def forward(self, inputs, residual_inputs=None):
with tf.compat.v1.variable_scope(self.name, values=[inputs, residual_inputs], reuse=True):
outputs = inputs
# Apply dropout
if self.dropout is not None:
outputs = self.dropout(inputs, training=self.training)
# Apply residual connections
if residual_inputs is not None:
outputs = outputs + residual_inputs
# Apply layer normalization
if self.use_layer_norm:
outputs = self.layer_norm.forward(outputs)
return outputs
class FeedForwardLayer(object):
""" A single fully-connected feed-forward layer using standard dropout. """
def __init__(self,
in_size,
out_size,
float_dtype,
dropout_rate,
activation,
use_bias,
use_layer_norm,
training,
name):
# Set attributes
self.in_size = in_size
self.out_size = out_size
self.dropout_rate = dropout_rate
self.activation = activation
self.use_bias = use_bias
self.training = training
self.name = name
with tf.compat.v1.variable_scope(self.name):
# Set up layer normalization
if use_layer_norm is not False and use_layer_norm is not None:
self.layer_norm_layer = use_layer_norm(out_size)
else:
self.layer_norm_layer = None
if dropout_rate > 0:
self.dropout = tf.keras.layers.Dropout(rate=dropout_rate)
else:
self.dropout = None
# Define parameters
weights_shape = [in_size, out_size] if out_size is not None else [in_size]
self.weights = tf.compat.v1.get_variable(name='dense_layer_weights',
shape=weights_shape,
dtype=float_dtype,
initializer=glorot_uniform_initializer(),
trainable=True)
if use_bias:
biases_shape = [out_size] if out_size is not None else [in_size]
self.biases = tf.compat.v1.get_variable(name='dense_layer_biases',
shape=biases_shape,
dtype=float_dtype,
initializer=tf.zeros_initializer(),
trainable=True)
def forward(self, inputs):
with tf.compat.v1.variable_scope(self.name, values=[inputs]):
# Optionally apply dropout
if self.dropout is not None:
inputs = self.dropout(inputs, training=self.training)
# Feed through a dense layer
outputs = matmul_nd(inputs, self.weights)
if self.use_bias:
outputs += self.biases
if self.activation is not None:
outputs = self.activation(outputs)
# Optionally apply layer normalization
if self.layer_norm_layer is not None:
                outputs = self.layer_norm_layer.forward(outputs)
return outputs
class FeedForwardNetwork(object):
""" A fully connected feed-forward network that is applied to each position separately and identically. """
def __init__(self,
layer_dims,
float_dtype,
use_bias,
activation,
use_layer_norm,
dropout_rate,
training,
name=None):
# Set attributes
self.layer_dims = layer_dims
self.float_dtype = float_dtype
self.use_bias = use_bias
self.activation = activation
self.use_layer_norm = use_layer_norm
self.dropout_rate = dropout_rate
self.training = training
self.name = name
# Container for network layers
self.layers = list()
self._initialize_layers()
def _initialize_layers(self):
""" Builds the network from fully-connected layers. """
num_layers = len(self.layer_dims)
for layer_id in range(num_layers):
# Assure that no non-linearity or dropout is applied at the final layer
if layer_id == num_layers - 1:
layer_activation = None
dropout_rate = 0.0
else:
layer_activation = self.activation
dropout_rate = self.dropout_rate
# Add layer
if layer_id == 0:
input_dims = self.layer_dims[-1] # input and output dimensions of the sub-layer are identical
else:
input_dims = self.layer_dims[layer_id - 1]
self.layers.append(FeedForwardLayer(input_dims,
self.layer_dims[layer_id],
self.float_dtype,
dropout_rate=dropout_rate,
activation=layer_activation,
use_bias=self.use_bias,
use_layer_norm=self.use_layer_norm,
training=self.training,
name='ff_layer_{:d}'.format(layer_id + 1)))
def forward(self, inputs):
""" Propagates input data through the specified layers. """
with tf.compat.v1.variable_scope(self.name, values=[inputs]):
for layer in self.layers:
inputs = layer.forward(inputs)
return inputs
class PReLU(object):
""" Implements the adaptive Parametric Rectified Linear Unit activation function. """
def __init__(self,
in_size,
initial_slope=1.0,
name=None):
with tf.compat.v1.variable_scope(name, default_name='PReLu'):
self.slope = tf.Variable(initial_slope * np.ones((in_size,)).astype('float32'), name='slope')
def forward(self, inputs):
pos = tf.nn.relu(inputs)
neg = inputs - pos
outputs = pos + self.slope * neg
return outputs
class MaskedCrossEntropy(object):
""" Implements the cross-entropy loss with optionally applied label smoothing for better model generalization. """
def __init__(self, vocab_size, label_smoothing_discount, int_dtype, float_dtype, time_major, name=None):
# Set attributes
self.vocab_size = vocab_size
self.label_smoothing_discount = label_smoothing_discount
self.int_dtype = int_dtype
self.float_dtype = float_dtype
        self.time_dim = int(not time_major)  # i.e. 0 if time_major, 1 if batch_major
self.name = name
def _get_smoothing_parameters(self):
""" Calculates the confidence values used for label smoothing application. """
# Assign low confidence, i.e. the label smoothing discount value, to all non-true labels
one_out_vocab = tf.cast(self.vocab_size - 1, self.float_dtype)
# For cross-entropy, each row of the labels matrix must be a valid probability distribution
low_confidence = self.label_smoothing_discount / one_out_vocab
high_confidence = 1.0 - self.label_smoothing_discount
# Normalizing constant for better readability, which is the best cross-entropy value with soft targets
# Has no impact on training
normalizing_factor = -(1.0 * high_confidence * tf.math.log(high_confidence)
+ one_out_vocab * low_confidence * tf.math.log(low_confidence + 1e-20))
return high_confidence, low_confidence, normalizing_factor
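    # Added illustrative note (not from the original file): with a vocabulary of
    # 1000 tokens and label_smoothing_discount = 0.1, the true label receives a
    # target probability of 0.9 and each of the other 999 labels receives 0.1 / 999.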
def forward(self, logits, targets, target_mask, training):
with tf.compat.v1.name_scope(self.name, values=[logits, targets, target_mask]):
# Get smoothing parameters (no smoothing/ normalization at test time)
high_confidence, low_confidence, normalizing_factor = \
tf.cond(pred=tf.logical_and(training, tf.greater(self.label_smoothing_discount, 0.0)),
true_fn=self._get_smoothing_parameters,
false_fn=lambda: (1.0, 0.0, 0.0))
# If necessary, pad the label and the label-mask to match the length of decoder output
# Not sure if that's a sensible thing to do
targets_shape = tf.shape(input=targets)
logits_shape = tf.shape(input=logits)
targets_length = targets_shape[self.time_dim]
logits_length = logits_shape[self.time_dim]
def _get_pad_shape(shape_to_pad, shape_to_match):
""" Calculates the shape of the padding to be applied to the logits or targets. """
time_steps_to_pad = shape_to_match[self.time_dim] - shape_to_pad[self.time_dim]
if self.time_dim == 0:
pad_shape = [time_steps_to_pad, shape_to_pad[1]]
else:
pad_shape = [shape_to_pad[0], time_steps_to_pad]
return pad_shape
def _pad_targets(targets, target_mask, logits):
""" Pads the targets to match the size of the model-generated logits. """
pad_shape = _get_pad_shape(targets_shape, logits_shape)
targets = tf.concat([targets, tf.zeros(pad_shape, dtype=self.int_dtype)], axis=self.time_dim)
target_mask = tf.concat([target_mask, tf.zeros(pad_shape, dtype=self.float_dtype)], axis=self.time_dim)
return targets, target_mask, logits
def _pad_logits(targets, target_mask, logits):
""" Pads the logits to match the size of the ground-truth targets. """
pad_shape = _get_pad_shape(logits_shape, targets_shape)
logits = tf.concat([logits, tf.zeros(pad_shape + [logits_shape[-1]], dtype=self.float_dtype)],
axis=self.time_dim)
return targets, target_mask, logits
# For teacher-forcing with RNN models
targets, target_mask, logits = tf.cond(pred=tf.equal(targets_length, logits_length),
true_fn=lambda: (targets, target_mask, logits),
false_fn=lambda: tf.cond(pred=tf.less(targets_length, logits_length),
true_fn=lambda: _pad_targets(targets, target_mask, logits),
false_fn=lambda: _pad_logits(targets, target_mask, logits)))
# Project and optionally smooth target token ids
projected_targets = tf.one_hot(targets,
depth=self.vocab_size,
on_value=high_confidence,
off_value=low_confidence,
dtype=self.float_dtype)
# Compute token-level loss
flat_logits = tf.reshape(logits, [-1, self.vocab_size])
flat_targets = tf.reshape(projected_targets, [-1, self.vocab_size])
flat_loss = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, labels=tf.stop_gradient(flat_targets))
flat_normalized_loss = flat_loss - normalizing_factor
# Compute sentence- and batch-level losses (i.e. mean token-loss per sentence/ batch)
normalized_loss = tf.reshape(flat_normalized_loss, tf.shape(input=targets))
masked_loss = normalized_loss * target_mask
sentence_lengths = tf.reduce_sum(input_tensor=target_mask, axis=self.time_dim, keepdims=False)
sentence_loss = tf.math.divide(tf.reduce_sum(input_tensor=masked_loss, axis=self.time_dim, keepdims=False), sentence_lengths)
batch_loss = tf.reduce_mean(input_tensor=sentence_loss, keepdims=False)
return masked_loss, sentence_loss, batch_loss
|
|
#!/usr/bin/env python
"""
Filter the event file and the exposure map, divide by CCD, then run xtdac on each CCD
"""
import argparse
import glob
import os
import sys
import shutil
import astropy.io.fits as pyfits
from XtDac.ChandraUtils import find_files
from XtDac.ChandraUtils import logging_system
from XtDac.ChandraUtils.data_package import DataPackage
from XtDac.ChandraUtils.run_command import CommandRunner
from XtDac.ChandraUtils.configuration import get_configuration
from XtDac.ChandraUtils.work_within_directory import work_within_directory
from XtDac.ChandraUtils.sanitize_filename import sanitize_filename
# This is just to make sure that CIAO is loaded
import psf
import caldb4
def filter_exposure_map(exposure_map, regions_file, eventfile, new_exposure_map, resample_factor=1):
if regions_file.find(".reg") < 0:
# Generate an almost empty event file which will be used by xtcheesemask to extract the WCS and the
# characteristics of the hardware unit (i.e., of the ccd)
with pyfits.open(eventfile) as f:
small_data = f['EVENTS'].data[:2]
header = f['EVENTS'].header
new_hdu = pyfits.BinTableHDU(data=small_data, header=header)
# Now append the region table
with pyfits.open(regions_file) as f:
region_hdu = f['SRCREG']
hdu_list = pyfits.HDUList([pyfits.PrimaryHDU(), new_hdu, region_hdu])
temp_file = '___2_events.fits'
hdu_list.writeto(temp_file, clobber=True)
else:
temp_file = regions_file
cmd_line = "xtcheesemask.py -i %s -r %s -o %s -s %s --no-reverse" \
% (exposure_map, temp_file, new_exposure_map, resample_factor)
runner.run(cmd_line)
os.remove(temp_file)
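# Added illustrative note (hypothetical file names, not from the original script):
#   filter_exposure_map("ccd_3_expmap.fits", "ccd_3_sources.reg",
#                       "ccd_3_events.fits", "ccd_3_expmap_filtered.fits",
#                       resample_factor=2)
# would pass the existing .reg region file straight to xtcheesemask.py and write
# the filtered exposure map to ccd_3_expmap_filtered.fits.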
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run Bayesian Block algorithm')
parser.add_argument("-c", "--config_file", help="Path to the configuration file", type=str, required=True)
parser.add_argument("-o", "--obsid", help="Observation ID Numbers", type=str, required=True, nargs="+")
parser.add_argument('--simulate', help='If set, a background-only dataset is simulated from the observation and '
'the algorithm is run on the simulated dataset', action='store_true')
parser.add_argument('--simulated_data', help='set this if you are using simulated data from a previous call of '
'xtdac_acis with --simulate',
action='store_true')
# parser.add_argument('-r', '--region_repo', help="Path to the repository of region files",
# type=str, required=True)
# parser.add_argument('-a', "--adj_factor",
# help="If region files need to be adjusted, what factor to increase axes of ellipses by",
# type=float, required=True)
# parser.add_argument("-e1", "--emin", help="Minimum energy (eV)", type=int, required=True)
#
# parser.add_argument("-e2", "--emax", help="Maximum energy (eV)", type=int, required=True)
#
# parser.add_argument("-c", "--ncpus", help="Number of CPUs to use (default=1)",
# type=int, default=1, required=False)
#
# parser.add_argument("-p", "--typeIerror",
# help="Type I error probability for the Bayesian Blocks algorithm.",
# type=float,
# default=1e-5,
# required=False)
#
# parser.add_argument("-s", "--sigmaThreshold",
# help="Threshold for the final significance. All intervals found "
# "by the bayesian blocks "
# "algorithm which does not surpass this threshold will not be saved in the "
# "final file.",
# type=float,
# default=5.0,
# required=False)
#
# parser.add_argument("-m", "--multiplicity", help="Control the overlap of the regions."
# " A multiplicity of 2 means the centers of the regions are"
# " shifted by 1/2 of the region size (they overlap by 50 percent),"
# " a multiplicity of 4 means they are shifted by 1/4 of "
# " their size (they overlap by 75 percent), and so on.",
# required=False, default=2.0, type=float)
#
# parser.add_argument("-v", "--verbosity", help="Info or debug", type=str, required=False, default='info',
# choices=['info', 'debug'])
# Get the logger
logger = logging_system.get_logger(os.path.basename(sys.argv[0]))
# Get the command runner
runner = CommandRunner(logger)
args = parser.parse_args()
# Get the configuration
config = get_configuration(args.config_file)
# Get work directory and sanitize it
work_directory = sanitize_filename(config['work directory'])
# Now remove [ and ] (which might be there if the user is running a job array on PBS). They would confuse
# CIAO executables
work_directory = work_directory.replace("[","").replace("]","")
# Check whether we need to remove the workdir or not
remove_work_dir = bool(config['remove work directory'])
# Now move in the work directory and do the processing
# Encapsulate all in a try/except clause so that even in case of errors we have the opportunity to clean up
# the workdir
try:
for this_obsid in args.obsid:
with work_within_directory(os.path.join(work_directory, "__%s" % str(this_obsid)), create=True,
remove=remove_work_dir):
# Get the data package for the input data
data_package = DataPackage(os.path.join(config['data repository'], str(this_obsid)))
# If we are in simulation mode, simulate a new dataset and create a new data package to be used
# instead
if args.simulate:
logger.info("Simulating data...")
# NOTE: .get() will copy the files here
bkgmap = data_package.get("bkgmap")
asolfile = data_package.get("asol")
evtfile = data_package.get('evt3')
expfile = data_package.get('exp3')
sim_evt_file = "%s_sim.fits" % (os.path.splitext(os.path.basename(evtfile.filename))[0])
cmd_line = "xtc_simulate_bkg.py --bkgmap %s --expomap %s --evtfile %s --asolfile %s " \
"--outfile %s" % (bkgmap.filename, expfile.filename, evtfile.filename,
asolfile.filename, sim_evt_file)
runner.run(cmd_line)
logger.info("Creating new data package")
sim_dir = '_simulations'
if os.path.exists(sim_dir):
shutil.rmtree(sim_dir)
os.makedirs(sim_dir)
# Override the input data package with a new one
data_package = data_package.copy_to(sim_dir)
# Make temporarily read-write so we can update the event file
data_package.read_only = False
# Override in the new one the event file (so all subsequent commands will use that one)
data_package.store("evt3", sim_evt_file, "Simulated event file", force=True)
# Override in the new package the exposure map that has been modified (so all subsequent commands will
# use that one)
data_package.store("exp3", expfile.filename, "Simulated exposure map", force=True)
# Restore read-only status
data_package.read_only = True
# Make a specific output package (so we don't risk overwriting an existing output package for real
# data)
out_package = DataPackage("%s_sim" % str(this_obsid), create=True)
else:
out_package = DataPackage(str(this_obsid), create=True)
# Make sure the output package is empty; if it is not, empty it
out_package.clear()
# Start the processing
# NOTE: .get() will copy the file here
evtfile = data_package.get('evt3')
tsvfile = data_package.get('tsv')
expfile = data_package.get('exp3')
#######################################
# Filtering
#######################################
# Figure out the path for the regions files for this obsid
region_dir = os.path.join(os.path.expandvars(os.path.expanduser(config['region repository'])),
'%s' % int(this_obsid.split("_")[0]))
# If we are simulating, there is no need to randomize the time (the simulation does not discretize
# the arrival times into frames). Otherwise, if we are dealing with true data, we need to randomize
# the arrival time within each frame time
if args.simulate or args.simulated_data:
additional_options = '--simulation_mode'
else:
additional_options = ''
cmd_line = "xtc_filter_event_file.py --region_dir %s --in_package %s --out_package %s " \
"--emin %d --emax %d " \
"--adj_factor %s %s" \
% (region_dir, data_package.location, out_package.location,
config['energy min'], config['energy max'], config['adjustment factor'],
additional_options)
runner.run(cmd_line)
# Products are: filtered_evt3, all_regions and (if any) streak_regions_ds9
###### Remove hot pixels
events_no_hot_pixels = '%s_filtered_nohot.fits' % this_obsid
cmd_line = "xtc_prefilter_hot_pixels.py --evtfile %s --outfile %s" \
% (out_package.get('filtered_evt3').filename, events_no_hot_pixels)
runner.run(cmd_line)
out_package.store('filtered_nohot', events_no_hot_pixels,
"Filtered event file (evt3) with events in hot pixels removed")
#######################################
# Separate CCDs
#######################################
cmd_line = "xtc_separate_CCDs.py --evtfile %s" % out_package.get('filtered_nohot').filename
runner.run(cmd_line)
pattern = 'ccd_*_%s' % os.path.basename(out_package.get('filtered_nohot').filename)
ccd_files = find_files.find_files('.', pattern)
assert len(ccd_files) > 0, "No CCD in this observation?? This is likely a bug. " \
"I was looking for %s" % (pattern)
#######################################
# Run Bayesian Block on each CCD
#######################################
for ccd_file in ccd_files:
# Get the root of the ccd filename and the ccd number (will be used to name the files)
ccd_root = os.path.splitext(os.path.basename(ccd_file))[0]
ccd_number = os.path.basename(ccd_file).split("_")[1]
logger.info("########################################")
logger.info("Processing CCD %s..." % ccd_number)
logger.info("########################################")
# First filter the exposure map
filtered_expomap = 'ccd_%s_filtered_expomap.fits' % ccd_number
# xtcheesemask, used by filter_exposure_map, cannot overwrite files, so delete the file
# if it already exists
try:
os.remove(filtered_expomap)
except OSError:
pass
# NOTE: use only a resample factor of 1, or the destreaking will fail
filter_exposure_map(data_package.get('exp3').filename, out_package.get('all_regions').filename,
ccd_file, filtered_expomap, resample_factor=1)
if out_package.has('streak_regions_ds9'):
# Filter also for the streaks
temp_file = '__expomap_temp.fits'
filter_exposure_map(filtered_expomap, out_package.get('streak_regions_ds9').filename,
ccd_file, temp_file, resample_factor=1)
os.remove(filtered_expomap)
os.rename(temp_file, filtered_expomap)
# Register the filtered expomap
out_package.store('ccd_%s_filtered_expomap' % ccd_number, filtered_expomap,
"Expomap for CCD %s, filtered for all the regions which have been "
"used for the event file" % ccd_number)
###### XTDAC #########
cmd_line = "xtdac.py -e %s -x %s -w yes -c %s -p %s -s %s -m %s -v %s --max_duration 50000 " \
"--transient_pos --min_number_of_events %i" \
% (ccd_file, filtered_expomap, config['number of processes'],
config['type I error probability'],
config['sigma threshold'], config['multiplicity'], config['verbosity'],
config['min number of events'])
runner.run(cmd_line)
#####################
# Now register the output in the output data package
baseroot = os.path.splitext(ccd_file)[0]
out_package.store("ccd_%s_raw_list" % ccd_number, "%s_res.txt" % baseroot,
"Unfiltered list of candidates for CCD %s (output of xtdac)" % ccd_number)
out_package.store("ccd_%s_xtdac_html" % ccd_number,
"%s_res.html" % baseroot,
"HTML file produced by xtdac, containing the unfiltered list of candidates "
"for ccd %s" % ccd_number)
output_files = glob.glob("%s_*candidate*.reg" % baseroot)
for i, output in enumerate(output_files):
reg_id = output.split("_")[-1].split(".reg")[0]
out_package.store("%s_candidate_reg%s" % (baseroot, reg_id), output,
"Ds9 region file for candidate %s" % reg_id)
#######################################
# Filter candidate list
#######################################
# Hot pixels
check_hp_file = "check_hp_%s_%s.txt" % (ccd_number, this_obsid)
cmd_line = "xtc_remove_hot_pixels.py --obsid %s --evtfile %s --bbfile %s --outfile %s --debug no" \
% (this_obsid, ccd_file, "%s_res.txt" % baseroot, check_hp_file)
runner.run(cmd_line)
# Register output
out_package.store("ccd_%s_check_hp" % ccd_number, check_hp_file,
"List of candidates for CCD %s with hot pixels flagged" % ccd_number)
# Variable sources
check_var_file = "check_var_%s_%s.txt" % (ccd_number, this_obsid)
cmd_line = "xtc_flag_near_variable_sources.py --bbfile %s --outfile %s --eventfile %s" \
% (check_hp_file, check_var_file, ccd_file)
runner.run(cmd_line)
# Register output
out_package.store("ccd_%s_check_var" % ccd_number, check_var_file,
"List of candidates for CCD %s with hot pixels and "
"variable sources flagged" % ccd_number)
# Now add candidates to master list (one list for this obsid)
candidate_file = "%s_all_candidates.txt" % this_obsid
cmd_line = "xtc_add_to_masterlist.py --package %s --masterfile %s" % (out_package.location,
candidate_file)
runner.run(cmd_line)
# Reopen the file and write the command line which generated this analysis as a comment
with open(candidate_file, "a") as f:
f.write("\n# command line:\n# %s\n" % " ".join(sys.argv))
out_package.store("candidates", candidate_file, "List of all candidates found in all CCDs")
# Now make the GIFs with the candidate transients
cmd_line = "xtc_gif_generator.py --masterfile %s" % candidate_file
runner.run(cmd_line)
# Register files
animated_gifs = glob.glob("*_cand_*.gif")
for animated_gif in animated_gifs:
root = os.path.splitext(animated_gif)[0]
out_package.store(root, animated_gif, "Animation of the transient %s" % root)
# Move package to output repository
logger.info("Move the results to the output repository %s" % config['output repository'])
out_repo_dir = sanitize_filename(config['output repository'])
if not os.path.exists(out_repo_dir):
logger.info("Creating directory %s" % out_repo_dir)
os.makedirs(out_repo_dir)
out_package.copy_to(out_repo_dir)
except:
raise
finally:
if remove_work_dir and config['work directory'] != '.':
shutil.rmtree(work_directory)
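# Illustrative invocation (a sketch; the script and configuration file names
# are assumptions based on the help text above):
#
#     python xtdac_acis.py -c config.yml -o 1234 4321 --simulate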
|
|
"""Support for interface with a Gree climate systems."""
import logging
from typing import List
from greeclimate.device import (
FanSpeed,
HorizontalSwing,
Mode,
TemperatureUnits,
VerticalSwing,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
PRESET_SLEEP,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SWING_BOTH,
SWING_HORIZONTAL,
SWING_OFF,
SWING_VERTICAL,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_WHOLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
COORDINATOR,
DOMAIN,
FAN_MEDIUM_HIGH,
FAN_MEDIUM_LOW,
MAX_TEMP,
MIN_TEMP,
TARGET_TEMPERATURE_STEP,
)
_LOGGER = logging.getLogger(__name__)
HVAC_MODES = {
Mode.Auto: HVAC_MODE_AUTO,
Mode.Cool: HVAC_MODE_COOL,
Mode.Dry: HVAC_MODE_DRY,
Mode.Fan: HVAC_MODE_FAN_ONLY,
Mode.Heat: HVAC_MODE_HEAT,
}
HVAC_MODES_REVERSE = {v: k for k, v in HVAC_MODES.items()}
PRESET_MODES = [
PRESET_ECO, # Power saving mode
PRESET_AWAY, # Steady heat, or 8C mode on gree units
PRESET_BOOST, # Turbo mode
PRESET_NONE, # Default operating mode
PRESET_SLEEP, # Sleep mode
]
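# Each preset maps onto a single device flag (see async_set_preset_mode below):
# PRESET_ECO -> power_save, PRESET_AWAY -> steady_heat, PRESET_BOOST -> turbo,
# PRESET_SLEEP -> sleep, while PRESET_NONE clears all of them.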
FAN_MODES = {
FanSpeed.Auto: FAN_AUTO,
FanSpeed.Low: FAN_LOW,
FanSpeed.MediumLow: FAN_MEDIUM_LOW,
FanSpeed.Medium: FAN_MEDIUM,
FanSpeed.MediumHigh: FAN_MEDIUM_HIGH,
FanSpeed.High: FAN_HIGH,
}
FAN_MODES_REVERSE = {v: k for k, v in FAN_MODES.items()}
SWING_MODES = [SWING_OFF, SWING_VERTICAL, SWING_HORIZONTAL, SWING_BOTH]
SUPPORTED_FEATURES = (
SUPPORT_TARGET_TEMPERATURE
| SUPPORT_FAN_MODE
| SUPPORT_PRESET_MODE
| SUPPORT_SWING_MODE
)
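# The SUPPORT_* constants are bit flags, so a capability can be checked on the
# composite value with a bitwise AND, e.g. (illustrative only):
#
#     supports_fan = bool(SUPPORTED_FEATURES & SUPPORT_FAN_MODE)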
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Gree HVAC device from a config entry."""
async_add_entities(
[
GreeClimateEntity(coordinator)
for coordinator in hass.data[DOMAIN][COORDINATOR]
]
)
class GreeClimateEntity(CoordinatorEntity, ClimateEntity):
"""Representation of a Gree HVAC device."""
def __init__(self, coordinator):
"""Initialize the Gree device."""
super().__init__(coordinator)
self._name = coordinator.device.device_info.name
self._mac = coordinator.device.device_info.mac
@property
def name(self) -> str:
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique id for the device."""
return self._mac
@property
def device_info(self):
"""Return device specific attributes."""
return {
"name": self._name,
"identifiers": {(DOMAIN, self._mac)},
"manufacturer": "Gree",
"connections": {(CONNECTION_NETWORK_MAC, self._mac)},
}
@property
def temperature_unit(self) -> str:
"""Return the temperature units for the device."""
units = self.coordinator.device.temperature_units
return TEMP_CELSIUS if units == TemperatureUnits.C else TEMP_FAHRENHEIT
@property
def precision(self) -> float:
"""Return the precision of temperature for the device."""
return PRECISION_WHOLE
@property
def current_temperature(self) -> float:
"""Return the target temperature, gree devices don't provide internal temp."""
return self.target_temperature
@property
def target_temperature(self) -> float:
"""Return the target temperature for the device."""
return self.coordinator.device.target_temperature
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
if ATTR_TEMPERATURE not in kwargs:
raise ValueError(f"Missing parameter {ATTR_TEMPERATURE}")
temperature = kwargs[ATTR_TEMPERATURE]
_LOGGER.debug(
"Setting temperature to %d for %s",
temperature,
self._name,
)
self.coordinator.device.target_temperature = round(temperature)
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def min_temp(self) -> float:
"""Return the minimum temperature supported by the device."""
return MIN_TEMP
@property
def max_temp(self) -> float:
"""Return the maximum temperature supported by the device."""
return MAX_TEMP
@property
def target_temperature_step(self) -> float:
"""Return the target temperature step support by the device."""
return TARGET_TEMPERATURE_STEP
@property
def hvac_mode(self) -> str:
"""Return the current HVAC mode for the device."""
if not self.coordinator.device.power:
return HVAC_MODE_OFF
return HVAC_MODES.get(self.coordinator.device.mode)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
if hvac_mode not in self.hvac_modes:
raise ValueError(f"Invalid hvac_mode: {hvac_mode}")
_LOGGER.debug(
"Setting HVAC mode to %s for device %s",
hvac_mode,
self._name,
)
if hvac_mode == HVAC_MODE_OFF:
self.coordinator.device.power = False
await self.coordinator.push_state_update()
self.async_write_ha_state()
return
if not self.coordinator.device.power:
self.coordinator.device.power = True
self.coordinator.device.mode = HVAC_MODES_REVERSE.get(hvac_mode)
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def hvac_modes(self) -> List[str]:
"""Return the HVAC modes support by the device."""
modes = [*HVAC_MODES_REVERSE]
modes.append(HVAC_MODE_OFF)
return modes
@property
def preset_mode(self) -> str:
"""Return the current preset mode for the device."""
if self.coordinator.device.steady_heat:
return PRESET_AWAY
if self.coordinator.device.power_save:
return PRESET_ECO
if self.coordinator.device.sleep:
return PRESET_SLEEP
if self.coordinator.device.turbo:
return PRESET_BOOST
return PRESET_NONE
async def async_set_preset_mode(self, preset_mode):
"""Set new preset mode."""
if preset_mode not in PRESET_MODES:
raise ValueError(f"Invalid preset mode: {preset_mode}")
_LOGGER.debug(
"Setting preset mode to %s for device %s",
preset_mode,
self._name,
)
self.coordinator.device.steady_heat = False
self.coordinator.device.power_save = False
self.coordinator.device.turbo = False
self.coordinator.device.sleep = False
if preset_mode == PRESET_AWAY:
self.coordinator.device.steady_heat = True
elif preset_mode == PRESET_ECO:
self.coordinator.device.power_save = True
elif preset_mode == PRESET_BOOST:
self.coordinator.device.turbo = True
elif preset_mode == PRESET_SLEEP:
self.coordinator.device.sleep = True
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def preset_modes(self) -> List[str]:
"""Return the preset modes support by the device."""
return PRESET_MODES
@property
def fan_mode(self) -> str:
"""Return the current fan mode for the device."""
speed = self.coordinator.device.fan_speed
return FAN_MODES.get(speed)
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
if fan_mode not in FAN_MODES_REVERSE:
raise ValueError(f"Invalid fan mode: {fan_mode}")
self.coordinator.device.fan_speed = FAN_MODES_REVERSE.get(fan_mode)
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def fan_modes(self) -> List[str]:
"""Return the fan modes support by the device."""
return [*FAN_MODES_REVERSE]
@property
def swing_mode(self) -> str:
"""Return the current swing mode for the device."""
h_swing = self.coordinator.device.horizontal_swing == HorizontalSwing.FullSwing
v_swing = self.coordinator.device.vertical_swing == VerticalSwing.FullSwing
if h_swing and v_swing:
return SWING_BOTH
if h_swing:
return SWING_HORIZONTAL
if v_swing:
return SWING_VERTICAL
return SWING_OFF
async def async_set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
if swing_mode not in SWING_MODES:
raise ValueError(f"Invalid swing mode: {swing_mode}")
_LOGGER.debug(
"Setting swing mode to %s for device %s",
swing_mode,
self._name,
)
self.coordinator.device.horizontal_swing = HorizontalSwing.Center
self.coordinator.device.vertical_swing = VerticalSwing.FixedMiddle
if swing_mode in (SWING_BOTH, SWING_HORIZONTAL):
self.coordinator.device.horizontal_swing = HorizontalSwing.FullSwing
if swing_mode in (SWING_BOTH, SWING_VERTICAL):
self.coordinator.device.vertical_swing = VerticalSwing.FullSwing
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def swing_modes(self) -> List[str]:
"""Return the swing modes currently supported for this device."""
return SWING_MODES
@property
def supported_features(self) -> int:
"""Return the supported features for this device integration."""
return SUPPORTED_FEATURES
|
|
"""Test the Broadlink config flow."""
import errno
import socket
import broadlink.exceptions as blke
import pytest
from homeassistant import config_entries
from homeassistant.components.broadlink.const import DOMAIN
from . import get_device
from tests.async_mock import call, patch
@pytest.fixture(autouse=True)
def broadlink_setup_fixture():
"""Mock broadlink entry setup."""
with patch(
"homeassistant.components.broadlink.async_setup_entry", return_value=True
):
yield
async def test_flow_user_works(hass):
"""Test a config flow initiated by the user.
Best case scenario with no errors or locks.
"""
device = get_device("Living Room")
mock_api = device.get_mock_api()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch("broadlink.discover", return_value=[mock_api]) as mock_discover:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "form"
assert result["step_id"] == "finish"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"] == device.get_entry_data()
assert mock_discover.call_count == 1
assert mock_api.auth.call_count == 1
async def test_flow_user_already_in_progress(hass):
"""Test we do not accept more than one config flow per device."""
device = get_device("Living Room")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[device.get_mock_api()]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[device.get_mock_api()]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_flow_user_mac_already_configured(hass):
"""Test we do not accept more than one config entry per device.
We need to abort the flow and update the existing entry.
"""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
device.host = "192.168.1.64"
device.timeout = 20
mock_api = device.get_mock_api()
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert dict(mock_entry.data) == device.get_entry_data()
assert mock_api.auth.call_count == 0
async def test_flow_user_invalid_ip_address(hass):
"""Test we handle an invalid IP address in the user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", side_effect=OSError(errno.EINVAL, None)):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "0.0.0.1"},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_host"}
async def test_flow_user_invalid_hostname(hass):
"""Test we handle an invalid hostname in the user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", side_effect=OSError(socket.EAI_NONAME, None)):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "pancakemaster.local"},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_host"}
async def test_flow_user_device_not_found(hass):
"""Test we handle a device not found in the user step."""
device = get_device("Living Room")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_user_network_unreachable(hass):
"""Test we handle a network unreachable in the user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", side_effect=OSError(errno.ENETUNREACH, None)):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "192.168.1.32"},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_user_os_error(hass):
"""Test we handle an OS error in the user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", side_effect=OSError()):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "192.168.1.32"},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "unknown"}
async def test_flow_auth_authentication_error(hass):
"""Test we handle an authentication error in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "form"
assert result["step_id"] == "reset"
assert result["errors"] == {"base": "invalid_auth"}
async def test_flow_auth_device_offline(hass):
"""Test we handle a device offline in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.DeviceOfflineError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_auth_firmware_error(hass):
"""Test we handle a firmware error in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.BroadlinkException()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "unknown"}
async def test_flow_auth_network_unreachable(hass):
"""Test we handle a network unreachable in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = OSError(errno.ENETUNREACH, None)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_auth_os_error(hass):
"""Test we handle an OS error in the auth step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = OSError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "unknown"}
async def test_flow_reset_works(hass):
"""Test we finish a config flow after a factory reset."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
with patch("broadlink.discover", return_value=[device.get_mock_api()]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"] == device.get_entry_data()
async def test_flow_unlock_works(hass):
"""Test we finish a config flow with an unlock request."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"unlock": True},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"] == device.get_entry_data()
assert mock_api.set_lock.call_args == call(False)
assert mock_api.set_lock.call_count == 1
async def test_flow_unlock_device_offline(hass):
"""Test we handle a device offline in the unlock step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
mock_api.set_lock.side_effect = blke.DeviceOfflineError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"unlock": True},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_unlock_firmware_error(hass):
"""Test we handle a firmware error in the unlock step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
mock_api.set_lock.side_effect = blke.BroadlinkException
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"unlock": True},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {"base": "unknown"}
async def test_flow_unlock_network_unreachable(hass):
"""Test we handle a network unreachable in the unlock step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
mock_api.set_lock.side_effect = OSError(errno.ENETUNREACH, None)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"unlock": True},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_unlock_os_error(hass):
"""Test we handle an OS error in the unlock step."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
mock_api.set_lock.side_effect = OSError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"unlock": True},
)
assert result["type"] == "form"
assert result["step_id"] == "unlock"
assert result["errors"] == {"base": "unknown"}
async def test_flow_do_not_unlock(hass):
"""Test we do not unlock the device if the user does not want to."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
mock_api.is_locked = True
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"unlock": False},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"] == device.get_entry_data()
assert mock_api.set_lock.call_count == 0
async def test_flow_import_works(hass):
"""Test an import flow."""
device = get_device("Living Room")
mock_api = device.get_mock_api()
with patch("broadlink.discover", return_value=[mock_api]) as mock_discover:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": device.host},
)
assert result["type"] == "form"
assert result["step_id"] == "finish"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"name": device.name},
)
assert result["type"] == "create_entry"
assert result["title"] == device.name
assert result["data"]["host"] == device.host
assert result["data"]["mac"] == device.mac
assert result["data"]["type"] == device.devtype
assert mock_api.auth.call_count == 1
assert mock_discover.call_count == 1
async def test_flow_import_already_in_progress(hass):
"""Test we do not import more than one flow per device."""
device = get_device("Living Room")
data = {"host": device.host}
with patch("broadlink.discover", return_value=[device.get_mock_api()]):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=data
)
with patch("broadlink.discover", return_value=[device.get_mock_api()]):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=data
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_flow_import_host_already_configured(hass):
"""Test we do not import a host that is already configured."""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": device.host},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_flow_import_mac_already_configured(hass):
"""Test we do not import more than one config entry per device.
We need to abort the flow and update the existing entry.
"""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
device.host = "192.168.1.16"
mock_api = device.get_mock_api()
with patch("broadlink.discover", return_value=[mock_api]):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": device.host},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert mock_entry.data["host"] == device.host
assert mock_entry.data["mac"] == device.mac
assert mock_entry.data["type"] == device.devtype
assert mock_api.auth.call_count == 0
async def test_flow_import_device_not_found(hass):
"""Test we handle a device not found in the import step."""
with patch("broadlink.discover", return_value=[]):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "192.168.1.32"},
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_flow_import_invalid_ip_address(hass):
"""Test we handle an invalid IP address in the import step."""
with patch("broadlink.discover", side_effect=OSError(errno.EINVAL, None)):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "0.0.0.1"},
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_host"
async def test_flow_import_invalid_hostname(hass):
"""Test we handle an invalid hostname in the import step."""
with patch("broadlink.discover", side_effect=OSError(socket.EAI_NONAME, None)):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "hotdog.local"},
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_host"
async def test_flow_import_network_unreachable(hass):
"""Test we handle a network unreachable in the import step."""
with patch("broadlink.discover", side_effect=OSError(errno.ENETUNREACH, None)):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "192.168.1.64"},
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_flow_import_os_error(hass):
"""Test we handle an OS error in the import step."""
with patch("broadlink.discover", side_effect=OSError()):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"host": "192.168.1.64"},
)
assert result["type"] == "abort"
assert result["reason"] == "unknown"
async def test_flow_reauth_works(hass):
"""Test a reauthentication flow."""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
data = {"name": device.name, **device.get_entry_data()}
with patch("broadlink.gendevice", return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=data
)
assert result["type"] == "form"
assert result["step_id"] == "reset"
mock_api = device.get_mock_api()
with patch("broadlink.discover", return_value=[mock_api]) as mock_discover:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert dict(mock_entry.data) == device.get_entry_data()
assert mock_api.auth.call_count == 1
assert mock_discover.call_count == 1
async def test_flow_reauth_invalid_host(hass):
"""Test we do not accept an invalid host for reauthentication.
The MAC address cannot change.
"""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
data = {"name": device.name, **device.get_entry_data()}
with patch("broadlink.gendevice", return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=data
)
device.mac = get_device("Office").mac
mock_api = device.get_mock_api()
with patch("broadlink.discover", return_value=[mock_api]) as mock_discover:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_host"}
assert mock_discover.call_count == 1
assert mock_api.auth.call_count == 0
async def test_flow_reauth_valid_host(hass):
"""Test we accept a valid host for reauthentication.
The hostname/IP address may change. We need to update the entry.
"""
device = get_device("Living Room")
mock_entry = device.get_mock_entry()
mock_entry.add_to_hass(hass)
mock_api = device.get_mock_api()
mock_api.auth.side_effect = blke.AuthenticationError()
data = {"name": device.name, **device.get_entry_data()}
with patch("broadlink.gendevice", return_value=mock_api):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=data
)
device.host = "192.168.1.128"
mock_api = device.get_mock_api()
with patch("broadlink.discover", return_value=[mock_api]) as mock_discover:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": device.host, "timeout": device.timeout},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert mock_entry.data["host"] == device.host
assert mock_discover.call_count == 1
assert mock_api.auth.call_count == 1
|
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
from collections import deque
from ply import yacc
from . import ast
from .lexer import Lexer
from ..errors import ThriftParserError
__all__ = ['Parser']
class ParserSpec(object):
"""Parser specification for Thrift IDL files.
Adapted from ``thriftpy.parser.parser``."""
tokens = Lexer.tokens
def p_error(self, p):
if p is None:
raise ThriftParserError('Grammar error at EOF')
raise ThriftParserError(
'Grammar error %r at line %d' % (p.value, p.lineno)
)
def p_start(self, p):
'''start : header definition'''
p[0] = ast.Program(headers=p[1], definitions=p[2])
def p_header(self, p):
'''header : header_unit_ header
|'''
self._parse_seq(p)
def p_header_unit_(self, p):
'''header_unit_ : header_unit ';'
| header_unit'''
p[0] = p[1]
def p_header_unit(self, p):
'''header_unit : include
| namespace'''
p[0] = p[1]
def p_include(self, p):
'''include : INCLUDE IDENTIFIER LITERAL
| INCLUDE LITERAL'''
if len(p) == 4:
p[0] = ast.Include(name=p[2], path=p[3], lineno=p.lineno(1))
else:
p[0] = ast.Include(name=None, path=p[2], lineno=p.lineno(1))
def p_namespace(self, p):
'''namespace : NAMESPACE namespace_scope IDENTIFIER'''
p[0] = ast.Namespace(scope=p[2], name=p[3], lineno=p.lineno(1))
def p_namespace_scope(self, p):
'''namespace_scope : '*'
| IDENTIFIER'''
p[0] = p[1]
def p_sep(self, p):
'''sep : ','
| ';'
'''
def p_definition(self, p):
'''definition : definition_unit_ definition
|'''
self._parse_seq(p)
def p_definition_unit_(self, p):
'''definition_unit_ : definition_unit ';'
| definition_unit'''
p[0] = p[1]
def p_definition_unit(self, p):
'''definition_unit : const
| ttype
'''
p[0] = p[1]
def p_const_bool(self, p):
'''const_bool : TRUE
| FALSE'''
p[0] = p[1] == 'true'
def p_const(self, p):
'''const : CONST field_type IDENTIFIER '=' const_value
| CONST field_type IDENTIFIER '=' const_value sep'''
p[0] = ast.Const(
name=p[3],
value_type=p[2],
value=p[5],
lineno=p.lineno(3),
)
def p_const_value(self, p):
'''const_value : const_value_native
| const_ref'''
p[0] = p[1]
def p_const_value_native(self, p):
'''const_value_native : const_value_primitive
| const_list
| const_map'''
p[0] = p[1]
def p_const_value_primitive(self, p):
'''const_value_primitive : INTCONSTANT
| DUBCONSTANT
| LITERAL
| const_bool'''
p[0] = ast.ConstPrimitiveValue(p[1], lineno=p.lineno(1))
def p_const_list(self, p):
'''const_list : '[' const_list_seq ']' '''
p[0] = ast.ConstList(list(p[2]), p.lineno(1))
def p_const_list_seq(self, p):
'''const_list_seq : const_value sep const_list_seq
| const_value const_list_seq
|'''
self._parse_seq(p)
def p_const_map(self, p):
'''const_map : '{' const_map_seq '}' '''
p[0] = ast.ConstMap(dict(p[2]), p.lineno(1))
def p_const_map_seq(self, p):
'''const_map_seq : const_map_item sep const_map_seq
| const_map_item const_map_seq
|'''
self._parse_seq(p)
def p_const_map_item(self, p):
'''const_map_item : const_value ':' const_value '''
p[0] = (p[1], p[3])
def p_const_ref(self, p):
'''const_ref : IDENTIFIER'''
p[0] = ast.ConstReference(p[1], lineno=p.lineno(1))
def p_ttype(self, p):
'''ttype : typedef
| enum
| struct
| union
| exception
| service'''
p[0] = p[1]
def p_typedef(self, p):
'''typedef : TYPEDEF field_type IDENTIFIER annotations'''
p[0] = ast.Typedef(
name=p[3], target_type=p[2], annotations=p[4], lineno=p.lineno(3)
)
def p_enum(self, p): # noqa
'''enum : ENUM IDENTIFIER '{' enum_seq '}' annotations'''
p[0] = ast.Enum(
name=p[2], items=p[4], annotations=p[6], lineno=p.lineno(2)
)
def p_enum_seq(self, p):
'''enum_seq : enum_item sep enum_seq
| enum_item enum_seq
|'''
self._parse_seq(p)
def p_enum_item(self, p):
'''enum_item : IDENTIFIER '=' INTCONSTANT annotations
| IDENTIFIER annotations'''
if len(p) == 5:
p[0] = ast.EnumItem(
name=p[1], value=p[3], annotations=p[4], lineno=p.lineno(1)
)
else:
p[0] = ast.EnumItem(
name=p[1], value=None, annotations=p[2], lineno=p.lineno(1)
)
def p_struct(self, p):
'''struct : STRUCT IDENTIFIER '{' field_seq '}' annotations'''
p[0] = ast.Struct(
name=p[2], fields=p[4], annotations=p[6], lineno=p.lineno(2)
)
def p_union(self, p):
'''union : UNION IDENTIFIER '{' field_seq '}' annotations'''
p[0] = ast.Union(
name=p[2], fields=p[4], annotations=p[6], lineno=p.lineno(2)
)
def p_exception(self, p):
'''exception : EXCEPTION IDENTIFIER '{' field_seq '}' annotations'''
p[0] = ast.Exc(
name=p[2], fields=p[4], annotations=p[6], lineno=p.lineno(2)
)
def p_service(self, p):
'''service : SERVICE IDENTIFIER '{' function_seq '}' annotations
| SERVICE IDENTIFIER EXTENDS IDENTIFIER \
'{' function_seq '}' annotations
'''
if len(p) == 7:
p[0] = ast.Service(
name=p[2],
functions=p[4],
parent=None,
annotations=p[6],
lineno=p.lineno(2),
)
else:
p[0] = ast.Service(
name=p[2],
functions=p[6],
parent=ast.ServiceReference(p[4], p.lineno(4)),
annotations=p[8],
lineno=p.lineno(2),
)
def p_oneway(self, p):
'''oneway : ONEWAY
|'''
p[0] = len(p) > 1
def p_function(self, p):
'''function : oneway function_type IDENTIFIER '(' field_seq ')' \
throws annotations '''
p[0] = ast.Function(
name=p[3],
parameters=p[5],
return_type=p[2],
exceptions=p[7],
oneway=p[1],
annotations=p[8],
lineno=p.lineno(3),
)
def p_function_seq(self, p):
'''function_seq : function sep function_seq
| function function_seq
|'''
self._parse_seq(p)
def p_throws(self, p):
'''throws : THROWS '(' field_seq ')'
|'''
if len(p) == 5:
p[0] = p[3]
else:
p[0] = deque()
def p_function_type(self, p):
'''function_type : field_type
| VOID'''
if p[1] == 'void':
p[0] = None
else:
p[0] = p[1]
def p_field_seq(self, p):
'''field_seq : field sep field_seq
| field field_seq
|'''
self._parse_seq(p)
def p_field(self, p):
'''field : field_id field_req field_type IDENTIFIER annotations
| field_id field_req field_type IDENTIFIER '=' const_value \
annotations'''
if len(p) == 8:
default = p[6]
annotations = p[7]
else:
default = None
annotations = p[5]
p[0] = ast.Field(
id=p[1],
name=p[4],
field_type=p[3],
requiredness=p[2],
default=default,
annotations=annotations,
lineno=p.lineno(4),
)
def p_field_id(self, p):
'''field_id : INTCONSTANT ':'
| '''
if len(p) == 3:
if p[1] == 0:
# Prevent users from ever using field ID 0. It's reserved for
# internal use only.
raise ThriftParserError(
'Line %d: Field ID 0 is reserved for internal use.'
% p.lineno(1)
)
p[0] = p[1]
else:
p[0] = None
def p_field_req(self, p):
'''field_req : REQUIRED
| OPTIONAL
|'''
if len(p) == 2:
p[0] = p[1] == 'required'
else:
p[0] = None # don't have a default
def p_field_type(self, p):
'''field_type : ref_type
| definition_type'''
p[0] = p[1]
def p_ref_type(self, p):
'''ref_type : IDENTIFIER'''
p[0] = ast.DefinedType(p[1], lineno=p.lineno(1))
def p_base_type(self, p): # noqa
'''base_type : BOOL annotations
| BYTE annotations
| I8 annotations
| I16 annotations
| I32 annotations
| I64 annotations
| DOUBLE annotations
| STRING annotations
| BINARY annotations'''
name = p[1]
if name == 'i8':
name = 'byte'
p[0] = ast.PrimitiveType(name, p[2])
def p_container_type(self, p):
'''container_type : map_type
| list_type
| set_type'''
p[0] = p[1]
def p_map_type(self, p):
'''map_type : MAP '<' field_type ',' field_type '>' annotations'''
p[0] = ast.MapType(key_type=p[3], value_type=p[5], annotations=p[7])
def p_list_type(self, p):
'''list_type : LIST '<' field_type '>' annotations'''
p[0] = ast.ListType(value_type=p[3], annotations=p[5])
def p_set_type(self, p):
'''set_type : SET '<' field_type '>' annotations'''
p[0] = ast.SetType(value_type=p[3], annotations=p[5])
def p_definition_type(self, p):
'''definition_type : base_type
| container_type'''
p[0] = p[1]
def p_annotations(self, p):
'''annotations : '(' annotation_seq ')'
|'''
if len(p) == 1:
p[0] = []
else:
p[0] = list(p[2])
def p_annotation_seq(self, p):
'''annotation_seq : annotation sep annotation_seq
| annotation annotation_seq
|'''
self._parse_seq(p)
def p_annotation(self, p):
'''annotation : IDENTIFIER '=' LITERAL
| IDENTIFIER'''
if len(p) == 4:
p[0] = ast.Annotation(p[1], p[3], lineno=p.lineno(1))
else:
p[0] = ast.Annotation(p[1], True, lineno=p.lineno(1))
def _parse_seq(self, p):
"""Helper to parse sequence rules.
Sequence rules are in the form::
foo : foo_item sep foo
| foo_item foo
|
This function builds a deque of the items in-order.
If the number of tokens doesn't match, an exception is raised.
"""
# This basically says:
#
# - When you reach the end of the list, construct and return an empty
# deque.
# - Otherwise, prepend to start of what you got from the parser.
#
# So this ends up constructing an in-order list.
if len(p) == 4:
p[3].appendleft(p[1])
p[0] = p[3]
elif len(p) == 3:
p[2].appendleft(p[1])
p[0] = p[2]
elif len(p) == 1:
p[0] = deque()
else:
raise ThriftParserError(
'Wrong number of tokens received for expression at line %d'
% p.lineno(1)
)
class Parser(ParserSpec):
"""Parser for Thrift IDL files."""
def __init__(self, **kwargs):
if kwargs.pop('silent', False):
kwargs['errorlog'] = yacc.NullLogger()
kwargs.setdefault('debug', False)
kwargs.setdefault('write_tables', False)
self._parser = yacc.yacc(module=self, **kwargs)
self._lexer = Lexer()
def parse(self, input, **kwargs):
"""Parse the given input.
:param input:
String containing the text to be parsed.
:raises thriftrw.errors.ThriftParserError:
For parsing errors.
"""
return self._parser.parse(input, lexer=self._lexer, **kwargs)
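# Illustrative usage (not part of the original module; the Thrift IDL snippet
# below is a hypothetical example):
#
#     parser = Parser(silent=True)
#     program = parser.parse('struct User { 1: required string name }')
#     # ``program`` is an ``ast.Program`` whose ``definitions`` hold the
#     # parsed ``ast.Struct``.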
|
|
"""Generated API Documentation sample using
doc_writer_sample.py."""
doc = {
"@context": {
"ApiDocumentation": "hydra:ApiDocumentation",
"description": "hydra:description",
"domain": {
"@id": "rdfs:domain",
"@type": "@id"
},
"expects": {
"@id": "hydra:expects",
"@type": "@id"
},
"hydra": "http://www.w3.org/ns/hydra/core#",
"label": "rdfs:label",
"method": "hydra:method",
"possibleStatus": "hydra:possibleStatus",
"property": {
"@id": "hydra:property",
"@type": "@id"
},
"range": {
"@id": "rdfs:range",
"@type": "@id"
},
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"readonly": "hydra:readonly",
"required": "hydra:required",
"returns": {
"@id": "hydra:returns",
"@type": "@id"
},
"statusCode": "hydra:statusCode",
"statusCodes": "hydra:statusCodes",
"subClassOf": {
"@id": "rdfs:subClassOf",
"@type": "@id"
},
"supportedClass": "hydra:supportedClass",
"supportedOperation": "hydra:supportedOperation",
"supportedProperty": "hydra:supportedProperty",
"title": "hydra:title",
"vocab": "https://hydrus.com/api/vocab#",
"writeonly": "hydra:writeonly"
},
"@id": "https://hydrus.com/api/vocab",
"@type": "ApiDocumentation",
"description": "Description for the API Documentation",
"possibleStatus": [],
"supportedClass": [
{
"@id": "vocab:dummyClass",
"@type": "hydra:Class",
"description": "A dummyClass for demo",
"supportedOperation": [
{
"@type": "http://schema.org/UpdateAction",
"expects": "vocab:dummyClass",
"method": "POST",
"possibleStatus": [
{
"description": "dummyClass updated",
"statusCode": 200
}
],
"returns": "null",
"title": "UpdateClass"
},
{
"@type": "http://schema.org/DeleteAction",
"expects": "null",
"method": "DELETE",
"possibleStatus": [
{
"description": "dummyClass deleted",
"statusCode": 200
}
],
"returns": "null",
"title": "DeleteClass"
},
{
"@type": "http://schema.org/AddAction",
"expects": "vocab:dummyClass",
"method": "PUT",
"possibleStatus": [
{
"description": "dummyClass successfully added",
"statusCode": 201
}
],
"returns": "null",
"title": "AddClass"
},
{
"@type": "http://schema.org/FindAction",
"expects": "null",
"method": "GET",
"possibleStatus": [
{
"description": "dummyClass returned",
"statusCode": 200
}
],
"returns": "vocab:dummyClass",
"title": "GetClass"
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop1",
"writeonly": "true"
},
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop2",
"writeonly": "true"
}
],
"title": "dummyClass"
},
{
"@id": "vocab:extraClass",
"@type": "hydra:Class",
"description": "Class without any explicit methods",
"supportedOperation": [],
"supportedProperty": [],
"title": "extraClass"
},
{
"@id": "vocab:singleClass",
"@type": "hydra:Class",
"description": "A non collection class",
"supportedOperation": [
{
"@type": "http://schema.org/UpdateAction",
"expects": "vocab:singleClass",
"method": "POST",
"possibleStatus": [
{
"description": "singleClass changed",
"statusCode": 200
}
],
"returns": "null",
"title": "UpdateClass"
},
{
"@type": "http://schema.org/DeleteAction",
"expects": "null",
"method": "DELETE",
"possibleStatus": [
{
"description": "singleClass deleted",
"statusCode": 200
}
],
"returns": "null",
"title": "DeleteClass"
},
{
"@type": "http://schema.org/AddAction",
"expects": "vocab:singleClass",
"method": "PUT",
"possibleStatus": [
{
"description": "singleClass successfully added",
"statusCode": 201
}
],
"returns": "null",
"title": "AddClass"
},
{
"@type": "http://schema.org/FindAction",
"expects": "null",
"method": "GET",
"possibleStatus": [
{
"description": "singleClass returned",
"statusCode": 200
}
],
"returns": "vocab:singleClass",
"title": "GetClass"
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop1",
"writeonly": "true"
},
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop2",
"writeonly": "true"
},
{
"@type": "SupportedProperty",
"property": "vocab:dummyClass",
"readonly": "false",
"required": "false",
"title": "dummyProp",
"writeonly": "true"
},
{
"@type": "SupportedProperty",
"property": "vocab:anotherSingleClass",
"readonly": "false",
"required": "false",
"title": "singleClassProp",
"writeonly": "true"
}
],
"title": "singleClass"
},
{
"@id": "vocab:anotherSingleClass",
"@type": "hydra:Class",
"description": "An another non collection class",
"supportedOperation": [
{
"@type": "http://schema.org/FindAction",
"expects": "null",
"method": "GET",
"possibleStatus": [
{
"description": "anotherSingleClass returned",
"statusCode": 200
}
],
"returns": "vocab:anotherSingleClass",
"title": "GetClass"
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"property": "http://props.hydrus.com/prop1",
"readonly": "false",
"required": "false",
"title": "Prop1",
"writeonly": "true"
}
],
"title": "anotherSingleClass"
},
{
"@id": "http://www.w3.org/ns/hydra/core#Resource",
"@type": "hydra:Class",
"description": "null",
"supportedOperation": [],
"supportedProperty": [],
"title": "Resource"
},
{
"@id": "http://www.w3.org/ns/hydra/core#Collection",
"@type": "hydra:Class",
"description": "null",
"supportedOperation": [],
"supportedProperty": [
{
"@type": "SupportedProperty",
"property": "http://www.w3.org/ns/hydra/core#member",
"readonly": "false",
"required": "null",
"title": "members",
"writeonly": "false"
}
],
"title": "Collection"
},
{
"@id": "vocab:dummyClassCollection",
"@type": "hydra:Class",
"description": "A collection of dummyclass",
"subClassOf": "http://www.w3.org/ns/hydra/core#Collection",
"supportedOperation": [
{
"@id": "_:dummyclass_collection_retrieve",
"@type": "http://schema.org/FindAction",
"description": "Retrieves all dummyClass entities",
"expects": "null",
"method": "GET",
"returns": "vocab:dummyClassCollection",
"statusCodes": []
},
{
"@id": "_:dummyclass_create",
"@type": "http://schema.org/AddAction",
"description": "Create new dummyClass entitity",
"expects": "vocab:dummyClass",
"method": "PUT",
"returns": "vocab:dummyClass",
"statusCodes": [
{
"description": "If the dummyClass entity was created successfully.",
"statusCode": 201
}
]
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"description": "The dummyclass",
"property": "http://www.w3.org/ns/hydra/core#member",
"readonly": "false",
"required": "false",
"title": "members",
"writeonly": "false"
}
],
"title": "dummyClassCollection"
},
{
"@id": "vocab:extraClassCollection",
"@type": "hydra:Class",
"description": "A collection of extraclass",
"subClassOf": "http://www.w3.org/ns/hydra/core#Collection",
"supportedOperation": [
{
"@id": "_:extraclass_collection_retrieve",
"@type": "http://schema.org/FindAction",
"description": "Retrieves all extraClass entities",
"expects": "null",
"method": "GET",
"returns": "vocab:extraClassCollection",
"statusCodes": []
},
{
"@id": "_:extraclass_create",
"@type": "http://schema.org/AddAction",
"description": "Create new extraClass entitity",
"expects": "vocab:extraClass",
"method": "PUT",
"returns": "vocab:extraClass",
"statusCodes": [
{
"description": "If the extraClass entity was created successfully.",
"statusCode": 201
}
]
}
],
"supportedProperty": [
{
"@type": "SupportedProperty",
"description": "The extraclass",
"property": "http://www.w3.org/ns/hydra/core#member",
"readonly": "false",
"required": "false",
"title": "members",
"writeonly": "false"
}
],
"title": "extraClassCollection"
},
{
"@id": "vocab:EntryPoint",
"@type": "hydra:Class",
"description": "The main entry point or homepage of the API.",
"supportedOperation": [
{
"@id": "_:entry_point",
"@type": "http://schema.org/FindAction",
"description": "The APIs main entry point.",
"expects": "null",
"method": "GET",
"returns": "null",
"statusCodes": "vocab:EntryPoint"
}
],
"supportedProperty": [
{
"hydra:description": "The singleClass Class",
"hydra:title": "singleclass",
"property": {
"@id": "vocab:EntryPoint/singleClass",
"@type": "hydra:Link",
"description": "A non collection class",
"domain": "vocab:EntryPoint",
"label": "singleClass",
"range": "vocab:singleClass",
"supportedOperation": [
{
"@id": "updateclass",
"@type": "http://schema.org/UpdateAction",
"description": "null",
"expects": "vocab:singleClass",
"label": "UpdateClass",
"method": "POST",
"returns": "null",
"statusCodes": [
{
"description": "singleClass changed",
"statusCode": 200
}
]
},
{
"@id": "deleteclass",
"@type": "http://schema.org/DeleteAction",
"description": "null",
"expects": "null",
"label": "DeleteClass",
"method": "DELETE",
"returns": "null",
"statusCodes": [
{
"description": "singleClass deleted",
"statusCode": 200
}
]
},
{
"@id": "addclass",
"@type": "http://schema.org/AddAction",
"description": "null",
"expects": "vocab:singleClass",
"label": "AddClass",
"method": "PUT",
"returns": "null",
"statusCodes": [
{
"description": "singleClass successfully added",
"statusCode": 201
}
]
},
{
"@id": "getclass",
"@type": "http://schema.org/FindAction",
"description": "null",
"expects": "null",
"label": "GetClass",
"method": "GET",
"returns": "vocab:singleClass",
"statusCodes": [
{
"description": "singleClass returned",
"statusCode": 200
}
]
}
]
},
"readonly": "true",
"required": "null",
"writeonly": "false"
},
{
"hydra:description": "The anotherSingleClass Class",
"hydra:title": "anothersingleclass",
"property": {
"@id": "vocab:EntryPoint/anotherSingleClass",
"@type": "hydra:Link",
"description": "An another non collection class",
"domain": "vocab:EntryPoint",
"label": "anotherSingleClass",
"range": "vocab:anotherSingleClass",
"supportedOperation": [
{
"@id": "getclass",
"@type": "http://schema.org/FindAction",
"description": "null",
"expects": "null",
"label": "GetClass",
"method": "GET",
"returns": "vocab:anotherSingleClass",
"statusCodes": [
{
"description": "anotherSingleClass returned",
"statusCode": 200
}
]
}
]
},
"readonly": "true",
"required": "null",
"writeonly": "false"
},
{
"hydra:description": "The dummyClassCollection collection",
"hydra:title": "dummyclasscollection",
"property": {
"@id": "vocab:EntryPoint/DcTest",
"@type": "hydra:Link",
"description": "The dummyClassCollection collection",
"domain": "vocab:EntryPoint",
"label": "dummyClassCollection",
"range": "vocab:dummyClassCollection",
"supportedOperation": [
{
"@id": "_:dummyclass_collection_retrieve",
"@type": "http://schema.org/FindAction",
"description": "Retrieves all dummyClass entities",
"expects": "null",
"method": "GET",
"returns": "vocab:dummyClassCollection",
"statusCodes": []
},
{
"@id": "_:dummyclass_create",
"@type": "http://schema.org/AddAction",
"description": "Create new dummyClass entitity",
"expects": "vocab:dummyClass",
"method": "PUT",
"returns": "vocab:dummyClass",
"statusCodes": [
{
"description": "If the dummyClass entity was created successfully.",
"statusCode": 201
}
]
}
]
},
"readonly": "true",
"required": "null",
"writeonly": "false"
},
{
"hydra:description": "The extraClassCollection collection",
"hydra:title": "extraclasscollection",
"property": {
"@id": "vocab:EntryPoint/EcTest",
"@type": "hydra:Link",
"description": "The extraClassCollection collection",
"domain": "vocab:EntryPoint",
"label": "extraClassCollection",
"range": "vocab:extraClassCollection",
"supportedOperation": [
{
"@id": "_:extraclass_collection_retrieve",
"@type": "http://schema.org/FindAction",
"description": "Retrieves all extraClass entities",
"expects": "null",
"method": "GET",
"returns": "vocab:extraClassCollection",
"statusCodes": []
},
{
"@id": "_:extraclass_create",
"@type": "http://schema.org/AddAction",
"description": "Create new extraClass entitity",
"expects": "vocab:extraClass",
"method": "PUT",
"returns": "vocab:extraClass",
"statusCodes": [
{
"description": "If the extraClass entity was created successfully.",
"statusCode": 201
}
]
}
]
},
"readonly": "true",
"required": "null",
"writeonly": "false"
}
],
"title": "EntryPoint"
}
],
"title": "Title for the API Documentation"
}# nopep8
|
|
"""
Tests for the model-based activation workflow.
"""
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import override_settings, TestCase
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
@override_settings(
ROOT_URLCONF='registration.backends.model_activation.urls',
ACCOUNT_ACTIVATION_DAYS=7,
REGISTRATION_OPEN=True
)
class ModelActivationViewTests(TestCase):
"""
Tests for the model-based activation workflow.
"""
def test_registration_open(self):
"""
``REGISTRATION_OPEN``, when ``True``, permits registration.
"""
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
@override_settings(REGISTRATION_OPEN=False)
def test_registration_closed(self):
"""
``REGISTRATION_OPEN``, when ``False``, disallows registration.
"""
resp = self.client.get(reverse('registration_register'))
self.assertRedirects(resp, reverse('registration_disallowed'))
resp = self.client.post(
reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'}
)
self.assertRedirects(resp, reverse('registration_disallowed'))
def test_registration_get(self):
"""
HTTP ``GET`` to the registration view uses the appropriate
template and populates a registration form into the context.
"""
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(
resp, 'registration/registration_form.html'
)
self.assertTrue(
isinstance(
resp.context['form'],
RegistrationForm
)
)
def test_registration(self):
"""
Registration creates a new inactive account and a new profile
with activation key, populates the correct account data and
sends an activation email.
"""
resp = self.client.post(
reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'}
)
self.assertRedirects(resp, reverse('registration_complete'))
new_user = User.objects.get(username='bob')
self.assertTrue(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
# New user must not be active.
self.assertFalse(new_user.is_active)
# A registration profile was created, and an activation email
# was sent.
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
def test_registration_no_sites(self):
"""
Registration still functions properly when
``django.contrib.sites`` is not installed; the fallback will
be a ``RequestSite`` instance.
"""
with self.modify_settings(INSTALLED_APPS={
'remove': [
'django.contrib.sites'
]
}):
resp = self.client.post(
reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'}
)
self.assertEqual(302, resp.status_code)
new_user = User.objects.get(username='bob')
self.assertTrue(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
self.assertFalse(new_user.is_active)
self.assertEqual(1, RegistrationProfile.objects.count())
self.assertEqual(len(mail.outbox), 1)
def test_registration_failure(self):
"""
Registering with invalid data fails.
"""
resp = self.client.post(
reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'notsecret'}
)
self.assertEqual(200, resp.status_code)
self.assertFalse(resp.context['form'].is_valid())
self.assertEqual(0, len(mail.outbox))
def test_activation(self):
"""
Activation of an account functions properly.
"""
resp = self.client.post(
reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'}
)
profile = RegistrationProfile.objects.get(user__username='bob')
resp = self.client.get(
reverse(
'registration_activate',
args=(),
kwargs={'activation_key': profile.activation_key}
)
)
self.assertRedirects(resp, reverse('registration_activation_complete'))
def test_activation_expired(self):
"""
An expired account can't be activated.
"""
resp = self.client.post(
reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'}
)
profile = RegistrationProfile.objects.get(user__username='bob')
user = profile.user
user.date_joined -= datetime.timedelta(
days=settings.ACCOUNT_ACTIVATION_DAYS + 1
)
user.save()
resp = self.client.get(
reverse(
'registration_activate',
args=(),
kwargs={'activation_key': profile.activation_key}
)
)
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'registration/activate.html')
@override_settings(
ROOT_URLCONF='registration.backends.default.urls',
ACCOUNT_ACTIVATION_DAYS=7,
REGISTRATION_OPEN=True
)
class ModelActivationCompatibilityTests(ModelActivationViewTests):
"""
Re-run the model-activation workflow tests, but using the
'registration.backends.default' import compatibility support, to
ensure that it works.
"""
def test_view_imports(self):
"""
Importing the views from the old location works, and returns
the correct view classes.
"""
from registration.backends.default import views as old_views
from registration.backends.model_activation import views as new_views
self.assertEqual(
old_views.ActivationView.__class__,
new_views.ActivationView.__class__
)
self.assertEqual(
old_views.RegistrationView.__class__,
new_views.RegistrationView.__class__
)
|
|
"""
Provide pre-made queries on top of the recorder component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/history/
"""
import asyncio
from collections import defaultdict
from datetime import timedelta
from itertools import groupby
import voluptuous as vol
from homeassistant.const import HTTP_BAD_REQUEST
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components import recorder, script
from homeassistant.components.frontend import register_built_in_panel
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_HIDDEN
DOMAIN = 'history'
DEPENDENCIES = ['recorder', 'http']
CONF_EXCLUDE = 'exclude'
CONF_INCLUDE = 'include'
CONF_ENTITIES = 'entities'
CONF_DOMAINS = 'domains'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
CONF_EXCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string])
}),
CONF_INCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string])
})
}),
}, extra=vol.ALLOW_EXTRA)
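# A minimal sketch (not part of the original component) of a configuration
# block that CONFIG_SCHEMA above would accept; the domain and entity names
# are purely illustrative assumptions:
#
#   history:
#     include:
#       domains:
#         - sensor
#       entities:
#         - light.kitchen
#     exclude:
#       entities:
#         - sensor.noisy_debug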
SIGNIFICANT_DOMAINS = ('thermostat', 'climate')
IGNORE_DOMAINS = ('zone', 'scene',)
def last_5_states(entity_id):
"""Return the last 5 states for entity_id."""
entity_id = entity_id.lower()
states = recorder.get_model('States')
return recorder.execute(
recorder.query('States').filter(
(states.entity_id == entity_id) &
(states.last_changed == states.last_updated)
).order_by(states.state_id.desc()).limit(5))
def get_significant_states(start_time, end_time=None, entity_id=None,
filters=None):
"""
    Return state changes during UTC period start_time - end_time.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
entity_ids = (entity_id.lower(), ) if entity_id is not None else None
states = recorder.get_model('States')
query = recorder.query('States').filter(
(states.domain.in_(SIGNIFICANT_DOMAINS) |
(states.last_changed == states.last_updated)) &
(states.last_updated > start_time))
if filters:
query = filters.apply(query, entity_ids)
if end_time is not None:
query = query.filter(states.last_updated < end_time)
states = (
state for state in recorder.execute(
query.order_by(states.entity_id, states.last_updated))
if (_is_significant(state) and
not state.attributes.get(ATTR_HIDDEN, False)))
return states_to_json(states, start_time, entity_id, filters)
def state_changes_during_period(start_time, end_time=None, entity_id=None):
"""Return states changes during UTC period start_time - end_time."""
states = recorder.get_model('States')
query = recorder.query('States').filter(
(states.last_changed == states.last_updated) &
(states.last_changed > start_time))
if end_time is not None:
query = query.filter(states.last_updated < end_time)
if entity_id is not None:
query = query.filter_by(entity_id=entity_id.lower())
states = recorder.execute(
query.order_by(states.entity_id, states.last_updated))
return states_to_json(states, start_time, entity_id)
def get_states(utc_point_in_time, entity_ids=None, run=None, filters=None):
"""Return the states at a specific point in time."""
if run is None:
run = recorder.run_information(utc_point_in_time)
# History did not run before utc_point_in_time
if run is None:
return []
from sqlalchemy import and_, func
states = recorder.get_model('States')
most_recent_state_ids = recorder.query(
func.max(states.state_id).label('max_state_id')
).filter(
(states.created >= run.start) &
(states.created < utc_point_in_time) &
(~states.domain.in_(IGNORE_DOMAINS)))
if filters:
most_recent_state_ids = filters.apply(most_recent_state_ids,
entity_ids)
most_recent_state_ids = most_recent_state_ids.group_by(
states.entity_id).subquery()
query = recorder.query('States').join(most_recent_state_ids, and_(
states.state_id == most_recent_state_ids.c.max_state_id))
for state in recorder.execute(query):
if not state.attributes.get(ATTR_HIDDEN, False):
yield state
def states_to_json(states, start_time, entity_id, filters=None):
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
result = defaultdict(list)
entity_ids = [entity_id] if entity_id is not None else None
# Get the states at the start time
for state in get_states(start_time, entity_ids, filters=filters):
state.last_changed = start_time
state.last_updated = start_time
result[state.entity_id].append(state)
# Append all changes to it
for entity_id, group in groupby(states, lambda state: state.entity_id):
result[entity_id].extend(group)
return result
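# Shape sketch for the value states_to_json() returns (the entity ids and the
# number of states are invented for illustration):
#
#   {
#       'sensor.temperature': [<state at start_time>, <state 1>, <state 2>],
#       'light.kitchen': [<state at start_time>],
#   }
#
# The first element of each list is the synthetic start_time data point pulled
# from get_states() above.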
def get_state(utc_point_in_time, entity_id, run=None):
"""Return a state at a specific point in time."""
states = list(get_states(utc_point_in_time, (entity_id,), run))
return states[0] if states else None
# pylint: disable=unused-argument
def setup(hass, config):
"""Setup the history hooks."""
filters = Filters()
exclude = config[DOMAIN].get(CONF_EXCLUDE)
if exclude:
filters.excluded_entities = exclude[CONF_ENTITIES]
filters.excluded_domains = exclude[CONF_DOMAINS]
include = config[DOMAIN].get(CONF_INCLUDE)
if include:
filters.included_entities = include[CONF_ENTITIES]
filters.included_domains = include[CONF_DOMAINS]
hass.http.register_view(Last5StatesView)
hass.http.register_view(HistoryPeriodView(filters))
register_built_in_panel(hass, 'history', 'History', 'mdi:poll-box')
return True
class Last5StatesView(HomeAssistantView):
"""Handle last 5 state view requests."""
url = '/api/history/entity/{entity_id}/recent_states'
name = 'api:history:entity-recent-states'
@asyncio.coroutine
def get(self, request, entity_id):
"""Retrieve last 5 states of entity."""
result = yield from request.app['hass'].loop.run_in_executor(
None, last_5_states, entity_id)
return self.json(result)
class HistoryPeriodView(HomeAssistantView):
"""Handle history period requests."""
url = '/api/history/period'
name = 'api:history:view-period'
extra_urls = ['/api/history/period/{datetime}']
def __init__(self, filters):
"""Initilalize the history period view."""
self.filters = filters
@asyncio.coroutine
def get(self, request, datetime=None):
"""Return history over a period of time."""
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
one_day = timedelta(days=1)
if datetime:
start_time = dt_util.as_utc(datetime)
else:
start_time = dt_util.utcnow() - one_day
end_time = start_time + one_day
entity_id = request.GET.get('filter_entity_id')
result = yield from request.app['hass'].loop.run_in_executor(
None, get_significant_states, start_time, end_time, entity_id,
self.filters)
return self.json(result.values())
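# Hypothetical request sketch (the paths come from `url` / `extra_urls` above;
# the timestamp and entity id are made-up example values):
#
#   GET /api/history/period/2016-12-31T00:00:00+00:00?filter_entity_id=sensor.temperature
#
# This returns significant states for the 24 hours starting at the given
# datetime, optionally restricted to the entity passed via `filter_entity_id`.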
class Filters(object):
"""Container for the configured include and exclude filters."""
def __init__(self):
"""Initialise the include and exclude filters."""
self.excluded_entities = []
self.excluded_domains = []
self.included_entities = []
self.included_domains = []
def apply(self, query, entity_ids=None):
"""Apply the include/exclude filter on domains and entities on query.
Following rules apply:
* only the include section is configured - just query the specified
entities or domains.
* only the exclude section is configured - filter the specified
entities and domains from all the entities in the system.
        * if include and exclude are defined - select the entities specified in
the include and filter out the ones from the exclude list.
"""
states = recorder.get_model('States')
# specific entities requested - do not in/exclude anything
if entity_ids is not None:
return query.filter(states.entity_id.in_(entity_ids))
query = query.filter(~states.domain.in_(IGNORE_DOMAINS))
filter_query = None
# filter if only excluded domain is configured
if self.excluded_domains and not self.included_domains:
filter_query = ~states.domain.in_(self.excluded_domains)
if self.included_entities:
filter_query &= states.entity_id.in_(self.included_entities)
# filter if only included domain is configured
elif not self.excluded_domains and self.included_domains:
filter_query = states.domain.in_(self.included_domains)
if self.included_entities:
filter_query |= states.entity_id.in_(self.included_entities)
# filter if included and excluded domain is configured
elif self.excluded_domains and self.included_domains:
filter_query = ~states.domain.in_(self.excluded_domains)
if self.included_entities:
filter_query &= (states.domain.in_(self.included_domains) |
states.entity_id.in_(self.included_entities))
else:
filter_query &= (states.domain.in_(self.included_domains) & ~
states.domain.in_(self.excluded_domains))
# no domain filter just included entities
elif not self.excluded_domains and not self.included_domains and \
self.included_entities:
filter_query = states.entity_id.in_(self.included_entities)
if filter_query is not None:
query = query.filter(filter_query)
# finally apply excluded entities filter if configured
if self.excluded_entities:
query = query.filter(~states.entity_id.in_(self.excluded_entities))
return query
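    # Illustrative note (the values are assumptions, not defaults): with
    #     included_domains = ['light'] and excluded_entities = ['light.porch']
    # the "only included domain" branch above keeps states whose domain is
    # 'light', and the final excluded-entities filter then drops 'light.porch'.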
def _is_significant(state):
"""Test if state is significant for history charts.
Will only test for things that are not filtered out in SQL.
"""
# scripts that are not cancellable will never change state
return (state.domain != 'script' or
state.attributes.get(script.ATTR_CAN_CANCEL))
|
|
"""
Tests for smoothing and estimation of unobserved states and disturbances
- Predicted states: :math:`E(\alpha_t | Y_{t-1})`
- Filtered states: :math:`E(\alpha_t | Y_t)`
- Smoothed states: :math:`E(\alpha_t | Y_n)`
- Smoothed disturbances :math:`E(\varepsilon_t | Y_n), E(\eta_t | Y_n)`
Tested against R (FKF, KalmanRun / KalmanSmooth), Stata (sspace), and
MATLAB (ssm toolbox)
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
from statsmodels import datasets
from statsmodels.tsa.statespace import mlemodel, sarimax
from statsmodels.tsa.statespace.tools import compatibility_mode
from statsmodels.tsa.statespace.kalman_filter import (
FILTER_CONVENTIONAL, FILTER_COLLAPSED, FILTER_UNIVARIATE)
from statsmodels.tsa.statespace.kalman_smoother import (
SMOOTH_CONVENTIONAL, SMOOTH_CLASSICAL, SMOOTH_ALTERNATIVE,
SMOOTH_UNIVARIATE)
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal, assert_raises
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
class TestStatesAR3(object):
@classmethod
def setup_class(cls, alternate_timing=False, *args, **kwargs):
# Dataset / Stata comparison
path = current_path + os.sep + 'results/results_wpi1_ar3_stata.csv'
cls.stata = pd.read_csv(path)
cls.stata.index = pd.date_range(start='1960-01-01', periods=124,
freq='QS')
# Matlab comparison
path = current_path + os.sep+'results/results_wpi1_ar3_matlab_ssm.csv'
matlab_names = [
'a1', 'a2', 'a3', 'detP', 'alphahat1', 'alphahat2', 'alphahat3',
'detV', 'eps', 'epsvar', 'eta', 'etavar'
]
cls.matlab_ssm = pd.read_csv(path, header=None, names=matlab_names)
cls.model = sarimax.SARIMAX(
cls.stata['wpi'], order=(3, 1, 0), simple_differencing=True,
hamilton_representation=True, *args, **kwargs
)
if alternate_timing:
cls.model.ssm.timing_init_filtered = True
        # Parameters from Stata's sspace MLE estimation
params = np.r_[.5270715, .0952613, .2580355, .5307459]
cls.results = cls.model.smooth(params, cov_type='none')
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
for i in range(cls.model.nobs):
cls.results.det_predicted_state_cov[0, i] = np.linalg.det(
cls.results.filter_results.predicted_state_cov[:, :, i])
cls.results.det_smoothed_state_cov[0, i] = np.linalg.det(
cls.results.smoother_results.smoothed_state_cov[:, :, i])
if not compatibility_mode:
# Perform simulation smoothing
n_disturbance_variates = (
(cls.model.k_endog + cls.model.ssm.k_posdef) * cls.model.nobs
)
cls.sim = cls.model.simulation_smoother(filter_timing=0)
cls.sim.simulate(
disturbance_variates=np.zeros(n_disturbance_variates),
initial_state_variates=np.zeros(cls.model.k_states)
)
def test_predict_obs(self):
assert_almost_equal(
self.results.filter_results.predict().forecasts[0],
self.stata.ix[1:, 'dep1'], 4
)
def test_standardized_residuals(self):
assert_almost_equal(
self.results.filter_results.standardized_forecasts_error[0],
self.stata.ix[1:, 'sr1'], 4
)
def test_predicted_states(self):
assert_almost_equal(
self.results.filter_results.predicted_state[:, :-1].T,
self.stata.ix[1:, ['sp1', 'sp2', 'sp3']], 4
)
assert_almost_equal(
self.results.filter_results.predicted_state[:, :-1].T,
self.matlab_ssm[['a1', 'a2', 'a3']], 4
)
def test_predicted_states_cov(self):
assert_almost_equal(
self.results.det_predicted_state_cov.T,
self.matlab_ssm[['detP']], 4
)
def test_filtered_states(self):
assert_almost_equal(
self.results.filter_results.filtered_state.T,
self.stata.ix[1:, ['sf1', 'sf2', 'sf3']], 4
)
def test_smoothed_states(self):
assert_almost_equal(
self.results.smoother_results.smoothed_state.T,
self.stata.ix[1:, ['sm1', 'sm2', 'sm3']], 4
)
assert_almost_equal(
self.results.smoother_results.smoothed_state.T,
self.matlab_ssm[['alphahat1', 'alphahat2', 'alphahat3']], 4
)
def test_smoothed_states_cov(self):
assert_almost_equal(
self.results.det_smoothed_state_cov.T,
self.matlab_ssm[['detV']], 4
)
def test_smoothed_measurement_disturbance(self):
assert_almost_equal(
self.results.smoother_results.smoothed_measurement_disturbance.T,
self.matlab_ssm[['eps']], 4
)
def test_smoothed_measurement_disturbance_cov(self):
res = self.results.smoother_results
assert_almost_equal(
res.smoothed_measurement_disturbance_cov[0].T,
self.matlab_ssm[['epsvar']], 4
)
def test_smoothed_state_disturbance(self):
assert_almost_equal(
self.results.smoother_results.smoothed_state_disturbance.T,
self.matlab_ssm[['eta']], 4
)
def test_smoothed_state_disturbance_cov(self):
assert_almost_equal(
self.results.smoother_results.smoothed_state_disturbance_cov[0].T,
self.matlab_ssm[['etavar']], 4
)
class TestStatesAR3AlternateTiming(TestStatesAR3):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestStatesAR3AlternateTiming, cls).setup_class(
alternate_timing=True, *args, **kwargs)
class TestStatesAR3AlternativeSmoothing(TestStatesAR3):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestStatesAR3AlternativeSmoothing, cls).setup_class(
smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs)
def test_smoothed_states(self):
# Initialization issues can change the first few smoothed states
assert_almost_equal(
self.results.smoother_results.smoothed_state.T[2:],
self.stata.ix[3:, ['sm1', 'sm2', 'sm3']], 4
)
assert_almost_equal(
self.results.smoother_results.smoothed_state.T[2:],
self.matlab_ssm.ix[2:, ['alphahat1', 'alphahat2', 'alphahat3']], 4
)
def test_smoothed_states_cov(self):
assert_almost_equal(
self.results.det_smoothed_state_cov.T[1:],
self.matlab_ssm.ix[1:, ['detV']], 4
)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother.smooth_method,
SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_ALTERNATIVE)
class TestStatesAR3UnivariateSmoothing(TestStatesAR3):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestStatesAR3UnivariateSmoothing, cls).setup_class(
filter_method=FILTER_UNIVARIATE, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_UNIVARIATE)
class TestStatesMissingAR3(object):
@classmethod
def setup_class(cls, alternate_timing=False, *args, **kwargs):
# Dataset
path = current_path + os.sep + 'results/results_wpi1_ar3_stata.csv'
cls.stata = pd.read_csv(path)
cls.stata.index = pd.date_range(start='1960-01-01', periods=124,
freq='QS')
# Matlab comparison
path = current_path + os.sep+'results/results_wpi1_missing_ar3_matlab_ssm.csv'
matlab_names = [
'a1','a2','a3','detP','alphahat1','alphahat2','alphahat3',
'detV','eps','epsvar','eta','etavar'
]
cls.matlab_ssm = pd.read_csv(path, header=None, names=matlab_names)
# KFAS comparison
path = current_path + os.sep+'results/results_smoothing3_R.csv'
cls.R_ssm = pd.read_csv(path)
# Create missing observations
cls.stata['dwpi'] = cls.stata['wpi'].diff()
cls.stata.ix[10:21, 'dwpi'] = np.nan
cls.model = sarimax.SARIMAX(
cls.stata.ix[1:,'dwpi'], order=(3, 0, 0),
hamilton_representation=True, *args, **kwargs
)
if alternate_timing:
cls.model.ssm.timing_init_filtered = True
        # Parameters from Stata's sspace MLE estimation
params = np.r_[.5270715, .0952613, .2580355, .5307459]
cls.results = cls.model.smooth(params, return_ssm=True)
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
for i in range(cls.model.nobs):
cls.results.det_predicted_state_cov[0,i] = np.linalg.det(
cls.results.predicted_state_cov[:,:,i])
cls.results.det_smoothed_state_cov[0,i] = np.linalg.det(
cls.results.smoothed_state_cov[:,:,i])
if not compatibility_mode:
# Perform simulation smoothing
n_disturbance_variates = (
(cls.model.k_endog + cls.model.k_posdef) * cls.model.nobs
)
cls.sim = cls.model.simulation_smoother()
cls.sim.simulate(
disturbance_variates=np.zeros(n_disturbance_variates),
initial_state_variates=np.zeros(cls.model.k_states)
)
def test_predicted_states(self):
assert_almost_equal(
self.results.predicted_state[:,:-1].T,
self.matlab_ssm[['a1', 'a2', 'a3']], 4
)
def test_predicted_states_cov(self):
assert_almost_equal(
self.results.det_predicted_state_cov.T,
self.matlab_ssm[['detP']], 4
)
def test_smoothed_states(self):
assert_almost_equal(
self.results.smoothed_state.T,
self.matlab_ssm[['alphahat1', 'alphahat2', 'alphahat3']], 4
)
def test_smoothed_states_cov(self):
assert_almost_equal(
self.results.det_smoothed_state_cov.T,
self.matlab_ssm[['detV']], 4
)
def test_smoothed_measurement_disturbance(self):
assert_almost_equal(
self.results.smoothed_measurement_disturbance.T,
self.matlab_ssm[['eps']], 4
)
def test_smoothed_measurement_disturbance_cov(self):
assert_almost_equal(
self.results.smoothed_measurement_disturbance_cov[0].T,
self.matlab_ssm[['epsvar']], 4
)
# There is a discrepancy between MATLAB ssm toolbox and
# statsmodels.tsa.statespace on the following variables in the case of
# missing data. Tests against the R package KFAS confirm our results
def test_smoothed_state_disturbance(self):
# assert_almost_equal(
# self.results.smoothed_state_disturbance.T,
# self.matlab_ssm[['eta']], 4
# )
assert_almost_equal(
self.results.smoothed_state_disturbance.T,
self.R_ssm[['etahat']], 9
)
def test_smoothed_state_disturbance_cov(self):
# assert_almost_equal(
# self.results.smoothed_state_disturbance_cov[0].T,
# self.matlab_ssm[['etavar']], 4
# )
assert_almost_equal(
self.results.smoothed_state_disturbance_cov[0,0,:],
self.R_ssm['detVeta'], 9
)
class TestStatesMissingAR3AlternateTiming(TestStatesMissingAR3):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestStatesMissingAR3AlternateTiming, cls).setup_class(alternate_timing=True, *args, **kwargs)
class TestStatesMissingAR3AlternativeSmoothing(TestStatesMissingAR3):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestStatesMissingAR3AlternativeSmoothing, cls).setup_class(
smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother.smooth_method,
SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_ALTERNATIVE)
class TestStatesMissingAR3UnivariateSmoothing(TestStatesMissingAR3):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestStatesMissingAR3UnivariateSmoothing, cls).setup_class(
filter_method=FILTER_UNIVARIATE, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_UNIVARIATE)
class TestMultivariateMissing(object):
"""
Tests for most filtering and smoothing variables against output from the
R library KFAS.
Note that KFAS uses the univariate approach which generally will result in
different predicted values and covariance matrices associated with the
measurement equation (e.g. forecasts, etc.). In this case, although the
model is multivariate, each of the series is truly independent so the values
will be the same regardless of whether the univariate approach is used or
not.
"""
@classmethod
def setup_class(cls, **kwargs):
# Results
path = current_path + os.sep + 'results/results_smoothing_R.csv'
cls.desired = pd.read_csv(path)
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01', freq='QS')
obs = dta[['realgdp','realcons','realinv']].diff().ix[1:]
obs.ix[0:50, 0] = np.nan
obs.ix[19:70, 1] = np.nan
obs.ix[39:90, 2] = np.nan
obs.ix[119:130, 0] = np.nan
obs.ix[119:130, 2] = np.nan
# Create the model
mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs)
mod['design'] = np.eye(3)
mod['obs_cov'] = np.eye(3)
mod['transition'] = np.eye(3)
mod['selection'] = np.eye(3)
mod['state_cov'] = np.eye(3)
mod.initialize_approximate_diffuse(1e6)
cls.model = mod
cls.results = mod.smooth([], return_ssm=True)
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_scaled_smoothed_estimator_cov = (
np.zeros((1, cls.model.nobs)))
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_disturbance_cov = (
np.zeros((1, cls.model.nobs)))
for i in range(cls.model.nobs):
cls.results.det_scaled_smoothed_estimator_cov[0,i] = (
np.linalg.det(
cls.results.scaled_smoothed_estimator_cov[:,:,i]))
cls.results.det_predicted_state_cov[0,i] = np.linalg.det(
cls.results.predicted_state_cov[:,:,i+1])
cls.results.det_smoothed_state_cov[0,i] = np.linalg.det(
cls.results.smoothed_state_cov[:,:,i])
cls.results.det_smoothed_state_disturbance_cov[0,i] = (
np.linalg.det(
cls.results.smoothed_state_disturbance_cov[:,:,i]))
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), -205310.9767)
def test_scaled_smoothed_estimator(self):
assert_allclose(
self.results.scaled_smoothed_estimator.T,
self.desired[['r1', 'r2', 'r3']]
)
def test_scaled_smoothed_estimator_cov(self):
assert_allclose(
self.results.det_scaled_smoothed_estimator_cov.T,
self.desired[['detN']]
)
def test_forecasts(self):
assert_allclose(
self.results.forecasts.T,
self.desired[['m1', 'm2', 'm3']]
)
def test_forecasts_error(self):
assert_allclose(
self.results.forecasts_error.T,
self.desired[['v1', 'v2', 'v3']]
)
def test_forecasts_error_cov(self):
assert_allclose(
self.results.forecasts_error_cov.diagonal(),
self.desired[['F1', 'F2', 'F3']]
)
def test_predicted_states(self):
assert_allclose(
self.results.predicted_state[:,1:].T,
self.desired[['a1', 'a2', 'a3']]
)
def test_predicted_states_cov(self):
assert_allclose(
self.results.det_predicted_state_cov.T,
self.desired[['detP']]
)
def test_smoothed_states(self):
assert_allclose(
self.results.smoothed_state.T,
self.desired[['alphahat1', 'alphahat2', 'alphahat3']]
)
def test_smoothed_states_cov(self):
assert_allclose(
self.results.det_smoothed_state_cov.T,
self.desired[['detV']]
)
def test_smoothed_forecasts(self):
assert_allclose(
self.results.smoothed_forecasts.T,
self.desired[['muhat1','muhat2','muhat3']]
)
def test_smoothed_state_disturbance(self):
assert_allclose(
self.results.smoothed_state_disturbance.T,
self.desired[['etahat1','etahat2','etahat3']]
)
def test_smoothed_state_disturbance_cov(self):
assert_allclose(
self.results.det_smoothed_state_disturbance_cov.T,
self.desired[['detVeta']]
)
def test_smoothed_measurement_disturbance(self):
assert_allclose(
self.results.smoothed_measurement_disturbance.T,
self.desired[['epshat1','epshat2','epshat3']]
)
def test_smoothed_measurement_disturbance_cov(self):
assert_allclose(
self.results.smoothed_measurement_disturbance_cov.diagonal(),
self.desired[['Veps1','Veps2','Veps3']]
)
class TestMultivariateMissingClassicalSmoothing(TestMultivariateMissing):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestMultivariateMissingClassicalSmoothing, cls).setup_class(
smooth_method=SMOOTH_CLASSICAL, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_CLASSICAL)
assert_equal(self.model.ssm._kalman_smoother.smooth_method,
SMOOTH_CLASSICAL)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_CLASSICAL)
class TestMultivariateMissingAlternativeSmoothing(TestMultivariateMissing):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestMultivariateMissingAlternativeSmoothing, cls).setup_class(
smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother.smooth_method,
SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_ALTERNATIVE)
class TestMultivariateMissingUnivariateSmoothing(TestMultivariateMissing):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestMultivariateMissingUnivariateSmoothing, cls).setup_class(
filter_method=FILTER_UNIVARIATE, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_UNIVARIATE)
class TestMultivariateVAR(object):
"""
Tests for most filtering and smoothing variables against output from the
R library KFAS.
Note that KFAS uses the univariate approach which generally will result in
different predicted values and covariance matrices associated with the
measurement equation (e.g. forecasts, etc.). In this case, although the
model is multivariate, each of the series is truly independent so the values
will be the same regardless of whether the univariate approach is used or
not.
"""
@classmethod
def setup_class(cls, *args, **kwargs):
# Results
path = current_path + os.sep + 'results/results_smoothing2_R.csv'
cls.desired = pd.read_csv(path)
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01', freq='QS')
obs = np.log(dta[['realgdp','realcons','realinv']]).diff().ix[1:]
# Create the model
mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs)
mod['design'] = np.eye(3)
mod['obs_cov'] = np.array([[ 0.0000640649, 0. , 0. ],
[ 0. , 0.0000572802, 0. ],
[ 0. , 0. , 0.0017088585]])
mod['transition'] = np.array([[-0.1119908792, 0.8441841604, 0.0238725303],
[ 0.2629347724, 0.4996718412, -0.0173023305],
[-3.2192369082, 4.1536028244, 0.4514379215]])
mod['selection'] = np.eye(3)
mod['state_cov'] = np.array([[ 0.0000640649, 0.0000388496, 0.0002148769],
[ 0.0000388496, 0.0000572802, 0.000001555 ],
[ 0.0002148769, 0.000001555 , 0.0017088585]])
mod.initialize_approximate_diffuse(1e6)
cls.model = mod
cls.results = mod.smooth([], return_ssm=True)
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_scaled_smoothed_estimator_cov = (
np.zeros((1, cls.model.nobs)))
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_disturbance_cov = (
np.zeros((1, cls.model.nobs)))
for i in range(cls.model.nobs):
cls.results.det_scaled_smoothed_estimator_cov[0,i] = (
np.linalg.det(
cls.results.scaled_smoothed_estimator_cov[:,:,i]))
cls.results.det_predicted_state_cov[0,i] = np.linalg.det(
cls.results.predicted_state_cov[:,:,i+1])
cls.results.det_smoothed_state_cov[0,i] = np.linalg.det(
cls.results.smoothed_state_cov[:,:,i])
cls.results.det_smoothed_state_disturbance_cov[0,i] = (
np.linalg.det(
cls.results.smoothed_state_disturbance_cov[:,:,i]))
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), 1695.34872)
def test_scaled_smoothed_estimator(self):
assert_allclose(
self.results.scaled_smoothed_estimator.T,
self.desired[['r1', 'r2', 'r3']], atol=1e-4
)
def test_scaled_smoothed_estimator_cov(self):
assert_allclose(
np.log(self.results.det_scaled_smoothed_estimator_cov.T),
np.log(self.desired[['detN']]), atol=1e-6
)
def test_forecasts(self):
assert_allclose(
self.results.forecasts.T,
self.desired[['m1', 'm2', 'm3']], atol=1e-6
)
def test_forecasts_error(self):
assert_allclose(
self.results.forecasts_error.T[:, 0],
self.desired['v1'], atol=1e-6
)
def test_forecasts_error_cov(self):
assert_allclose(
self.results.forecasts_error_cov.diagonal()[:, 0],
self.desired['F1'], atol=1e-6
)
def test_predicted_states(self):
assert_allclose(
self.results.predicted_state[:,1:].T,
self.desired[['a1', 'a2', 'a3']], atol=1e-6
)
def test_predicted_states_cov(self):
assert_allclose(
self.results.det_predicted_state_cov.T,
self.desired[['detP']], atol=1e-16
)
def test_smoothed_states(self):
assert_allclose(
self.results.smoothed_state.T,
self.desired[['alphahat1', 'alphahat2', 'alphahat3']], atol=1e-6
)
def test_smoothed_states_cov(self):
assert_allclose(
self.results.det_smoothed_state_cov.T,
self.desired[['detV']], atol=1e-16
)
def test_smoothed_forecasts(self):
assert_allclose(
self.results.smoothed_forecasts.T,
self.desired[['muhat1','muhat2','muhat3']], atol=1e-6
)
def test_smoothed_state_disturbance(self):
assert_allclose(
self.results.smoothed_state_disturbance.T,
self.desired[['etahat1','etahat2','etahat3']], atol=1e-6
)
def test_smoothed_state_disturbance_cov(self):
assert_allclose(
self.results.det_smoothed_state_disturbance_cov.T,
self.desired[['detVeta']], atol=1e-18
)
def test_smoothed_measurement_disturbance(self):
assert_allclose(
self.results.smoothed_measurement_disturbance.T,
self.desired[['epshat1','epshat2','epshat3']], atol=1e-6
)
def test_smoothed_measurement_disturbance_cov(self):
assert_allclose(
self.results.smoothed_measurement_disturbance_cov.diagonal(),
self.desired[['Veps1','Veps2','Veps3']], atol=1e-6
)
class TestMultivariateVARAlternativeSmoothing(TestMultivariateVAR):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestMultivariateVARAlternativeSmoothing, cls).setup_class(
smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother.smooth_method,
SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_ALTERNATIVE)
class TestMultivariateVARClassicalSmoothing(TestMultivariateVAR):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestMultivariateVARClassicalSmoothing, cls).setup_class(
smooth_method=SMOOTH_CLASSICAL, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_CLASSICAL)
assert_equal(self.model.ssm._kalman_smoother.smooth_method,
SMOOTH_CLASSICAL)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_CLASSICAL)
class TestMultivariateVARUnivariate(object):
"""
Tests for most filtering and smoothing variables against output from the
R library KFAS.
Note that KFAS uses the univariate approach which generally will result in
different predicted values and covariance matrices associated with the
measurement equation (e.g. forecasts, etc.). In this case, although the
model is multivariate, each of the series is truly independent so the values
will be the same regardless of whether the univariate approach is used or
not.
"""
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
# Results
path = current_path + os.sep + 'results/results_smoothing2_R.csv'
cls.desired = pd.read_csv(path)
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01', freq='QS')
obs = np.log(dta[['realgdp','realcons','realinv']]).diff().ix[1:]
# Create the model
mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs)
mod.ssm.filter_univariate = True
mod['design'] = np.eye(3)
mod['obs_cov'] = np.array([[ 0.0000640649, 0. , 0. ],
[ 0. , 0.0000572802, 0. ],
[ 0. , 0. , 0.0017088585]])
mod['transition'] = np.array([[-0.1119908792, 0.8441841604, 0.0238725303],
[ 0.2629347724, 0.4996718412, -0.0173023305],
[-3.2192369082, 4.1536028244, 0.4514379215]])
mod['selection'] = np.eye(3)
mod['state_cov'] = np.array([[ 0.0000640649, 0.0000388496, 0.0002148769],
[ 0.0000388496, 0.0000572802, 0.000001555 ],
[ 0.0002148769, 0.000001555 , 0.0017088585]])
mod.initialize_approximate_diffuse(1e6)
cls.model = mod
cls.results = mod.smooth([], return_ssm=True)
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_scaled_smoothed_estimator_cov = (
np.zeros((1, cls.model.nobs)))
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_disturbance_cov = (
np.zeros((1, cls.model.nobs)))
for i in range(cls.model.nobs):
cls.results.det_scaled_smoothed_estimator_cov[0,i] = (
np.linalg.det(
cls.results.scaled_smoothed_estimator_cov[:,:,i]))
cls.results.det_predicted_state_cov[0,i] = np.linalg.det(
cls.results.predicted_state_cov[:,:,i+1])
cls.results.det_smoothed_state_cov[0,i] = np.linalg.det(
cls.results.smoothed_state_cov[:,:,i])
cls.results.det_smoothed_state_disturbance_cov[0,i] = (
np.linalg.det(
cls.results.smoothed_state_disturbance_cov[:,:,i]))
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), 1695.34872)
def test_scaled_smoothed_estimator(self):
assert_allclose(
self.results.scaled_smoothed_estimator.T,
self.desired[['r1', 'r2', 'r3']], atol=1e-4
)
def test_scaled_smoothed_estimator_cov(self):
assert_allclose(
np.log(self.results.det_scaled_smoothed_estimator_cov.T),
np.log(self.desired[['detN']])
)
def test_forecasts(self):
assert_allclose(
self.results.forecasts.T[:, 0],
self.desired['m1'], atol=1e-6
)
def test_forecasts_error(self):
assert_allclose(
self.results.forecasts_error.T,
self.desired[['v1', 'v2', 'v3']], atol=1e-6
)
def test_forecasts_error_cov(self):
assert_allclose(
self.results.forecasts_error_cov.diagonal(),
self.desired[['F1', 'F2', 'F3']]
)
def test_predicted_states(self):
assert_allclose(
self.results.predicted_state[:,1:].T,
self.desired[['a1', 'a2', 'a3']], atol=1e-8
)
def test_predicted_states_cov(self):
assert_allclose(
self.results.det_predicted_state_cov.T,
self.desired[['detP']], atol=1e-18
)
def test_smoothed_states(self):
assert_allclose(
self.results.smoothed_state.T,
self.desired[['alphahat1', 'alphahat2', 'alphahat3']], atol=1e-6
)
def test_smoothed_states_cov(self):
assert_allclose(
self.results.det_smoothed_state_cov.T,
self.desired[['detV']], atol=1e-18
)
def test_smoothed_forecasts(self):
assert_allclose(
self.results.smoothed_forecasts.T,
self.desired[['muhat1','muhat2','muhat3']], atol=1e-6
)
def test_smoothed_state_disturbance(self):
assert_allclose(
self.results.smoothed_state_disturbance.T,
self.desired[['etahat1','etahat2','etahat3']], atol=1e-6
)
def test_smoothed_state_disturbance_cov(self):
assert_allclose(
self.results.det_smoothed_state_disturbance_cov.T,
self.desired[['detVeta']], atol=1e-18
)
def test_smoothed_measurement_disturbance(self):
assert_allclose(
self.results.smoothed_measurement_disturbance.T,
self.desired[['epshat1','epshat2','epshat3']], atol=1e-6
)
def test_smoothed_measurement_disturbance_cov(self):
assert_allclose(
self.results.smoothed_measurement_disturbance_cov.diagonal(),
self.desired[['Veps1','Veps2','Veps3']]
)
class TestMultivariateVARUnivariateSmoothing(TestMultivariateVARUnivariate):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestMultivariateVARUnivariateSmoothing, cls).setup_class(
filter_method=FILTER_UNIVARIATE, *args, **kwargs)
def test_filter_method(self):
assert_equal(self.model.ssm.filter_method, FILTER_UNIVARIATE)
assert_equal(self.model.ssm._kalman_smoother.filter_method,
FILTER_UNIVARIATE)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_UNIVARIATE)
class TestVARAutocovariances(object):
@classmethod
def setup_class(cls, which='mixed', *args, **kwargs):
if compatibility_mode:
raise SkipTest
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01', freq='QS')
obs = np.log(dta[['realgdp','realcons','realinv']]).diff().ix[1:]
if which == 'all':
obs.ix[:50, :] = np.nan
obs.ix[119:130, :] = np.nan
elif which == 'partial':
obs.ix[0:50, 0] = np.nan
obs.ix[119:130, 0] = np.nan
elif which == 'mixed':
obs.ix[0:50, 0] = np.nan
obs.ix[19:70, 1] = np.nan
obs.ix[39:90, 2] = np.nan
obs.ix[119:130, 0] = np.nan
obs.ix[119:130, 2] = np.nan
# Create the model with typical state space
mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs)
mod['design'] = np.eye(3)
mod['obs_cov'] = np.array([[ 609.0746647855, 0. , 0. ],
[ 0. , 1.8774916622, 0. ],
[ 0. , 0. , 124.6768281675]])
mod['transition'] = np.array([[-0.8110473405, 1.8005304445, 1.0215975772],
[-1.9846632699, 2.4091302213, 1.9264449765],
[ 0.9181658823, -0.2442384581, -0.6393462272]])
mod['selection'] = np.eye(3)
mod['state_cov'] = np.array([[ 1552.9758843938, 612.7185121905, 877.6157204992],
[ 612.7185121905, 467.8739411204, 70.608037339 ],
[ 877.6157204992, 70.608037339 , 900.5440385836]])
mod.initialize_approximate_diffuse(1e6)
cls.model = mod
cls.results = mod.smooth([], return_ssm=True)
# Create the model with augmented state space
kwargs.pop('filter_collapsed', None)
mod = mlemodel.MLEModel(obs, k_states=6, k_posdef=3, **kwargs)
mod['design', :3, :3] = np.eye(3)
mod['obs_cov'] = np.array([[ 609.0746647855, 0. , 0. ],
[ 0. , 1.8774916622, 0. ],
[ 0. , 0. , 124.6768281675]])
mod['transition', :3, :3] = np.array([[-0.8110473405, 1.8005304445, 1.0215975772],
[-1.9846632699, 2.4091302213, 1.9264449765],
[ 0.9181658823, -0.2442384581, -0.6393462272]])
mod['transition', 3:, :3] = np.eye(3)
mod['selection', :3, :3] = np.eye(3)
mod['state_cov'] = np.array([[ 1552.9758843938, 612.7185121905, 877.6157204992],
[ 612.7185121905, 467.8739411204, 70.608037339 ],
[ 877.6157204992, 70.608037339 , 900.5440385836]])
mod.initialize_approximate_diffuse(1e6)
cls.augmented_model = mod
cls.augmented_results = mod.smooth([], return_ssm=True)
def test_smoothed_state_autocov(self):
        # Cov(\alpha_{t+1}, \alpha_t): the augmented model stacks the lagged
        # state below the current one, so the off-diagonal block of its
        # smoothed state covariance should match smoothed_state_autocov from
        # the standard model. Initialization makes these two methods slightly
        # different for the first few observations.
assert_allclose(self.results.smoothed_state_autocov[:, :, 0:5],
self.augmented_results.smoothed_state_cov[:3, 3:, 1:6],
atol=1e-4)
assert_allclose(self.results.smoothed_state_autocov[:, :, 5:-1],
self.augmented_results.smoothed_state_cov[:3, 3:, 6:],
atol=1e-7)
class TestVARAutocovariancesAlternativeSmoothing(TestVARAutocovariances):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestVARAutocovariancesAlternativeSmoothing, cls).setup_class(
smooth_method=SMOOTH_ALTERNATIVE, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother.smooth_method,
SMOOTH_ALTERNATIVE)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_ALTERNATIVE)
class TestVARAutocovariancesClassicalSmoothing(TestVARAutocovariances):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestVARAutocovariancesClassicalSmoothing, cls).setup_class(
smooth_method=SMOOTH_CLASSICAL, *args, **kwargs)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, SMOOTH_CLASSICAL)
assert_equal(self.model.ssm._kalman_smoother.smooth_method,
SMOOTH_CLASSICAL)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_CLASSICAL)
class TestVARAutocovariancesUnivariateSmoothing(TestVARAutocovariances):
@classmethod
def setup_class(cls, *args, **kwargs):
if compatibility_mode:
raise SkipTest
super(TestVARAutocovariancesUnivariateSmoothing, cls).setup_class(
filter_method=FILTER_UNIVARIATE, *args, **kwargs)
def test_filter_method(self):
assert_equal(self.model.ssm.filter_method, FILTER_UNIVARIATE)
assert_equal(self.model.ssm._kalman_smoother.filter_method,
FILTER_UNIVARIATE)
def test_smooth_method(self):
assert_equal(self.model.ssm.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother.smooth_method, 0)
assert_equal(self.model.ssm._kalman_smoother._smooth_method,
SMOOTH_UNIVARIATE)
|
|
# GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from match import *
class LdapDirectorySettings(object):
# all schemas
admin_bind_dn_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP Admin User DN. Administrator credentials are required to search for users under user search DN or groups under group search DN."),
required=False,
update_allowed=True,
)
password_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP Admin User Password"),
required=False,
update_allowed=True,
)
user_search_dn_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP user search DN is the root of search for a given user in the LDAP directory. Only user records present in this LDAP directory sub-tree will be validated."),
required=False,
update_allowed=True,
)
user_search_scope_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP user search scope defines how deep to search for the user starting from user search DN. (Default: AUTH_LDAP_SCOPE_ONE)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['AUTH_LDAP_SCOPE_BASE', 'AUTH_LDAP_SCOPE_ONE', 'AUTH_LDAP_SCOPE_SUBTREE']),
],
)
user_id_attribute_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP user id attribute is the login attribute that uniquely identifies a single user record."),
required=False,
update_allowed=True,
)
user_attributes_item_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP user attributes to fetch on a successful user bind."),
required=True,
update_allowed=False,
)
user_attributes_schema = properties.Schema(
properties.Schema.LIST,
_("LDAP user attributes to fetch on a successful user bind."),
schema=user_attributes_item_schema,
required=False,
update_allowed=True,
)
group_search_dn_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP group search DN is the root of search for a given group in the LDAP directory. Only matching groups present in this LDAP directory sub-tree will be checked for user membership."),
required=False,
update_allowed=True,
)
group_member_attribute_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP group attribute that identifies each of the group members."),
required=False,
update_allowed=True,
)
group_search_scope_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP group search scope defines how deep to search for the group starting from the group search DN. (Default: AUTH_LDAP_SCOPE_SUBTREE)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['AUTH_LDAP_SCOPE_BASE', 'AUTH_LDAP_SCOPE_ONE', 'AUTH_LDAP_SCOPE_SUBTREE']),
],
)
group_member_is_full_dn_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Group member entries contain full DNs instead of just user id attribute values (Default: True)"),
required=False,
update_allowed=True,
)
group_filter_schema = properties.Schema(
properties.Schema.STRING,
_("Group filter is used to identify groups during search"),
required=False,
update_allowed=True,
)
ignore_referrals_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("During user or group search, ignore searching referrals. (Default: False)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'admin_bind_dn',
'password',
'user_search_dn',
'user_search_scope',
'user_id_attribute',
'user_attributes',
'group_search_dn',
'group_member_attribute',
'group_search_scope',
'group_member_is_full_dn',
'group_filter',
'ignore_referrals',
)
# mapping of properties to their schemas
properties_schema = {
'admin_bind_dn': admin_bind_dn_schema,
'password': password_schema,
'user_search_dn': user_search_dn_schema,
'user_search_scope': user_search_scope_schema,
'user_id_attribute': user_id_attribute_schema,
'user_attributes': user_attributes_schema,
'group_search_dn': group_search_dn_schema,
'group_member_attribute': group_member_attribute_schema,
'group_search_scope': group_search_scope_schema,
'group_member_is_full_dn': group_member_is_full_dn_schema,
'group_filter': group_filter_schema,
'ignore_referrals': ignore_referrals_schema,
}
class LdapUserBindSettings(object):
# all schemas
dn_template_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP user DN pattern is used to bind LDAP user after replacing the user token with real username."),
required=False,
update_allowed=True,
)
token_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP token is replaced with real user name in the user DN pattern."),
required=False,
update_allowed=True,
)
user_id_attribute_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP user id attribute is the login attribute that uniquely identifies a single user record."),
required=False,
update_allowed=True,
)
user_attributes_item_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP user attributes to fetch on a successful user bind."),
required=True,
update_allowed=False,
)
user_attributes_schema = properties.Schema(
properties.Schema.LIST,
_("LDAP user attributes to fetch on a successful user bind."),
schema=user_attributes_item_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'dn_template',
'token',
'user_id_attribute',
'user_attributes',
)
# mapping of properties to their schemas
properties_schema = {
'dn_template': dn_template_schema,
'token': token_schema,
'user_id_attribute': user_id_attribute_schema,
'user_attributes': user_attributes_schema,
}
class SamlIdentityProviderSettings(object):
# all schemas
metadata_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) SAML IDP metadata"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'metadata',
)
# mapping of properties to their schemas
properties_schema = {
'metadata': metadata_schema,
}
class HTTPClientAuthenticationParams(object):
# all schemas
type_schema = properties.Schema(
properties.Schema.STRING,
_("type of client authentication"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HTTP_BASIC_AUTH']),
],
)
request_uri_path_schema = properties.Schema(
properties.Schema.MAP,
_("Rrequest URI path when the authentication applies"),
schema=StringMatch.properties_schema,
required=False,
update_allowed=True,
)
auth_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Auth Profile to use for validating users You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
realm_schema = properties.Schema(
properties.Schema.STRING,
_("Basic authentication realm to present to a user along with the prompt for credentials."),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'type',
'request_uri_path',
'auth_profile_uuid',
'realm',
)
# mapping of properties to their schemas
properties_schema = {
'type': type_schema,
'request_uri_path': request_uri_path_schema,
'auth_profile_uuid': auth_profile_uuid_schema,
'realm': realm_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'auth_profile_uuid': 'authprofile',
'request_uri_path': getattr(StringMatch, 'field_references', {}),
}
unique_keys = {
'request_uri_path': getattr(StringMatch, 'unique_keys', {}),
}
class SamlServiceProviderNode(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Refers to the Cluster name identifier (Virtual IP or FQDN)."),
required=True,
update_allowed=False,
)
entity_id_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Globally unique entityID for this node. Entity ID on the IDP should match this."),
required=False,
update_allowed=False,
)
single_signon_url_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Single Signon URL to be programmed on the IDP."),
required=False,
update_allowed=False,
)
signing_cert_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Service Provider signing certificate for metadata"),
required=False,
update_allowed=False,
)
signing_key_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Service Provider signing key for metadata"),
required=False,
update_allowed=False,
)
# properties list
PROPERTIES = (
'name',
'entity_id',
'single_signon_url',
'signing_cert',
'signing_key',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'entity_id': entity_id_schema,
'single_signon_url': single_signon_url_schema,
'signing_cert': signing_cert_schema,
'signing_key': signing_key_schema,
}
unique_keys = {
'my_key': 'name',
}
class AuthTacacsPlusAttributeValuePair(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_("attribute name"),
required=False,
update_allowed=True,
)
value_schema = properties.Schema(
properties.Schema.STRING,
_("attribute value"),
required=False,
update_allowed=True,
)
mandatory_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("mandatory"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
'value',
'mandatory',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'value': value_schema,
'mandatory': mandatory_schema,
}
class SamlServiceProviderSettings(object):
# all schemas
saml_entity_type_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Type of SAML endpoint (Default: AUTH_SAML_CLUSTER_VIP)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['AUTH_SAML_CLUSTER_VIP', 'AUTH_SAML_DNS_FQDN']),
],
)
fqdn_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) FQDN if entity type is DNS_FQDN "),
required=False,
update_allowed=True,
)
sp_nodes_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.3) Service Provider node information"),
schema=SamlServiceProviderNode.properties_schema,
required=True,
update_allowed=False,
)
sp_nodes_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.2.3) Service Provider node information"),
schema=sp_nodes_item_schema,
required=False,
update_allowed=False,
)
org_name_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Service Provider Organization Name"),
required=False,
update_allowed=True,
)
org_display_name_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Service Provider Organization Display Name"),
required=False,
update_allowed=True,
)
org_url_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Service Provider Organization URL"),
required=False,
update_allowed=True,
)
tech_contact_name_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Service Provider technical contact name"),
required=False,
update_allowed=True,
)
tech_contact_email_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Service Provider technical contact email"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'saml_entity_type',
'fqdn',
'sp_nodes',
'org_name',
'org_display_name',
'org_url',
'tech_contact_name',
'tech_contact_email',
)
# mapping of properties to their schemas
properties_schema = {
'saml_entity_type': saml_entity_type_schema,
'fqdn': fqdn_schema,
'sp_nodes': sp_nodes_schema,
'org_name': org_name_schema,
'org_display_name': org_display_name_schema,
'org_url': org_url_schema,
'tech_contact_name': tech_contact_name_schema,
'tech_contact_email': tech_contact_email_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'sp_nodes': getattr(SamlServiceProviderNode, 'field_references', {}),
}
unique_keys = {
'sp_nodes': getattr(SamlServiceProviderNode, 'unique_keys', {}),
}
class AuthMatchAttribute(object):
# all schemas
criteria_schema = properties.Schema(
properties.Schema.STRING,
_("rule match criteria"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['AUTH_MATCH_CONTAINS', 'AUTH_MATCH_DOES_NOT_CONTAIN', 'AUTH_MATCH_REGEX']),
],
)
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
values_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
)
values_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=values_item_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'criteria',
'name',
'values',
)
# mapping of properties to their schemas
properties_schema = {
'criteria': criteria_schema,
'name': name_schema,
'values': values_schema,
}
class AuthProfileHTTPClientParams(object):
# all schemas
request_header_schema = properties.Schema(
properties.Schema.STRING,
_("Insert an HTTP header. This field is used to define the header name. The value of the header is set to the client's HTTP Auth user ID."),
required=False,
update_allowed=True,
)
cache_expiration_time_schema = properties.Schema(
properties.Schema.NUMBER,
_("The max allowed length of time a clients authentication is cached (Units: SEC) (Default: 5)"),
required=False,
update_allowed=True,
)
require_user_groups_item_schema = properties.Schema(
properties.Schema.STRING,
_("A user should be a member of these groups. Each group is defined by the DN. For example, CN=testgroup,OU=groups,dc=example,dc=avinetworks,DC=com"),
required=True,
update_allowed=False,
)
require_user_groups_schema = properties.Schema(
properties.Schema.LIST,
_("A user should be a member of these groups. Each group is defined by the DN. For example, CN=testgroup,OU=groups,dc=example,dc=avinetworks,DC=com"),
schema=require_user_groups_item_schema,
required=False,
update_allowed=True,
)
group_member_is_full_dn_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Group member entries contain full DNs instead of just user id attribute values (Default: False)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'request_header',
'cache_expiration_time',
'require_user_groups',
'group_member_is_full_dn',
)
# mapping of properties to their schemas
properties_schema = {
'request_header': request_header_schema,
'cache_expiration_time': cache_expiration_time_schema,
'require_user_groups': require_user_groups_schema,
'group_member_is_full_dn': group_member_is_full_dn_schema,
}
class AuthMatchGroupMembership(object):
# all schemas
criteria_schema = properties.Schema(
properties.Schema.STRING,
_("rule match criteria"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['AUTH_MATCH_CONTAINS', 'AUTH_MATCH_DOES_NOT_CONTAIN', 'AUTH_MATCH_REGEX']),
],
)
groups_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
)
groups_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=groups_item_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'criteria',
'groups',
)
# mapping of properties to their schemas
properties_schema = {
'criteria': criteria_schema,
'groups': groups_schema,
}
class AuthMappingRule(object):
# all schemas
index_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=True,
update_allowed=True,
)
group_match_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=AuthMatchGroupMembership.properties_schema,
required=False,
update_allowed=True,
)
attribute_match_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=AuthMatchAttribute.properties_schema,
required=False,
update_allowed=True,
)
assign_tenant_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['ASSIGN_ALL', 'ASSIGN_FROM_SELECT_LIST', 'ASSIGN_MATCHING_ATTRIBUTE_REGEX', 'ASSIGN_MATCHING_ATTRIBUTE_VALUE', 'ASSIGN_MATCHING_GROUP_NAME', 'ASSIGN_MATCHING_GROUP_REGEX']),
],
)
tenant_attribute_name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
tenant_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
)
tenant_uuids_schema = properties.Schema(
properties.Schema.LIST,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=tenant_uuids_item_schema,
required=False,
update_allowed=True,
)
assign_role_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['ASSIGN_ALL', 'ASSIGN_FROM_SELECT_LIST', 'ASSIGN_MATCHING_ATTRIBUTE_REGEX', 'ASSIGN_MATCHING_ATTRIBUTE_VALUE', 'ASSIGN_MATCHING_GROUP_NAME', 'ASSIGN_MATCHING_GROUP_REGEX']),
],
)
role_attribute_name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
role_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
)
role_uuids_schema = properties.Schema(
properties.Schema.LIST,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=role_uuids_item_schema,
required=False,
update_allowed=True,
)
is_superuser_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'index',
'group_match',
'attribute_match',
'assign_tenant',
'tenant_attribute_name',
'tenant_uuids',
'assign_role',
'role_attribute_name',
'role_uuids',
'is_superuser',
)
# mapping of properties to their schemas
properties_schema = {
'index': index_schema,
'group_match': group_match_schema,
'attribute_match': attribute_match_schema,
'assign_tenant': assign_tenant_schema,
'tenant_attribute_name': tenant_attribute_name_schema,
'tenant_uuids': tenant_uuids_schema,
'assign_role': assign_role_schema,
'role_attribute_name': role_attribute_name_schema,
'role_uuids': role_uuids_schema,
'is_superuser': is_superuser_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'role_uuids': 'role',
'group_match': getattr(AuthMatchGroupMembership, 'field_references', {}),
'attribute_match': getattr(AuthMatchAttribute, 'field_references', {}),
'tenant_uuids': 'tenant',
}
unique_keys = {
'group_match': getattr(AuthMatchGroupMembership, 'unique_keys', {}),
'my_key': 'index',
'attribute_match': getattr(AuthMatchAttribute, 'unique_keys', {}),
}
class TacacsPlusAuthSettings(object):
# all schemas
server_item_schema = properties.Schema(
properties.Schema.STRING,
_("TACACS+ server IP address"),
required=True,
update_allowed=False,
)
server_schema = properties.Schema(
properties.Schema.LIST,
_("TACACS+ server IP address"),
schema=server_item_schema,
required=False,
update_allowed=True,
)
port_schema = properties.Schema(
properties.Schema.NUMBER,
_("TACACS+ server listening port (Default: 49)"),
required=False,
update_allowed=True,
)
password_schema = properties.Schema(
properties.Schema.STRING,
_("TACACS+ server shared secret"),
required=False,
update_allowed=True,
)
service_schema = properties.Schema(
properties.Schema.STRING,
_("TACACS+ service (Default: AUTH_TACACS_PLUS_SERVICE_LOGIN)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['AUTH_TACACS_PLUS_SERVICE_ARAP', 'AUTH_TACACS_PLUS_SERVICE_ENABLE', 'AUTH_TACACS_PLUS_SERVICE_FWPROXY', 'AUTH_TACACS_PLUS_SERVICE_LOGIN', 'AUTH_TACACS_PLUS_SERVICE_NASI', 'AUTH_TACACS_PLUS_SERVICE_NONE', 'AUTH_TACACS_PLUS_SERVICE_PPP', 'AUTH_TACACS_PLUS_SERVICE_PT', 'AUTH_TACACS_PLUS_SERVICE_RCMD', 'AUTH_TACACS_PLUS_SERVICE_X25']),
],
)
authorization_attrs_item_schema = properties.Schema(
properties.Schema.MAP,
_("TACACS+ authorization attribute value pairs"),
schema=AuthTacacsPlusAttributeValuePair.properties_schema,
required=True,
update_allowed=False,
)
authorization_attrs_schema = properties.Schema(
properties.Schema.LIST,
_("TACACS+ authorization attribute value pairs"),
schema=authorization_attrs_item_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'server',
'port',
'password',
'service',
'authorization_attrs',
)
# mapping of properties to their schemas
properties_schema = {
'server': server_schema,
'port': port_schema,
'password': password_schema,
'service': service_schema,
'authorization_attrs': authorization_attrs_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'authorization_attrs': getattr(AuthTacacsPlusAttributeValuePair, 'field_references', {}),
}
unique_keys = {
'authorization_attrs': getattr(AuthTacacsPlusAttributeValuePair, 'unique_keys', {}),
}
class LdapAuthSettings(object):
# all schemas
server_item_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP server IP address"),
required=True,
update_allowed=False,
)
server_schema = properties.Schema(
properties.Schema.LIST,
_("LDAP server IP address"),
schema=server_item_schema,
required=False,
update_allowed=True,
)
port_schema = properties.Schema(
properties.Schema.NUMBER,
_("Query the LDAP servers on this port. (Default: 389)"),
required=False,
update_allowed=True,
)
security_mode_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP connection security mode."),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['AUTH_LDAP_SECURE_NONE', 'AUTH_LDAP_SECURE_USE_LDAPS']),
],
)
base_dn_schema = properties.Schema(
properties.Schema.STRING,
_("The LDAP base DN. For example, avinetworks,com would be DC=avinetworks,DC=com"),
required=False,
update_allowed=True,
)
bind_as_administrator_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("LDAP administrator credentials are used to search for users and group memberships. (Default: True)"),
required=False,
update_allowed=True,
)
settings_schema = properties.Schema(
properties.Schema.MAP,
_("LDAP full directory configuration with administrator credentials"),
schema=LdapDirectorySettings.properties_schema,
required=False,
update_allowed=True,
)
user_bind_schema = properties.Schema(
properties.Schema.MAP,
_("LDAP anonymous bind configuration"),
schema=LdapUserBindSettings.properties_schema,
required=False,
update_allowed=True,
)
email_attribute_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP attribute that refers to user email"),
required=False,
update_allowed=True,
)
full_name_attribute_schema = properties.Schema(
properties.Schema.STRING,
_("LDAP attribute that refers to user's full name"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'server',
'port',
'security_mode',
'base_dn',
'bind_as_administrator',
'settings',
'user_bind',
'email_attribute',
'full_name_attribute',
)
# mapping of properties to their schemas
properties_schema = {
'server': server_schema,
'port': port_schema,
'security_mode': security_mode_schema,
'base_dn': base_dn_schema,
'bind_as_administrator': bind_as_administrator_schema,
'settings': settings_schema,
'user_bind': user_bind_schema,
'email_attribute': email_attribute_schema,
'full_name_attribute': full_name_attribute_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'user_bind': getattr(LdapUserBindSettings, 'field_references', {}),
'settings': getattr(LdapDirectorySettings, 'field_references', {}),
}
unique_keys = {
'user_bind': getattr(LdapUserBindSettings, 'unique_keys', {}),
'settings': getattr(LdapDirectorySettings, 'unique_keys', {}),
}
class SamlSettings(object):
# all schemas
idp_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.3) Configure remote Identity provider settings"),
schema=SamlIdentityProviderSettings.properties_schema,
required=False,
update_allowed=True,
)
sp_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.3) Configure service provider settings for the Controller"),
schema=SamlServiceProviderSettings.properties_schema,
required=True,
update_allowed=True,
)
# properties list
PROPERTIES = (
'idp',
'sp',
)
# mapping of properties to their schemas
properties_schema = {
'idp': idp_schema,
'sp': sp_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'sp': getattr(SamlServiceProviderSettings, 'field_references', {}),
'idp': getattr(SamlIdentityProviderSettings, 'field_references', {}),
}
unique_keys = {
'sp': getattr(SamlServiceProviderSettings, 'unique_keys', {}),
'idp': getattr(SamlIdentityProviderSettings, 'unique_keys', {}),
}
class AuthProfile(AviResource):
resource_name = "authprofile"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_("Name of the Auth Profile."),
required=True,
update_allowed=True,
)
type_schema = properties.Schema(
properties.Schema.STRING,
_("Type of the Auth Profile."),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['AUTH_PROFILE_LDAP', 'AUTH_PROFILE_SAML', 'AUTH_PROFILE_TACACS_PLUS']),
],
)
ldap_schema = properties.Schema(
properties.Schema.MAP,
_("LDAP server and directory settings."),
schema=LdapAuthSettings.properties_schema,
required=False,
update_allowed=True,
)
http_schema = properties.Schema(
properties.Schema.MAP,
_("HTTP user authentication params."),
schema=AuthProfileHTTPClientParams.properties_schema,
required=False,
update_allowed=True,
)
tacacs_plus_schema = properties.Schema(
properties.Schema.MAP,
_("TACACS+ settings"),
schema=TacacsPlusAuthSettings.properties_schema,
required=False,
update_allowed=True,
)
saml_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.3) SAML settings"),
schema=SamlSettings.properties_schema,
required=False,
update_allowed=True,
)
description_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'type',
'ldap',
'http',
'tacacs_plus',
'saml',
'description',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'type': type_schema,
'ldap': ldap_schema,
'http': http_schema,
'tacacs_plus': tacacs_plus_schema,
'saml': saml_schema,
'description': description_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'saml': getattr(SamlSettings, 'field_references', {}),
'http': getattr(AuthProfileHTTPClientParams, 'field_references', {}),
'tacacs_plus': getattr(TacacsPlusAuthSettings, 'field_references', {}),
'ldap': getattr(LdapAuthSettings, 'field_references', {}),
}
unique_keys = {
'saml': getattr(SamlSettings, 'unique_keys', {}),
'http': getattr(AuthProfileHTTPClientParams, 'unique_keys', {}),
'tacacs_plus': getattr(TacacsPlusAuthSettings, 'unique_keys', {}),
'ldap': getattr(LdapAuthSettings, 'unique_keys', {}),
}
def resource_mapping():
return {
'Avi::LBaaS::AuthProfile': AuthProfile,
}
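# Illustrative sketch (not part of the generated schema): a Heat template snippet
# that exercises the AuthProfile resource mapped above. Property names come from
# PROPERTIES/properties_schema; the concrete values are hypothetical.
#
#   resources:
#     my_auth_profile:
#       type: Avi::LBaaS::AuthProfile
#       properties:
#         name: ldap-auth            # required
#         type: AUTH_PROFILE_LDAP    # one of the AllowedValues on type_schema
#         ldap:
#           server: [10.0.0.10]
#           port: 389
#           security_mode: AUTH_LDAP_SECURE_NONE
#           base_dn: DC=example,DC=com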
|
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import ast
from collections import namedtuple
from dataclasses import dataclass, field
from enum import Enum, auto
import hydra
from hydra.core.config_store import ConfigStore
import logging
import math
import os
from omegaconf import OmegaConf
from typing import Optional
import sys
import editdistance
import torch
from hydra.core.hydra_config import HydraConfig
from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig
from fairseq.logging.meters import StopwatchMeter
from omegaconf import open_dict
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class DecoderType(Enum):
VITERBI = auto()
KENLM = auto()
FAIRSEQ = auto()
KALDI = auto()
@dataclass
class UnsupGenerateConfig(FairseqDataclass):
fairseq: FairseqConfig = FairseqConfig()
lm_weight: float = field(
default=2.0,
metadata={"help": "language model weight"},
)
w2l_decoder: DecoderType = field(
default=DecoderType.VITERBI,
metadata={"help": "type of decoder to use"},
)
kaldi_decoder_config: Optional[KaldiDecoderConfig] = None
lexicon: Optional[str] = field(
default=None,
metadata={
"help": "path to lexicon. This is also used to 'phonemize' for unsupvised param tuning"
},
)
lm_model: Optional[str] = field(
default=None,
metadata={"help": "path to language model (kenlm or fairseq)"},
)
unit_lm: bool = field(
default=False,
metadata={"help": "whether to use unit lm"},
)
beam_threshold: float = field(
default=50.0,
metadata={"help": "beam score threshold"},
)
beam_size_token: float = field(
default=100.0,
metadata={"help": "max tokens per beam"},
)
beam: int = field(
default=5,
metadata={"help": "decoder beam size"},
)
nbest: int = field(
default=1,
metadata={"help": "number of results to return"},
)
word_score: float = field(
default=1.0,
metadata={"help": "word score to add at end of word"},
)
unk_weight: float = field(
default=-math.inf,
metadata={"help": "unknown token weight"},
)
sil_weight: float = field(
default=0.0,
metadata={"help": "silence token weight"},
)
targets: Optional[str] = field(
default=None,
metadata={"help": "extension of ground truth labels to compute UER"},
)
results_path: Optional[str] = field(
default=None,
metadata={"help": "where to store results"},
)
post_process: Optional[str] = field(
default=None,
metadata={"help": "how to post process results"},
)
vocab_usage_power: float = field(
default=2,
metadata={"help": "for unsupervised param tuning"},
)
viterbi_transcript: Optional[str] = field(
default=None,
metadata={"help": "for unsupervised param tuning"},
)
min_lm_ppl: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
min_vt_uer: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
blank_weight: float = field(
default=0,
metadata={"help": "value to add or set for blank emission"},
)
blank_mode: str = field(
default="set",
metadata={
"help": "can be add or set, how to modify blank emission with blank weight"
},
)
sil_is_blank: bool = field(
default=False,
metadata={"help": "if true, <SIL> token is same as blank token"},
)
unsupervised_tuning: bool = field(
default=False,
metadata={
"help": "if true, returns a score based on unsupervised param selection metric instead of UER"
},
)
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def get_dataset_itr(cfg, task):
return task.get_batch_iterator(
dataset=task.dataset(cfg.fairseq.dataset.gen_subset),
max_tokens=cfg.fairseq.dataset.max_tokens,
max_sentences=cfg.fairseq.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=cfg.fairseq.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.fairseq.dataset.required_batch_size_multiple,
num_shards=cfg.fairseq.dataset.num_shards,
shard_id=cfg.fairseq.dataset.shard_id,
num_workers=cfg.fairseq.dataset.num_workers,
data_buffer_size=cfg.fairseq.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
cfg: UnsupGenerateConfig,
hypos,
tgt_dict,
target_tokens,
res_files,
):
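# For each of the top-n hypotheses, build (word_errors, hyp_word_count,
# ref_word_count, hyp_pieces, hyp_words); when more than one hypothesis is kept
# (nbest > 1), only the one with the fewest word errors is written to res_files
# and returned.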
retval = []
word_preds = []
transcriptions = []
dec_scores = []
for i, hypo in enumerate(hypos[: min(len(hypos), cfg.nbest)]):
if torch.is_tensor(hypo["tokens"]):
tokens = hypo["tokens"].int().cpu()
tokens = tokens[tokens >= tgt_dict.nspecial]
hyp_pieces = tgt_dict.string(tokens)
else:
hyp_pieces = " ".join(hypo["tokens"])
if "words" in hypo and len(hypo["words"]) > 0:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, cfg.post_process)
to_write = {}
if res_files is not None:
to_write[res_files["hypo.units"]] = hyp_pieces
to_write[res_files["hypo.words"]] = hyp_words
tgt_words = ""
if target_tokens is not None:
if isinstance(target_tokens, str):
tgt_pieces = tgt_words = target_tokens
else:
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, cfg.post_process)
if res_files is not None:
to_write[res_files["ref.units"]] = tgt_pieces
to_write[res_files["ref.words"]] = tgt_words
if not cfg.fairseq.common_eval.quiet:
logger.info(f"HYPO {i}:" + hyp_words)
if tgt_words:
logger.info("TARGET:" + tgt_words)
if "am_score" in hypo and "lm_score" in hypo:
logger.info(
f"DECODER AM SCORE: {hypo['am_score']}, DECODER LM SCORE: {hypo['lm_score']}, DECODER SCORE: {hypo['score']}"
)
elif "score" in hypo:
logger.info(f"DECODER SCORE: {hypo['score']}")
logger.info("___________________")
hyp_words_arr = hyp_words.split()
tgt_words_arr = tgt_words.split()
retval.append(
(
editdistance.eval(hyp_words_arr, tgt_words_arr),
len(hyp_words_arr),
len(tgt_words_arr),
hyp_pieces,
hyp_words,
)
)
word_preds.append(hyp_words_arr)
transcriptions.append(to_write)
dec_scores.append(-hypo.get("score", 0)) # negate cuz kaldi returns NLL
if len(retval) > 1:
best = None
for r, t in zip(retval, transcriptions):
if best is None or r[0] < best[0][0]:
best = r, t
for dest, tran in best[1].items():
print(tran, file=dest)
dest.flush()
return best[0]
assert len(transcriptions) == 1
for dest, tran in transcriptions[0].items():
print(tran, file=dest)
return retval[0]
def prepare_result_files(cfg: UnsupGenerateConfig):
def get_res_file(file_prefix):
if cfg.fairseq.dataset.num_shards > 1:
file_prefix = f"{cfg.fairseq.dataset.shard_id}_{file_prefix}"
path = os.path.join(
cfg.results_path,
"{}{}.txt".format(
cfg.fairseq.dataset.gen_subset,
file_prefix,
),
)
return open(path, "w", buffering=1)
if not cfg.results_path:
return None
return {
"hypo.words": get_res_file(""),
"hypo.units": get_res_file("_units"),
"ref.words": get_res_file("_ref"),
"ref.units": get_res_file("_ref_units"),
"hypo.nbest.words": get_res_file("_nbest_words"),
}
def optimize_models(cfg: UnsupGenerateConfig, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.eval()
if cfg.fairseq.common.fp16:
model.half()
if use_cuda:
model.cuda()
GenResult = namedtuple(
"GenResult",
[
"count",
"errs_t",
"gen_timer",
"lengths_hyp_unit_t",
"lengths_hyp_t",
"lengths_t",
"lm_score_t",
"num_feats",
"num_sentences",
"num_symbols",
"vt_err_t",
"vt_length_t",
],
)
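# Aggregated counters produced by generate(); main() derives WER from
# errs_t / lengths_t and LM perplexity from lm_score_t and lengths_hyp_t.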
def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda):
task = tasks.setup_task(cfg.fairseq.task)
saved_cfg.task.labels = cfg.fairseq.task.labels
task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
cfg.fairseq.task.data,
cfg.fairseq.dataset.gen_subset,
len(task.dataset(cfg.fairseq.dataset.gen_subset)),
)
)
# Load dataset (possibly sharded)
itr = get_dataset_itr(cfg, task)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(cfg: UnsupGenerateConfig):
w2l_decoder = cfg.w2l_decoder
if w2l_decoder == DecoderType.VITERBI:
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KENLM:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.FAIRSEQ:
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KALDI:
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder
assert cfg.kaldi_decoder_config is not None
return KaldiDecoder(
cfg.kaldi_decoder_config,
cfg.beam,
)
else:
raise NotImplementedError(
"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found "
+ str(w2l_decoder)
)
generator = build_generator(cfg)
kenlm = None
fairseq_lm = None
if cfg.lm_model is not None:
import kenlm
kenlm = kenlm.Model(cfg.lm_model)
num_sentences = 0
if cfg.results_path is not None and not os.path.exists(cfg.results_path):
os.makedirs(cfg.results_path)
res_files = prepare_result_files(cfg)
errs_t = 0
lengths_hyp_t = 0
lengths_hyp_unit_t = 0
lengths_t = 0
count = 0
num_feats = 0
all_hyp_pieces = []
all_hyp_words = []
num_symbols = (
len([s for s in tgt_dict.symbols if not s.startswith("madeup")])
- tgt_dict.nspecial
)
targets = None
if cfg.targets is not None:
tgt_path = os.path.join(
cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." + cfg.targets
)
if os.path.exists(tgt_path):
with open(tgt_path, "r") as f:
targets = f.read().splitlines()
viterbi_transcript = None
if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0:
logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}")
with open(cfg.viterbi_transcript, "r") as vf:
viterbi_transcript = vf.readlines()
viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript]
gen_timer.start()
start = 0
end = len(itr)
hypo_futures = None
if cfg.w2l_decoder == DecoderType.KALDI:
logger.info("Extracting features")
hypo_futures = []
samples = []
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if "net_input" not in sample or i < start or i >= end:
continue
if "padding_mask" not in sample["net_input"]:
sample["net_input"]["padding_mask"] = None
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
hypo_futures.append(hypos)
samples.append(sample)
itr = list(zip(hypo_futures, samples))
start = 0
end = len(itr)
logger.info("Finished extracting features")
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if i < start or i >= end:
continue
if hypo_futures is not None:
hypos, sample = sample
hypos = [h.result() for h in hypos]
else:
if "net_input" not in sample:
continue
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
for i, sample_id in enumerate(sample["id"].tolist()):
if targets is not None:
target_tokens = targets[sample_id]
elif "target" in sample or "target_label" in sample:
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
else:
target_tokens = None
# Process top predictions
(
errs,
length_hyp,
length,
hyp_pieces,
hyp_words,
) = process_predictions(
cfg,
hypos[i],
tgt_dict,
target_tokens,
res_files,
)
errs_t += errs
lengths_hyp_t += length_hyp
lengths_hyp_unit_t += (
len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words)
)
lengths_t += length
count += 1
all_hyp_pieces.append(hyp_pieces)
all_hyp_words.append(hyp_words)
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
lm_score_sum = 0
if kenlm is not None:
if cfg.unit_lm:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces)
else:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words)
elif fairseq_lm is not None:
lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0])
vt_err_t = 0
vt_length_t = 0
if viterbi_transcript is not None:
unit_hyps = []
if cfg.targets is not None and cfg.lexicon is not None:
lex = {}
with open(cfg.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
lex[items[0]] = items[1:]
for h in all_hyp_pieces:
hyp_ws = []
for w in h.split():
assert w in lex, w
hyp_ws.extend(lex[w])
unit_hyps.append(hyp_ws)
else:
unit_hyps.extend([h.split() for h in all_hyp_words])
vt_err_t = sum(
editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps)
)
vt_length_t = sum(len(h) for h in viterbi_transcript)
if res_files is not None:
for r in res_files.values():
r.close()
gen_timer.stop(lengths_hyp_t)
return GenResult(
count,
errs_t,
gen_timer,
lengths_hyp_unit_t,
lengths_hyp_t,
lengths_t,
lm_score_sum,
num_feats,
num_sentences,
num_symbols,
vt_err_t,
vt_length_t,
)
def gen_hypos(generator, models, num_feats, sample, task, use_cuda):
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "features" in sample["net_input"]:
sample["net_input"]["dense_x_only"] = True
num_feats += (
sample["net_input"]["features"].shape[0]
* sample["net_input"]["features"].shape[1]
)
hypos = task.inference_step(generator, models, sample, None)
return hypos, num_feats
def main(cfg: UnsupGenerateConfig, model=None):
if (
cfg.fairseq.dataset.max_tokens is None
and cfg.fairseq.dataset.batch_size is None
):
cfg.fairseq.dataset.max_tokens = 1024000
use_cuda = torch.cuda.is_available() and not cfg.fairseq.common.cpu
task = tasks.setup_task(cfg.fairseq.task)
overrides = ast.literal_eval(cfg.fairseq.common_eval.model_overrides)
if cfg.fairseq.task._name == "unpaired_audio_text":
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
"blank_is_sil": cfg.sil_is_blank,
"no_softmax": True,
"segmentation": {
"type": "NONE",
},
}
else:
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
}
if model is None:
# Load ensemble
logger.info("| loading model(s) from {}".format(cfg.fairseq.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
cfg.fairseq.common_eval.path.split("\\"),
arg_overrides=overrides,
task=task,
suffix=cfg.fairseq.checkpoint.checkpoint_suffix,
strict=(cfg.fairseq.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.fairseq.checkpoint.checkpoint_shard_count,
)
optimize_models(cfg, use_cuda, models)
else:
models = [model]
saved_cfg = cfg.fairseq
with open_dict(saved_cfg.task):
saved_cfg.task.shuffle = False
saved_cfg.task.sort_by_length = False
gen_result = generate(cfg, models, saved_cfg, use_cuda)
wer = None
if gen_result.lengths_t > 0:
wer = gen_result.errs_t * 100.0 / gen_result.lengths_t
logger.info(f"WER: {wer}")
lm_ppl = float("inf")
if gen_result.lm_score_t != 0 and gen_result.lengths_hyp_t > 0:
hyp_len = gen_result.lengths_hyp_t
lm_ppl = math.pow(
10, -gen_result.lm_score_t / (hyp_len + gen_result.num_sentences)
)
logger.info(f"LM PPL: {lm_ppl}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
" sentences/s, {:.2f} tokens/s)".format(
gen_result.num_sentences,
gen_result.gen_timer.n,
gen_result.gen_timer.sum,
gen_result.num_sentences / gen_result.gen_timer.sum,
1.0 / gen_result.gen_timer.avg,
)
)
vt_diff = None
if gen_result.vt_length_t > 0:
vt_diff = gen_result.vt_err_t / gen_result.vt_length_t
vt_diff = max(cfg.min_vt_uer, vt_diff)
lm_ppl = max(cfg.min_lm_ppl, lm_ppl)
if not cfg.unsupervised_tuning:
weighted_score = wer
else:
weighted_score = math.log(lm_ppl) * (vt_diff or 1.0)
res = (
f"| Generate {cfg.fairseq.dataset.gen_subset} with beam={cfg.beam}, "
f"lm_weight={cfg.kaldi_decoder_config.acoustic_scale if cfg.kaldi_decoder_config else cfg.lm_weight}, "
f"word_score={cfg.word_score}, sil_weight={cfg.sil_weight}, blank_weight={cfg.blank_weight}, "
f"WER: {wer}, LM_PPL: {lm_ppl}, num feats: {gen_result.num_feats}, "
f"length: {gen_result.lengths_hyp_t}, UER to viterbi: {(vt_diff or 0) * 100}, score: {weighted_score}"
)
logger.info(res)
# print(res)
return task, weighted_score
@hydra.main(
config_path=os.path.join("../../..", "fairseq", "config"), config_name="config"
)
def hydra_main(cfg):
with open_dict(cfg):
# make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
cfg.job_logging_cfg = OmegaConf.to_container(
HydraConfig.get().job_logging, resolve=True
)
cfg = OmegaConf.create(
OmegaConf.to_container(cfg, resolve=False, enum_to_str=False)
)
OmegaConf.set_struct(cfg, True)
logger.info(cfg)
utils.import_user_module(cfg.fairseq.common)
_, score = main(cfg)
if cfg.is_ax:
return score, None
return score
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
except:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=UnsupGenerateConfig)
hydra_main()
if __name__ == "__main__":
cli_main()
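# Example invocation via hydra (hypothetical paths; override names follow the
# UnsupGenerateConfig fields defined above):
#   python <this script> fairseq.common_eval.path=/checkpoints/model.pt \
#       fairseq.task.data=/data/manifests fairseq.dataset.gen_subset=valid \
#       w2l_decoder=KENLM lm_model=/lm/4gram.bin lexicon=/lm/lexicon.lst \
#       results_path=/tmp/results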
|
|
#!/usr/bin/env python
#
# backend for serial IO for POSIX compatible systems, like Linux, OSX
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2020 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
#
# parts based on code from Grant B. Edwards <grante@visi.com>:
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
#
# references: http://www.easysw.com/~mike/serial/serial.html
# Collection of port names (was previously used by number_to_device, which has
# since been removed):
# - Linux /dev/ttyS%d (confirmed)
# - cygwin/win32 /dev/com%d (confirmed)
# - openbsd (OpenBSD) /dev/cua%02d
# - bsd*, freebsd* /dev/cuad%d
# - darwin (OS X) /dev/cuad%d
# - netbsd /dev/dty%02d (NetBSD 1.6 testing by Erk)
# - irix (IRIX) /dev/ttyf%d (partially tested) names depending on flow control
# - hp (HP-UX) /dev/tty%dp0 (not tested)
# - sunos (Solaris/SunOS) /dev/tty%c (letters, 'a'..'z') (confirmed)
# - aix (AIX) /dev/tty%d
from __future__ import absolute_import
# pylint: disable=abstract-method
import errno
import fcntl
import os
import select
import struct
import sys
import termios
import serial
from serial.serialutil import SerialBase, SerialException, to_bytes, \
PortNotOpenError, SerialTimeoutException, Timeout
class PlatformSpecificBase(object):
BAUDRATE_CONSTANTS = {}
def _set_special_baudrate(self, baudrate):
raise NotImplementedError('non-standard baudrates are not supported on this platform')
def _set_rs485_mode(self, rs485_settings):
raise NotImplementedError('RS485 not supported on this platform')
def set_low_latency_mode(self, low_latency_settings):
raise NotImplementedError('Low latency not supported on this platform')
def _update_break_state(self):
"""\
Set break: Controls TXD. When active, no transmitting is possible.
"""
if self._break_state:
fcntl.ioctl(self.fd, TIOCSBRK)
else:
fcntl.ioctl(self.fd, TIOCCBRK)
# some systems support an extra flag to enable the two parity settings that
# POSIX does not support: MARK and SPACE
CMSPAR = 0 # default, for unsupported platforms, override below
# try to detect the OS so that a device can be selected...
# this code block should supply a device() and set_special_baudrate() function
# for the platform
plat = sys.platform.lower()
if plat[:5] == 'linux': # Linux (confirmed) # noqa
import array
# extra termios flags
CMSPAR = 0o10000000000 # Use "stick" (mark/space) parity
# baudrate ioctls
TCGETS2 = 0x802C542A
TCSETS2 = 0x402C542B
BOTHER = 0o010000
# RS485 ioctls
TIOCGRS485 = 0x542E
TIOCSRS485 = 0x542F
SER_RS485_ENABLED = 0b00000001
SER_RS485_RTS_ON_SEND = 0b00000010
SER_RS485_RTS_AFTER_SEND = 0b00000100
SER_RS485_RX_DURING_TX = 0b00010000
class PlatformSpecific(PlatformSpecificBase):
BAUDRATE_CONSTANTS = {
0: 0o000000, # hang up
50: 0o000001,
75: 0o000002,
110: 0o000003,
134: 0o000004,
150: 0o000005,
200: 0o000006,
300: 0o000007,
600: 0o000010,
1200: 0o000011,
1800: 0o000012,
2400: 0o000013,
4800: 0o000014,
9600: 0o000015,
19200: 0o000016,
38400: 0o000017,
57600: 0o010001,
115200: 0o010002,
230400: 0o010003,
460800: 0o010004,
500000: 0o010005,
576000: 0o010006,
921600: 0o010007,
1000000: 0o010010,
1152000: 0o010011,
1500000: 0o010012,
2000000: 0o010013,
2500000: 0o010014,
3000000: 0o010015,
3500000: 0o010016,
4000000: 0o010017
}
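# The octal values above mirror the Bxxx speed constants from the Linux termios
# headers; _reconfigure_port() falls back to this table when the running
# Python's termios module lacks a matching B<rate> attribute.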
def set_low_latency_mode(self, low_latency_settings):
buf = array.array('i', [0] * 32)
try:
# get serial_struct
fcntl.ioctl(self.fd, termios.TIOCGSERIAL, buf)
# set or unset ASYNC_LOW_LATENCY flag
if low_latency_settings:
buf[4] |= 0x2000
else:
buf[4] &= ~0x2000
# set serial_struct
fcntl.ioctl(self.fd, termios.TIOCSSERIAL, buf)
except IOError as e:
raise ValueError('Failed to update ASYNC_LOW_LATENCY flag to {}: {}'.format(low_latency_settings, e))
def _set_special_baudrate(self, baudrate):
# right size is 44 on x86_64, allow for some growth
buf = array.array('i', [0] * 64)
try:
# get serial_struct
fcntl.ioctl(self.fd, TCGETS2, buf)
# set custom speed
buf[2] &= ~termios.CBAUD
buf[2] |= BOTHER
buf[9] = buf[10] = baudrate
# set serial_struct
fcntl.ioctl(self.fd, TCSETS2, buf)
except IOError as e:
raise ValueError('Failed to set custom baud rate ({}): {}'.format(baudrate, e))
def _set_rs485_mode(self, rs485_settings):
buf = array.array('i', [0] * 8) # flags, delaytx, delayrx, padding
try:
fcntl.ioctl(self.fd, TIOCGRS485, buf)
buf[0] |= SER_RS485_ENABLED
if rs485_settings is not None:
if rs485_settings.loopback:
buf[0] |= SER_RS485_RX_DURING_TX
else:
buf[0] &= ~SER_RS485_RX_DURING_TX
if rs485_settings.rts_level_for_tx:
buf[0] |= SER_RS485_RTS_ON_SEND
else:
buf[0] &= ~SER_RS485_RTS_ON_SEND
if rs485_settings.rts_level_for_rx:
buf[0] |= SER_RS485_RTS_AFTER_SEND
else:
buf[0] &= ~SER_RS485_RTS_AFTER_SEND
if rs485_settings.delay_before_tx is not None:
buf[1] = int(rs485_settings.delay_before_tx * 1000)
if rs485_settings.delay_before_rx is not None:
buf[2] = int(rs485_settings.delay_before_rx * 1000)
else:
buf[0] = 0 # clear SER_RS485_ENABLED
fcntl.ioctl(self.fd, TIOCSRS485, buf)
except IOError as e:
raise ValueError('Failed to set RS485 mode: {}'.format(e))
elif plat == 'cygwin': # cygwin/win32 (confirmed)
class PlatformSpecific(PlatformSpecificBase):
BAUDRATE_CONSTANTS = {
128000: 0x01003,
256000: 0x01005,
500000: 0x01007,
576000: 0x01008,
921600: 0x01009,
1000000: 0x0100a,
1152000: 0x0100b,
1500000: 0x0100c,
2000000: 0x0100d,
2500000: 0x0100e,
3000000: 0x0100f
}
elif plat[:6] == 'darwin': # OS X
import array
IOSSIOSPEED = 0x80045402 # _IOW('T', 2, speed_t)
class PlatformSpecific(PlatformSpecificBase):
osx_version = os.uname()[2].split('.')
TIOCSBRK = 0x2000747B # _IO('t', 123)
TIOCCBRK = 0x2000747A # _IO('t', 122)
# Tiger or above can support arbitrary serial speeds
if int(osx_version[0]) >= 8:
def _set_special_baudrate(self, baudrate):
# use IOKit-specific call to set up high speeds
buf = array.array('i', [baudrate])
fcntl.ioctl(self.fd, IOSSIOSPEED, buf, 1)
def _update_break_state(self):
"""\
Set break: Controls TXD. When active, no transmitting is possible.
"""
if self._break_state:
fcntl.ioctl(self.fd, PlatformSpecific.TIOCSBRK)
else:
fcntl.ioctl(self.fd, PlatformSpecific.TIOCCBRK)
elif plat[:3] == 'bsd' or \
plat[:7] == 'freebsd' or \
plat[:6] == 'netbsd' or \
plat[:7] == 'openbsd':
class ReturnBaudrate(object):
def __getitem__(self, key):
return key
class PlatformSpecific(PlatformSpecificBase):
# Only tested on FreeBSD:
# The baud rate may be passed in as
# a literal value.
BAUDRATE_CONSTANTS = ReturnBaudrate()
TIOCSBRK = 0x2000747B # _IO('t', 123)
TIOCCBRK = 0x2000747A # _IO('t', 122)
def _update_break_state(self):
"""\
Set break: Controls TXD. When active, no transmitting is possible.
"""
if self._break_state:
fcntl.ioctl(self.fd, PlatformSpecific.TIOCSBRK)
else:
fcntl.ioctl(self.fd, PlatformSpecific.TIOCCBRK)
else:
class PlatformSpecific(PlatformSpecificBase):
pass
# load some constants for later use.
# try to use values from termios, use defaults from linux otherwise
TIOCMGET = getattr(termios, 'TIOCMGET', 0x5415)
TIOCMBIS = getattr(termios, 'TIOCMBIS', 0x5416)
TIOCMBIC = getattr(termios, 'TIOCMBIC', 0x5417)
TIOCMSET = getattr(termios, 'TIOCMSET', 0x5418)
# TIOCM_LE = getattr(termios, 'TIOCM_LE', 0x001)
TIOCM_DTR = getattr(termios, 'TIOCM_DTR', 0x002)
TIOCM_RTS = getattr(termios, 'TIOCM_RTS', 0x004)
# TIOCM_ST = getattr(termios, 'TIOCM_ST', 0x008)
# TIOCM_SR = getattr(termios, 'TIOCM_SR', 0x010)
TIOCM_CTS = getattr(termios, 'TIOCM_CTS', 0x020)
TIOCM_CAR = getattr(termios, 'TIOCM_CAR', 0x040)
TIOCM_RNG = getattr(termios, 'TIOCM_RNG', 0x080)
TIOCM_DSR = getattr(termios, 'TIOCM_DSR', 0x100)
TIOCM_CD = getattr(termios, 'TIOCM_CD', TIOCM_CAR)
TIOCM_RI = getattr(termios, 'TIOCM_RI', TIOCM_RNG)
# TIOCM_OUT1 = getattr(termios, 'TIOCM_OUT1', 0x2000)
# TIOCM_OUT2 = getattr(termios, 'TIOCM_OUT2', 0x4000)
if hasattr(termios, 'TIOCINQ'):
TIOCINQ = termios.TIOCINQ
else:
TIOCINQ = getattr(termios, 'FIONREAD', 0x541B)
TIOCOUTQ = getattr(termios, 'TIOCOUTQ', 0x5411)
TIOCM_zero_str = struct.pack('I', 0)
TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
TIOCSBRK = getattr(termios, 'TIOCSBRK', 0x5427)
TIOCCBRK = getattr(termios, 'TIOCCBRK', 0x5428)
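# Example usage of the modem-control constants above (illustrative sketch; the
# Serial class performs equivalent ioctl calls when querying or driving these lines):
#   status = struct.unpack('I', fcntl.ioctl(fd, TIOCMGET, TIOCM_zero_str))[0]
#   cts_asserted = bool(status & TIOCM_CTS)
#   fcntl.ioctl(fd, TIOCMBIS, TIOCM_RTS_str)   # assert RTS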
class Serial(SerialBase, PlatformSpecific):
"""\
Serial port class POSIX implementation. Serial port configuration is
done with termios and fcntl. Runs on Linux and many other Un*x like
systems.
"""
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
self.fd = None
# open
try:
self.fd = os.open(self.portstr, os.O_RDWR | os.O_NOCTTY | os.O_NONBLOCK)
except OSError as msg:
self.fd = None
raise SerialException(msg.errno, "could not open port {}: {}".format(self._port, msg))
#~ fcntl.fcntl(self.fd, fcntl.F_SETFL, 0) # set blocking
self.pipe_abort_read_r, self.pipe_abort_read_w = None, None
self.pipe_abort_write_r, self.pipe_abort_write_w = None, None
try:
self._reconfigure_port(force_update=True)
try:
if not self._dsrdtr:
self._update_dtr_state()
if not self._rtscts:
self._update_rts_state()
except IOError as e:
# ignore Invalid argument and Inappropriate ioctl
if e.errno not in (errno.EINVAL, errno.ENOTTY):
raise
self._reset_input_buffer()
self.pipe_abort_read_r, self.pipe_abort_read_w = os.pipe()
self.pipe_abort_write_r, self.pipe_abort_write_w = os.pipe()
fcntl.fcntl(self.pipe_abort_read_r, fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(self.pipe_abort_write_r, fcntl.F_SETFL, os.O_NONBLOCK)
except BaseException:
try:
os.close(self.fd)
except Exception:
# ignore any exception when closing the port
# also to keep original exception that happened when setting up
pass
self.fd = None
if self.pipe_abort_read_w is not None:
os.close(self.pipe_abort_read_w)
self.pipe_abort_read_w = None
if self.pipe_abort_read_r is not None:
os.close(self.pipe_abort_read_r)
self.pipe_abort_read_r = None
if self.pipe_abort_write_w is not None:
os.close(self.pipe_abort_write_w)
self.pipe_abort_write_w = None
if self.pipe_abort_write_r is not None:
os.close(self.pipe_abort_write_r)
self.pipe_abort_write_r = None
raise
self.is_open = True
def _reconfigure_port(self, force_update=False):
"""Set communication parameters on opened port."""
if self.fd is None:
raise SerialException("Can only operate on a valid file descriptor")
# if exclusive lock is requested, create it before we modify anything else
if self._exclusive is not None:
if self._exclusive:
try:
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as msg:
raise SerialException(msg.errno, "Could not exclusively lock port {}: {}".format(self._port, msg))
else:
fcntl.flock(self.fd, fcntl.LOCK_UN)
custom_baud = None
vmin = vtime = 0 # timeout is done via select
if self._inter_byte_timeout is not None:
vmin = 1
vtime = int(self._inter_byte_timeout * 10)
try:
orig_attr = termios.tcgetattr(self.fd)
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
except termios.error as msg: # if a port is nonexistent but has a /dev file, it'll fail here
raise SerialException("Could not configure port: {}".format(msg))
# set up raw mode / no echo / binary
cflag |= (termios.CLOCAL | termios.CREAD)
lflag &= ~(termios.ICANON | termios.ECHO | termios.ECHOE |
termios.ECHOK | termios.ECHONL |
termios.ISIG | termios.IEXTEN) # |termios.ECHOPRT
for flag in ('ECHOCTL', 'ECHOKE'): # netbsd workaround for Erk
if hasattr(termios, flag):
lflag &= ~getattr(termios, flag)
oflag &= ~(termios.OPOST | termios.ONLCR | termios.OCRNL)
iflag &= ~(termios.INLCR | termios.IGNCR | termios.ICRNL | termios.IGNBRK)
if hasattr(termios, 'IUCLC'):
iflag &= ~termios.IUCLC
if hasattr(termios, 'PARMRK'):
iflag &= ~termios.PARMRK
# setup baud rate
try:
ispeed = ospeed = getattr(termios, 'B{}'.format(self._baudrate))
except AttributeError:
try:
ispeed = ospeed = self.BAUDRATE_CONSTANTS[self._baudrate]
except KeyError:
#~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
# See if BOTHER is defined for this platform; if it is, use
# this for a speed not defined in the baudrate constants list.
try:
ispeed = ospeed = BOTHER
except NameError:
# may need custom baud rate, it isn't in our list.
ispeed = ospeed = getattr(termios, 'B38400')
try:
custom_baud = int(self._baudrate) # store for later
except ValueError:
raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))
else:
if custom_baud < 0:
raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))
# setup char len
cflag &= ~termios.CSIZE
if self._bytesize == 8:
cflag |= termios.CS8
elif self._bytesize == 7:
cflag |= termios.CS7
elif self._bytesize == 6:
cflag |= termios.CS6
elif self._bytesize == 5:
cflag |= termios.CS5
else:
raise ValueError('Invalid char len: {!r}'.format(self._bytesize))
# setup stop bits
if self._stopbits == serial.STOPBITS_ONE:
cflag &= ~(termios.CSTOPB)
elif self._stopbits == serial.STOPBITS_ONE_POINT_FIVE:
cflag |= (termios.CSTOPB) # XXX same as TWO.. there is no POSIX support for 1.5
elif self._stopbits == serial.STOPBITS_TWO:
cflag |= (termios.CSTOPB)
else:
raise ValueError('Invalid stop bit specification: {!r}'.format(self._stopbits))
# setup parity
iflag &= ~(termios.INPCK | termios.ISTRIP)
if self._parity == serial.PARITY_NONE:
cflag &= ~(termios.PARENB | termios.PARODD | CMSPAR)
elif self._parity == serial.PARITY_EVEN:
cflag &= ~(termios.PARODD | CMSPAR)
cflag |= (termios.PARENB)
elif self._parity == serial.PARITY_ODD:
cflag &= ~CMSPAR
cflag |= (termios.PARENB | termios.PARODD)
elif self._parity == serial.PARITY_MARK and CMSPAR:
cflag |= (termios.PARENB | CMSPAR | termios.PARODD)
elif self._parity == serial.PARITY_SPACE and CMSPAR:
cflag |= (termios.PARENB | CMSPAR)
cflag &= ~(termios.PARODD)
else:
raise ValueError('Invalid parity: {!r}'.format(self._parity))
# setup flow control
# xonxoff
if hasattr(termios, 'IXANY'):
if self._xonxoff:
iflag |= (termios.IXON | termios.IXOFF) # |termios.IXANY)
else:
iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
else:
if self._xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
else:
iflag &= ~(termios.IXON | termios.IXOFF)
# rtscts
if hasattr(termios, 'CRTSCTS'):
if self._rtscts:
cflag |= (termios.CRTSCTS)
else:
cflag &= ~(termios.CRTSCTS)
elif hasattr(termios, 'CNEW_RTSCTS'): # try it with alternate constant name
if self._rtscts:
cflag |= (termios.CNEW_RTSCTS)
else:
cflag &= ~(termios.CNEW_RTSCTS)
# XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??
# buffer
# vmin "minimal number of characters to be read. 0 for non blocking"
if vmin < 0 or vmin > 255:
raise ValueError('Invalid vmin: {!r}'.format(vmin))
cc[termios.VMIN] = vmin
# vtime
if vtime < 0 or vtime > 255:
raise ValueError('Invalid vtime: {!r}'.format(vtime))
cc[termios.VTIME] = vtime
# activate settings
if force_update or [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr:
termios.tcsetattr(
self.fd,
termios.TCSANOW,
[iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
# apply custom baud rate, if any
if custom_baud is not None:
self._set_special_baudrate(custom_baud)
if self._rs485_mode is not None:
self._set_rs485_mode(self._rs485_mode)
def close(self):
"""Close port"""
if self.is_open:
if self.fd is not None:
os.close(self.fd)
self.fd = None
os.close(self.pipe_abort_read_w)
os.close(self.pipe_abort_read_r)
os.close(self.pipe_abort_write_w)
os.close(self.pipe_abort_write_r)
self.pipe_abort_read_r, self.pipe_abort_read_w = None, None
self.pipe_abort_write_r, self.pipe_abort_write_w = None, None
self.is_open = False
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def in_waiting(self):
"""Return the number of bytes currently in the input buffer."""
#~ s = fcntl.ioctl(self.fd, termios.FIONREAD, TIOCM_zero_str)
s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
return struct.unpack('I', s)[0]
# select based implementation, proved to work on many systems
def read(self, size=1):
"""\
        Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
        until the requested number of bytes is read.
"""
if not self.is_open:
raise PortNotOpenError()
read = bytearray()
timeout = Timeout(self._timeout)
while len(read) < size:
try:
ready, _, _ = select.select([self.fd, self.pipe_abort_read_r], [], [], timeout.time_left())
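                # pipe_abort_read_r is the read end of an internal pipe;
                # cancel_read() writes one byte to the other end so this
                # select() wakes up even when the serial fd has no data.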
if self.pipe_abort_read_r in ready:
os.read(self.pipe_abort_read_r, 1000)
break
# If select was used with a timeout, and the timeout occurs, it
# returns with empty lists -> thus abort read operation.
# For timeout == 0 (non-blocking operation) also abort when
# there is nothing to read.
if not ready:
break # timeout
buf = os.read(self.fd, size - len(read))
except OSError as e:
                # this is for Python 3.x, where select.error is a subclass of
                # OSError; ignore BlockingIOError and EINTR, other errors are
                # re-raised as SerialException (https://www.python.org/dev/peps/pep-0475/)
if e.errno not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
raise SerialException('read failed: {}'.format(e))
except select.error as e:
                # this is for Python 2.x; ignore BlockingIOError and EINTR,
                # other errors are re-raised as SerialException
                # see also http://www.python.org/dev/peps/pep-3151/#select
if e[0] not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
raise SerialException('read failed: {}'.format(e))
else:
# read should always return some data as select reported it was
# ready to read when we get to this point.
if not buf:
# Disconnected devices, at least on Linux, show the
# behavior that they are always ready to read immediately
# but reading returns nothing.
raise SerialException(
'device reports readiness to read but returned no data '
'(device disconnected or multiple access on port?)')
read.extend(buf)
if timeout.expired():
break
return bytes(read)
def cancel_read(self):
if self.is_open:
os.write(self.pipe_abort_read_w, b"x")
def cancel_write(self):
if self.is_open:
os.write(self.pipe_abort_write_w, b"x")
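    # cancel_read()/cancel_write() implement the self-pipe trick: writing one
    # byte to the pipe wakes the select() inside read()/write(), so a call
    # blocked in another thread returns early. A hedged sketch (names are
    # illustrative only):
    #
    #     t = threading.Thread(target=lambda: port.read(1024))
    #     t.start()
    #     port.cancel_read()   # the blocked read() returns what it has so far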
def write(self, data):
"""Output the given byte string over the serial port."""
if not self.is_open:
raise PortNotOpenError()
d = to_bytes(data)
tx_len = length = len(d)
timeout = Timeout(self._write_timeout)
while tx_len > 0:
try:
n = os.write(self.fd, d)
if timeout.is_non_blocking:
# Zero timeout indicates non-blocking - simply return the
# number of bytes of data actually written
return n
elif not timeout.is_infinite:
# when timeout is set, use select to wait for being ready
# with the time left as timeout
if timeout.expired():
raise SerialTimeoutException('Write timeout')
abort, ready, _ = select.select([self.pipe_abort_write_r], [self.fd], [], timeout.time_left())
if abort:
os.read(self.pipe_abort_write_r, 1000)
break
if not ready:
raise SerialTimeoutException('Write timeout')
else:
assert timeout.time_left() is None
# wait for write operation
abort, ready, _ = select.select([self.pipe_abort_write_r], [self.fd], [], None)
if abort:
os.read(self.pipe_abort_write_r, 1)
break
if not ready:
raise SerialException('write failed (select)')
d = d[n:]
tx_len -= n
except SerialException:
raise
except OSError as e:
                # this is for Python 3.x, where select.error is a subclass of
                # OSError; ignore BlockingIOError and EINTR, other errors are
                # re-raised as SerialException (https://www.python.org/dev/peps/pep-0475/)
if e.errno not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
raise SerialException('write failed: {}'.format(e))
except select.error as e:
                # this is for Python 2.x; ignore BlockingIOError and EINTR,
                # other errors are re-raised as SerialException
                # see also http://www.python.org/dev/peps/pep-3151/#select
if e[0] not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
raise SerialException('write failed: {}'.format(e))
if not timeout.is_non_blocking and timeout.expired():
raise SerialTimeoutException('Write timeout')
return length - len(d)
def flush(self):
"""\
        Flush of file-like objects. In this case, wait until all data
is written.
"""
if not self.is_open:
raise PortNotOpenError()
termios.tcdrain(self.fd)
def _reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
termios.tcflush(self.fd, termios.TCIFLUSH)
def reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.is_open:
raise PortNotOpenError()
self._reset_input_buffer()
def reset_output_buffer(self):
"""\
Clear output buffer, aborting the current output and discarding all
that is in the buffer.
"""
if not self.is_open:
raise PortNotOpenError()
termios.tcflush(self.fd, termios.TCOFLUSH)
def send_break(self, duration=0.25):
"""\
Send break condition. Timed, returns to idle state after given
duration.
"""
if not self.is_open:
raise PortNotOpenError()
termios.tcsendbreak(self.fd, int(duration / 0.25))
def _update_rts_state(self):
"""Set terminal status line: Request To Send"""
if self._rts_state:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)
def _update_dtr_state(self):
"""Set terminal status line: Data Terminal Ready"""
if self._dtr_state:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)
@property
def cts(self):
"""Read terminal status line: Clear To Send"""
if not self.is_open:
raise PortNotOpenError()
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I', s)[0] & TIOCM_CTS != 0
@property
def dsr(self):
"""Read terminal status line: Data Set Ready"""
if not self.is_open:
raise PortNotOpenError()
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I', s)[0] & TIOCM_DSR != 0
@property
def ri(self):
"""Read terminal status line: Ring Indicator"""
if not self.is_open:
raise PortNotOpenError()
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I', s)[0] & TIOCM_RI != 0
@property
def cd(self):
"""Read terminal status line: Carrier Detect"""
if not self.is_open:
raise PortNotOpenError()
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I', s)[0] & TIOCM_CD != 0
# - - platform specific - - - -
@property
def out_waiting(self):
"""Return the number of bytes currently in the output buffer."""
#~ s = fcntl.ioctl(self.fd, termios.FIONREAD, TIOCM_zero_str)
s = fcntl.ioctl(self.fd, TIOCOUTQ, TIOCM_zero_str)
return struct.unpack('I', s)[0]
def fileno(self):
"""\
For easier use of the serial port instance with select.
WARNING: this function is not portable to different platforms!
"""
if not self.is_open:
raise PortNotOpenError()
return self.fd
def set_input_flow_control(self, enable=True):
"""\
Manually control flow - when software flow control is enabled.
This will send XON (true) or XOFF (false) to the other device.
WARNING: this function is not portable to different platforms!
"""
if not self.is_open:
raise PortNotOpenError()
if enable:
termios.tcflow(self.fd, termios.TCION)
else:
termios.tcflow(self.fd, termios.TCIOFF)
def set_output_flow_control(self, enable=True):
"""\
Manually control flow of outgoing data - when hardware or software flow
control is enabled.
WARNING: this function is not portable to different platforms!
"""
if not self.is_open:
raise PortNotOpenError()
if enable:
termios.tcflow(self.fd, termios.TCOON)
else:
termios.tcflow(self.fd, termios.TCOOFF)
def nonblocking(self):
"""DEPRECATED - has no use"""
import warnings
warnings.warn("nonblocking() has no effect, already nonblocking", DeprecationWarning)
class PosixPollSerial(Serial):
"""\
Poll based read implementation. Not all systems support poll properly.
However this one has better handling of errors, such as a device
disconnecting while it's in use (e.g. USB-serial unplugged).
"""
def read(self, size=1):
"""\
        Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
        until the requested number of bytes is read.
"""
if not self.is_open:
raise PortNotOpenError()
read = bytearray()
timeout = Timeout(self._timeout)
poll = select.poll()
poll.register(self.fd, select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL)
poll.register(self.pipe_abort_read_r, select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL)
if size > 0:
while len(read) < size:
# print "\tread(): size",size, "have", len(read) #debug
# wait until device becomes ready to read (or something fails)
for fd, event in poll.poll(None if timeout.is_infinite else (timeout.time_left() * 1000)):
if fd == self.pipe_abort_read_r:
break
if event & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
raise SerialException('device reports error (poll)')
# we don't care if it is select.POLLIN or timeout, that's
# handled below
if fd == self.pipe_abort_read_r:
os.read(self.pipe_abort_read_r, 1000)
break
buf = os.read(self.fd, size - len(read))
read.extend(buf)
if timeout.expired() \
or (self._inter_byte_timeout is not None and self._inter_byte_timeout > 0) and not buf:
break # early abort on timeout
return bytes(read)
class VTIMESerial(Serial):
"""\
Implement timeout using vtime of tty device instead of using select.
    This means that no inter-character timeout can be specified and that
the error handling is degraded.
Overall timeout is disabled when inter-character timeout is used.
Note that this implementation does NOT support cancel_read(), it will
just ignore that.
"""
def _reconfigure_port(self, force_update=True):
"""Set communication parameters on opened port."""
super(VTIMESerial, self)._reconfigure_port()
fcntl.fcntl(self.fd, fcntl.F_SETFL, 0) # clear O_NONBLOCK
if self._inter_byte_timeout is not None:
vmin = 1
vtime = int(self._inter_byte_timeout * 10)
elif self._timeout is None:
vmin = 1
vtime = 0
else:
vmin = 0
vtime = int(self._timeout * 10)
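        # Resulting termios settings (POSIX VMIN/VTIME semantics; VTIME is in
        # tenths of a second):
        #   inter_byte_timeout set -> VMIN=1, VTIME=inter_byte_timeout*10
        #   timeout is None        -> VMIN=1, VTIME=0  (block until >=1 byte)
        #   otherwise              -> VMIN=0, VTIME=timeout*10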
try:
orig_attr = termios.tcgetattr(self.fd)
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
except termios.error as msg: # if a port is nonexistent but has a /dev file, it'll fail here
raise serial.SerialException("Could not configure port: {}".format(msg))
if vtime < 0 or vtime > 255:
raise ValueError('Invalid vtime: {!r}'.format(vtime))
cc[termios.VTIME] = vtime
cc[termios.VMIN] = vmin
termios.tcsetattr(
self.fd,
termios.TCSANOW,
[iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
def read(self, size=1):
"""\
        Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
        until the requested number of bytes is read.
"""
if not self.is_open:
raise PortNotOpenError()
read = bytearray()
while len(read) < size:
buf = os.read(self.fd, size - len(read))
if not buf:
break
read.extend(buf)
return bytes(read)
# hack to make hasattr return false
cancel_read = property()
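# A minimal, hedged usage sketch (guarded so importing this module is
# unaffected); '/dev/ttyUSB0' and the settings below are illustrative only,
# not defaults of this module.
if __name__ == '__main__':
    s = Serial('/dev/ttyUSB0', baudrate=115200, timeout=1)
    try:
        s.write(b'hello')
        print(s.read(16))  # up to 16 bytes, or fewer once the 1 s timeout expires
    finally:
        s.close()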
|
|
"""Wrapper for Xxf86vm
Generated with:
tools/genwrappers.py xf86vmode
Do not modify this file.
"""
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('Xxf86vm')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
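# pick an integer type whose size matches size_t to stand in for ptrdiff_t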
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
import pyglet.libs.x11.xlib
X_XF86VidModeQueryVersion = 0 # /usr/include/X11/extensions/xf86vmode.h:4885
X_XF86VidModeGetModeLine = 1 # /usr/include/X11/extensions/xf86vmode.h:4886
X_XF86VidModeModModeLine = 2 # /usr/include/X11/extensions/xf86vmode.h:4887
X_XF86VidModeSwitchMode = 3 # /usr/include/X11/extensions/xf86vmode.h:4888
X_XF86VidModeGetMonitor = 4 # /usr/include/X11/extensions/xf86vmode.h:4889
X_XF86VidModeLockModeSwitch = 5 # /usr/include/X11/extensions/xf86vmode.h:4890
# /usr/include/X11/extensions/xf86vmode.h:4891
X_XF86VidModeGetAllModeLines = 6
X_XF86VidModeAddModeLine = 7 # /usr/include/X11/extensions/xf86vmode.h:4892
X_XF86VidModeDeleteModeLine = 8 # /usr/include/X11/extensions/xf86vmode.h:4893
# /usr/include/X11/extensions/xf86vmode.h:4894
X_XF86VidModeValidateModeLine = 9
X_XF86VidModeSwitchToMode = 10 # /usr/include/X11/extensions/xf86vmode.h:4895
X_XF86VidModeGetViewPort = 11 # /usr/include/X11/extensions/xf86vmode.h:4896
X_XF86VidModeSetViewPort = 12 # /usr/include/X11/extensions/xf86vmode.h:4897
X_XF86VidModeGetDotClocks = 13 # /usr/include/X11/extensions/xf86vmode.h:4899
# /usr/include/X11/extensions/xf86vmode.h:4900
X_XF86VidModeSetClientVersion = 14
X_XF86VidModeSetGamma = 15 # /usr/include/X11/extensions/xf86vmode.h:4901
X_XF86VidModeGetGamma = 16 # /usr/include/X11/extensions/xf86vmode.h:4902
X_XF86VidModeGetGammaRamp = 17 # /usr/include/X11/extensions/xf86vmode.h:4903
X_XF86VidModeSetGammaRamp = 18 # /usr/include/X11/extensions/xf86vmode.h:4904
# /usr/include/X11/extensions/xf86vmode.h:4905
X_XF86VidModeGetGammaRampSize = 19
# /usr/include/X11/extensions/xf86vmode.h:4906
X_XF86VidModeGetPermissions = 20
CLKFLAG_PROGRAMABLE = 1 # /usr/include/X11/extensions/xf86vmode.h:4908
XF86VidModeNumberEvents = 0 # /usr/include/X11/extensions/xf86vmode.h:4919
XF86VidModeBadClock = 0 # /usr/include/X11/extensions/xf86vmode.h:4922
XF86VidModeBadHTimings = 1 # /usr/include/X11/extensions/xf86vmode.h:4923
XF86VidModeBadVTimings = 2 # /usr/include/X11/extensions/xf86vmode.h:4924
XF86VidModeModeUnsuitable = 3 # /usr/include/X11/extensions/xf86vmode.h:4925
# /usr/include/X11/extensions/xf86vmode.h:4926
XF86VidModeExtensionDisabled = 4
XF86VidModeClientNotLocal = 5 # /usr/include/X11/extensions/xf86vmode.h:4927
XF86VidModeZoomLocked = 6 # /usr/include/X11/extensions/xf86vmode.h:4928
XF86VidModeNumberErrors = 7 # /usr/include/X11/extensions/xf86vmode.h:4929
XF86VM_READ_PERMISSION = 1 # /usr/include/X11/extensions/xf86vmode.h:4931
XF86VM_WRITE_PERMISSION = 2 # /usr/include/X11/extensions/xf86vmode.h:4932
class struct_anon_93(Structure):
__slots__ = [
'hdisplay',
'hsyncstart',
'hsyncend',
'htotal',
'hskew',
'vdisplay',
'vsyncstart',
'vsyncend',
'vtotal',
'flags',
'privsize',
'private',
]
INT32 = c_int # /usr/include/X11/Xmd.h:135
struct_anon_93._fields_ = [
('hdisplay', c_ushort),
('hsyncstart', c_ushort),
('hsyncend', c_ushort),
('htotal', c_ushort),
('hskew', c_ushort),
('vdisplay', c_ushort),
('vsyncstart', c_ushort),
('vsyncend', c_ushort),
('vtotal', c_ushort),
('flags', c_uint),
('privsize', c_int),
('private', POINTER(INT32)),
]
# /usr/include/X11/extensions/xf86vmode.h:4954
XF86VidModeModeLine = struct_anon_93
class struct_anon_94(Structure):
__slots__ = [
'dotclock',
'hdisplay',
'hsyncstart',
'hsyncend',
'htotal',
'hskew',
'vdisplay',
'vsyncstart',
'vsyncend',
'vtotal',
'flags',
'privsize',
'private',
]
struct_anon_94._fields_ = [
('dotclock', c_uint),
('hdisplay', c_ushort),
('hsyncstart', c_ushort),
('hsyncend', c_ushort),
('htotal', c_ushort),
('hskew', c_ushort),
('vdisplay', c_ushort),
('vsyncstart', c_ushort),
('vsyncend', c_ushort),
('vtotal', c_ushort),
('flags', c_uint),
('privsize', c_int),
('private', POINTER(INT32)),
]
# /usr/include/X11/extensions/xf86vmode.h:4975
XF86VidModeModeInfo = struct_anon_94
class struct_anon_95(Structure):
__slots__ = [
'hi',
'lo',
]
struct_anon_95._fields_ = [
('hi', c_float),
('lo', c_float),
]
# /usr/include/X11/extensions/xf86vmode.h:4980
XF86VidModeSyncRange = struct_anon_95
class struct_anon_96(Structure):
__slots__ = [
'vendor',
'model',
'EMPTY',
'nhsync',
'hsync',
'nvsync',
'vsync',
]
struct_anon_96._fields_ = [
('vendor', c_char_p),
('model', c_char_p),
('EMPTY', c_float),
('nhsync', c_ubyte),
('hsync', POINTER(XF86VidModeSyncRange)),
('nvsync', c_ubyte),
('vsync', POINTER(XF86VidModeSyncRange)),
]
# /usr/include/X11/extensions/xf86vmode.h:4990
XF86VidModeMonitor = struct_anon_96
class struct_anon_97(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'root',
'state',
'kind',
'forced',
'time',
]
Display = pyglet.libs.x11.xlib.Display
Window = pyglet.libs.x11.xlib.Window
Time = pyglet.libs.x11.xlib.Time
struct_anon_97._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('root', Window),
('state', c_int),
('kind', c_int),
('forced', c_int),
('time', Time),
]
# /usr/include/X11/extensions/xf86vmode.h:5002
XF86VidModeNotifyEvent = struct_anon_97
class struct_anon_98(Structure):
__slots__ = [
'red',
'green',
'blue',
]
struct_anon_98._fields_ = [
('red', c_float),
('green', c_float),
('blue', c_float),
]
# /usr/include/X11/extensions/xf86vmode.h:5008
XF86VidModeGamma = struct_anon_98
# /usr/include/X11/extensions/xf86vmode.h:5018
XF86VidModeQueryVersion = _lib.XF86VidModeQueryVersion
XF86VidModeQueryVersion.restype = c_int
XF86VidModeQueryVersion.argtypes = [
POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5024
XF86VidModeQueryExtension = _lib.XF86VidModeQueryExtension
XF86VidModeQueryExtension.restype = c_int
XF86VidModeQueryExtension.argtypes = [
POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5030
XF86VidModeSetClientVersion = _lib.XF86VidModeSetClientVersion
XF86VidModeSetClientVersion.restype = c_int
XF86VidModeSetClientVersion.argtypes = [POINTER(Display)]
# /usr/include/X11/extensions/xf86vmode.h:5034
XF86VidModeGetModeLine = _lib.XF86VidModeGetModeLine
XF86VidModeGetModeLine.restype = c_int
XF86VidModeGetModeLine.argtypes = [
POINTER(Display), c_int, POINTER(c_int), POINTER(XF86VidModeModeLine)]
# /usr/include/X11/extensions/xf86vmode.h:5041
XF86VidModeGetAllModeLines = _lib.XF86VidModeGetAllModeLines
XF86VidModeGetAllModeLines.restype = c_int
XF86VidModeGetAllModeLines.argtypes = [POINTER(Display), c_int, POINTER(
c_int), POINTER(POINTER(POINTER(XF86VidModeModeInfo)))]
# /usr/include/X11/extensions/xf86vmode.h:5048
XF86VidModeAddModeLine = _lib.XF86VidModeAddModeLine
XF86VidModeAddModeLine.restype = c_int
XF86VidModeAddModeLine.argtypes = [POINTER(Display), c_int, POINTER(
XF86VidModeModeInfo), POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5055
XF86VidModeDeleteModeLine = _lib.XF86VidModeDeleteModeLine
XF86VidModeDeleteModeLine.restype = c_int
XF86VidModeDeleteModeLine.argtypes = [
POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5061
XF86VidModeModModeLine = _lib.XF86VidModeModModeLine
XF86VidModeModModeLine.restype = c_int
XF86VidModeModModeLine.argtypes = [
POINTER(Display), c_int, POINTER(XF86VidModeModeLine)]
# /usr/include/X11/extensions/xf86vmode.h:5067
XF86VidModeValidateModeLine = _lib.XF86VidModeValidateModeLine
XF86VidModeValidateModeLine.restype = c_int
XF86VidModeValidateModeLine.argtypes = [
POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5073
XF86VidModeSwitchMode = _lib.XF86VidModeSwitchMode
XF86VidModeSwitchMode.restype = c_int
XF86VidModeSwitchMode.argtypes = [POINTER(Display), c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5079
XF86VidModeSwitchToMode = _lib.XF86VidModeSwitchToMode
XF86VidModeSwitchToMode.restype = c_int
XF86VidModeSwitchToMode.argtypes = [
POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5085
XF86VidModeLockModeSwitch = _lib.XF86VidModeLockModeSwitch
XF86VidModeLockModeSwitch.restype = c_int
XF86VidModeLockModeSwitch.argtypes = [POINTER(Display), c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5091
XF86VidModeGetMonitor = _lib.XF86VidModeGetMonitor
XF86VidModeGetMonitor.restype = c_int
XF86VidModeGetMonitor.argtypes = [
POINTER(Display), c_int, POINTER(XF86VidModeMonitor)]
# /usr/include/X11/extensions/xf86vmode.h:5097
XF86VidModeGetViewPort = _lib.XF86VidModeGetViewPort
XF86VidModeGetViewPort.restype = c_int
XF86VidModeGetViewPort.argtypes = [
POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5104
XF86VidModeSetViewPort = _lib.XF86VidModeSetViewPort
XF86VidModeSetViewPort.restype = c_int
XF86VidModeSetViewPort.argtypes = [POINTER(Display), c_int, c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5111
XF86VidModeGetDotClocks = _lib.XF86VidModeGetDotClocks
XF86VidModeGetDotClocks.restype = c_int
XF86VidModeGetDotClocks.argtypes = [POINTER(Display), c_int, POINTER(
c_int), POINTER(c_int), POINTER(c_int), POINTER(POINTER(c_int))]
# /usr/include/X11/extensions/xf86vmode.h:5120
XF86VidModeGetGamma = _lib.XF86VidModeGetGamma
XF86VidModeGetGamma.restype = c_int
XF86VidModeGetGamma.argtypes = [
POINTER(Display), c_int, POINTER(XF86VidModeGamma)]
# /usr/include/X11/extensions/xf86vmode.h:5126
XF86VidModeSetGamma = _lib.XF86VidModeSetGamma
XF86VidModeSetGamma.restype = c_int
XF86VidModeSetGamma.argtypes = [
POINTER(Display), c_int, POINTER(XF86VidModeGamma)]
# /usr/include/X11/extensions/xf86vmode.h:5132
XF86VidModeSetGammaRamp = _lib.XF86VidModeSetGammaRamp
XF86VidModeSetGammaRamp.restype = c_int
XF86VidModeSetGammaRamp.argtypes = [POINTER(Display), c_int, c_int, POINTER(
c_ushort), POINTER(c_ushort), POINTER(c_ushort)]
# /usr/include/X11/extensions/xf86vmode.h:5141
XF86VidModeGetGammaRamp = _lib.XF86VidModeGetGammaRamp
XF86VidModeGetGammaRamp.restype = c_int
XF86VidModeGetGammaRamp.argtypes = [POINTER(Display), c_int, c_int, POINTER(
c_ushort), POINTER(c_ushort), POINTER(c_ushort)]
# /usr/include/X11/extensions/xf86vmode.h:5150
XF86VidModeGetGammaRampSize = _lib.XF86VidModeGetGammaRampSize
XF86VidModeGetGammaRampSize.restype = c_int
XF86VidModeGetGammaRampSize.argtypes = [
POINTER(Display), c_int, POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5156
XF86VidModeGetPermissions = _lib.XF86VidModeGetPermissions
XF86VidModeGetPermissions.restype = c_int
XF86VidModeGetPermissions.argtypes = [POINTER(Display), c_int, POINTER(c_int)]
__all__ = ['X_XF86VidModeQueryVersion', 'X_XF86VidModeGetModeLine',
'X_XF86VidModeModModeLine', 'X_XF86VidModeSwitchMode',
'X_XF86VidModeGetMonitor', 'X_XF86VidModeLockModeSwitch',
'X_XF86VidModeGetAllModeLines', 'X_XF86VidModeAddModeLine',
'X_XF86VidModeDeleteModeLine', 'X_XF86VidModeValidateModeLine',
'X_XF86VidModeSwitchToMode', 'X_XF86VidModeGetViewPort',
'X_XF86VidModeSetViewPort', 'X_XF86VidModeGetDotClocks',
'X_XF86VidModeSetClientVersion', 'X_XF86VidModeSetGamma',
'X_XF86VidModeGetGamma', 'X_XF86VidModeGetGammaRamp',
'X_XF86VidModeSetGammaRamp', 'X_XF86VidModeGetGammaRampSize',
'X_XF86VidModeGetPermissions', 'CLKFLAG_PROGRAMABLE',
'XF86VidModeNumberEvents', 'XF86VidModeBadClock', 'XF86VidModeBadHTimings',
'XF86VidModeBadVTimings', 'XF86VidModeModeUnsuitable',
'XF86VidModeExtensionDisabled', 'XF86VidModeClientNotLocal',
'XF86VidModeZoomLocked', 'XF86VidModeNumberErrors', 'XF86VM_READ_PERMISSION',
'XF86VM_WRITE_PERMISSION', 'XF86VidModeModeLine', 'XF86VidModeModeInfo',
'XF86VidModeSyncRange', 'XF86VidModeMonitor', 'XF86VidModeNotifyEvent',
'XF86VidModeGamma', 'XF86VidModeQueryVersion', 'XF86VidModeQueryExtension',
'XF86VidModeSetClientVersion', 'XF86VidModeGetModeLine',
'XF86VidModeGetAllModeLines', 'XF86VidModeAddModeLine',
'XF86VidModeDeleteModeLine', 'XF86VidModeModModeLine',
'XF86VidModeValidateModeLine', 'XF86VidModeSwitchMode',
'XF86VidModeSwitchToMode', 'XF86VidModeLockModeSwitch',
'XF86VidModeGetMonitor', 'XF86VidModeGetViewPort', 'XF86VidModeSetViewPort',
'XF86VidModeGetDotClocks', 'XF86VidModeGetGamma', 'XF86VidModeSetGamma',
'XF86VidModeSetGammaRamp', 'XF86VidModeGetGammaRamp',
'XF86VidModeGetGammaRampSize', 'XF86VidModeGetPermissions']
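# A minimal, hedged usage sketch (not part of the generated wrapper): query the
# extension version on the default X display. This assumes an X server is
# reachable and that pyglet.libs.x11.xlib exposes XOpenDisplay, as the pyglet
# xlib wrapper normally does.
if __name__ == '__main__':
    _dpy = pyglet.libs.x11.xlib.XOpenDisplay(None)
    if _dpy:
        _major, _minor = c_int(), c_int()
        if XF86VidModeQueryVersion(_dpy, byref(_major), byref(_minor)):
            print('XF86VidMode version {}.{}'.format(_major.value, _minor.value))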
|
|
# -*- coding: utf-8 -*-
"""
test all other .agg behavior
"""
from __future__ import print_function
import pytest
from collections import OrderedDict
import datetime as dt
from functools import partial
import numpy as np
import pandas as pd
from pandas import (
date_range, DataFrame, Index, MultiIndex, PeriodIndex, period_range, Series
)
from pandas.core.groupby.groupby import SpecificationError
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
def test_agg_api():
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
tm.assert_frame_equal(result, expected)
def test_agg_datetimes_mixed():
data = [[1, '2012-01-01', 1.0],
[2, '2012-01-02', 2.0],
[3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0],
(dt.datetime.strptime(row[1], '%Y-%m-%d').date()
if row[1] else None),
row[2]]
for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index():
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_dict(OrderedDict(series))
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes():
# GH 12821
df = DataFrame({'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
'time': date_range('1/1/2011', periods=8, freq='H')})
df.loc[[0, 1, 2, 5], 'time'] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index('class')
grouped = df.groupby('class')
tm.assert_frame_equal(grouped.first(), exp)
tm.assert_frame_equal(grouped.agg('first'), exp)
tm.assert_frame_equal(grouped.agg({'time': 'first'}), exp)
tm.assert_series_equal(grouped.time.first(), exp['time'])
tm.assert_series_equal(grouped.time.agg('first'), exp['time'])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index('class')
grouped = df.groupby('class')
tm.assert_frame_equal(grouped.last(), exp)
tm.assert_frame_equal(grouped.agg('last'), exp)
tm.assert_frame_equal(grouped.agg({'time': 'last'}), exp)
tm.assert_series_equal(grouped.time.last(), exp['time'])
tm.assert_series_equal(grouped.time.agg('last'), exp['time'])
    # count: len/size include rows where time is NaT, count() excludes them
exp = pd.Series([2, 2, 2, 2],
index=Index(list('ABCD'), name='class'),
name='time')
tm.assert_series_equal(grouped.time.agg(len), exp)
tm.assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2],
index=Index(list('ABCD'), name='class'),
name='time')
tm.assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes():
# similar to GH12821
# xref #11444
u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
v = list('aaabbbbbbccd')
df = pd.DataFrame({'X': v, 'Y': u})
result = df.groupby('X')['Y'].agg(len)
expected = df.groupby('X')['Y'].count()
tm.assert_series_equal(result, expected)
def test_aggregate_float64_no_int64():
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5],
"b": [1, 2, 2, 4, 5],
"c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency():
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
grouped = df.groupby(['A', 'B'])
c_mean = grouped['C'].mean()
c_sum = grouped['C'].sum()
d_mean = grouped['D'].mean()
d_sum = grouped['D'].sum()
result = grouped['D'].agg(['sum', 'mean'])
expected = pd.concat([d_sum, d_mean], axis=1)
expected.columns = ['sum', 'mean']
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['sum', 'mean']])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg([np.sum, np.mean])
expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
expected.columns = MultiIndex.from_product([['D', 'C'],
['sum', 'mean']])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': 'mean', 'D': 'sum'})
expected = pd.concat([d_sum, c_mean], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': ['mean', 'sum'],
'D': ['mean', 'sum']})
expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['mean', 'sum']])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped[['D', 'C']].agg({'r': np.sum,
'r2': np.mean})
expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1)
expected.columns = MultiIndex.from_product([['r', 'r2'],
['D', 'C']])
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_dict_renaming_deprecation():
# 15931
df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
'B': range(5),
'C': range(5)})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False) as w:
df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
'C': {'bar': ['count', 'min']}})
assert "using a dict with renaming" in str(w[0].message)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.groupby('A')[['B', 'C']].agg({'ma': 'max'})
with tm.assert_produces_warning(FutureWarning) as w:
df.groupby('A').B.agg({'foo': 'count'})
assert "using a dict on a Series for aggregation" in str(w[0].message)
def test_agg_compat():
# GH 12334
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1)
expected.columns = MultiIndex.from_tuples([('C', 'sum'),
('C', 'std')])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'C': ['sum', 'std']})
tm.assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1)
expected.columns = ['C', 'D']
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'C': 'sum', 'D': 'std'})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts():
# API change for disallowing these types of nested dicts
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
msg = r'cannot perform renaming for r[1-2] with a nested dictionary'
with tm.assert_raises_regex(SpecificationError, msg):
g.aggregate({'r1': {'C': ['mean', 'sum']},
'r2': {'D': ['mean', 'sum']}})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g.agg({'C': {'ra': ['mean', 'std']},
'D': {'rb': ['mean', 'std']}})
expected = pd.concat([g['C'].mean(), g['C'].std(),
g['D'].mean(), g['D'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples(
[('ra', 'mean'), ('ra', 'std'),
('rb', 'mean'), ('rb', 'std')])
tm.assert_frame_equal(result, expected, check_like=True)
# same name as the original column
# GH9052
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'D': np.sum, 'result2': np.mean})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_item_by_item_raise_typeerror():
df = DataFrame(np.random.randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing('----------------------------------------')
pprint_thing(df.to_string())
raise TypeError('test')
with tm.assert_raises_regex(TypeError, 'test'):
df.groupby(0).agg(raiseException)
def test_series_agg_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
def test_series_agg_multi_pure_python():
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def bad(x):
assert (len(x.values.base) > 0)
return 'foo'
result = data.groupby(['A', 'B']).agg(bad)
expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
tm.assert_frame_equal(result, expected)
def test_agg_consistency():
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except Exception:
return np.nan
df = DataFrame({'col1': [1, 2, 3, 4],
'col2': [10, 25, 26, 31],
'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10),
dt.date(2013, 2, 11), dt.date(2013, 2, 11)]})
g = df.groupby('date')
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
tm.assert_frame_equal(result, expected)
def test_agg_callables():
# GH 7929
df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64)
class fn_class(object):
def __call__(self, x):
return sum(x)
equiv_callables = [sum,
np.sum,
lambda x: sum(x),
lambda x: x.sum(),
partial(sum),
fn_class(), ]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby('foo').agg(ecall)
tm.assert_frame_equal(result, expected)
def test_agg_over_numpy_arrays():
# GH 3788
df = pd.DataFrame([[1, np.array([10, 20, 30])],
[1, np.array([40, 50, 60])],
[2, np.array([20, 30, 40])]],
columns=['category', 'arraydata'])
result = df.groupby('category').agg(sum)
expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
expected_index = pd.Index([1, 2], name='category')
expected_column = ['arraydata']
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_column)
tm.assert_frame_equal(result, expected)
def test_agg_timezone_round_trip():
# GH 15426
ts = pd.Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
df = pd.DataFrame({'a': 1,
'b': [ts + dt.timedelta(minutes=nn)
for nn in range(10)]})
result1 = df.groupby('a')['b'].agg(np.min).iloc[0]
result2 = df.groupby('a')['b'].agg(lambda x: np.min(x)).iloc[0]
result3 = df.groupby('a')['b'].min().iloc[0]
assert result1 == ts
assert result2 == ts
assert result3 == ts
dates = [pd.Timestamp("2016-01-0%d 12:00:00" % i, tz='US/Pacific')
for i in range(1, 5)]
df = pd.DataFrame({'A': ['a', 'b'] * 2, 'B': dates})
grouped = df.groupby('A')
ts = df['B'].iloc[0]
assert ts == grouped.nth(0)['B'].iloc[0]
assert ts == grouped.head(1)['B'].iloc[0]
assert ts == grouped.first()['B'].iloc[0]
assert ts == grouped.apply(lambda x: x.iloc[0])[0]
ts = df['B'].iloc[2]
assert ts == grouped.last()['B'].iloc[0]
assert ts == grouped.apply(lambda x: x.iloc[-1])[0]
def test_sum_uint64_overflow():
# see gh-14758
# Convert to uint64 and don't overflow
df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
df = df + 9223372036854775807
index = pd.Index([9223372036854775808,
9223372036854775810,
9223372036854775812],
dtype=np.uint64)
expected = pd.DataFrame({1: [9223372036854775809,
9223372036854775811,
9223372036854775813]},
index=index)
expected.index.name = 0
result = df.groupby(0).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("structure, expected", [
(tuple, pd.DataFrame({'C': {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
(list, pd.DataFrame({'C': {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
(lambda x: tuple(x), pd.DataFrame({'C': {(1, 1): (1, 1, 1),
(3, 4): (3, 4, 4)}})),
(lambda x: list(x), pd.DataFrame({'C': {(1, 1): [1, 1, 1],
(3, 4): [3, 4, 4]}}))
])
def test_agg_structs_dataframe(structure, expected):
df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3],
'B': [1, 1, 1, 4, 4, 4],
'C': [1, 1, 1, 3, 4, 4]})
result = df.groupby(['A', 'B']).aggregate(structure)
expected.index.names = ['A', 'B']
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("structure, expected", [
(tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name='C')),
(list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name='C')),
(lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)],
index=[1, 3], name='C')),
(lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]],
index=[1, 3], name='C'))
])
def test_agg_structs_series(structure, expected):
# Issue #18079
df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3],
'B': [1, 1, 1, 4, 4, 4],
'C': [1, 1, 1, 3, 4, 4]})
result = df.groupby('A')['C'].aggregate(structure)
expected.index.name = 'A'
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.")
def test_agg_category_nansum(observed):
categories = ['a', 'b', 'c']
df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
categories=categories),
'B': [1, 2, 3]})
result = df.groupby("A", observed=observed).B.agg(np.nansum)
expected = pd.Series([3, 3, 0],
index=pd.CategoricalIndex(['a', 'b', 'c'],
categories=categories,
name='A'),
name='B')
if observed:
expected = expected[expected != 0]
tm.assert_series_equal(result, expected)
|
|
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from math import ceil
from stack_advisor import DefaultStackAdvisor
class BaseBIGTOP08StackAdvisor(DefaultStackAdvisor):
def getComponentLayoutValidations(self, services, hosts):
"""Returns array of Validation objects about issues with hostnames components assigned to"""
items = []
# Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible
hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
hostsCount = len(hostsList)
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item for sublist in componentsListList for item in sublist]
nameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "NAMENODE"]
secondaryNameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SECONDARY_NAMENODE"]
# Validating cardinality
for component in componentsList:
if component["StackServiceComponents"]["cardinality"] is not None:
componentName = component["StackServiceComponents"]["component_name"]
componentDisplayName = component["StackServiceComponents"]["display_name"]
componentHostsCount = 0
if component["StackServiceComponents"]["hostnames"] is not None:
componentHostsCount = len(component["StackServiceComponents"]["hostnames"])
cardinality = str(component["StackServiceComponents"]["cardinality"])
# cardinality types: null, 1+, 1-2, 1, ALL
message = None
if "+" in cardinality:
hostsMin = int(cardinality[:-1])
if componentHostsCount < hostsMin:
message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
elif "-" in cardinality:
nums = cardinality.split("-")
hostsMin = int(nums[0])
hostsMax = int(nums[1])
if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
elif "ALL" == cardinality:
if componentHostsCount != hostsCount:
message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
else:
if componentHostsCount != int(cardinality):
message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
if message is not None:
items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
# Validating host-usage
usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
usedHostsList = [item for sublist in usedHostsListList for item in sublist]
nonUsedHostsList = [item for item in hostsList if item not in usedHostsList]
for host in nonUsedHostsList:
items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Host is not used', "host": str(host) } )
return items
def getServiceConfigurationRecommenderDict(self):
return {
"YARN": self.recommendYARNConfigurations,
"MAPREDUCE2": self.recommendMapReduce2Configurations
}
def putProperty(self, config, configType):
config[configType] = {"properties": {}}
def appendProperty(key, value):
config[configType]["properties"][key] = str(value)
return appendProperty
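  # putProperty returns a closure bound to a single config section; callers use
  # it as, e.g., putYarnProperty('yarn.scheduler.minimum-allocation-mb', 1024),
  # which writes into configurations["yarn-site"]["properties"].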
def recommendYARNConfigurations(self, configurations, clusterData):
putYarnProperty = self.putProperty(configurations, "yarn-site")
putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
def recommendMapReduce2Configurations(self, configurations, clusterData):
putMapredProperty = self.putProperty(configurations, "mapred-site")
putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
def getConfigurationClusterSummary(self, servicesList, hosts, components, services):
hBaseInstalled = False
if 'HBASE' in servicesList:
hBaseInstalled = True
cluster = {
"cpu": 0,
"disk": 0,
"ram": 0,
"hBaseInstalled": hBaseInstalled,
"components": components
}
if len(hosts["items"]) > 0:
host = hosts["items"][0]["Hosts"]
cluster["cpu"] = host["cpu_count"]
cluster["disk"] = len(host["disk_info"])
cluster["ram"] = int(host["total_mem"] / (1024 * 1024))
ramRecommendations = [
{"os":1, "hbase":1},
{"os":2, "hbase":1},
{"os":2, "hbase":2},
{"os":4, "hbase":4},
{"os":6, "hbase":8},
{"os":8, "hbase":8},
{"os":8, "hbase":8},
{"os":12, "hbase":16},
{"os":24, "hbase":24},
{"os":32, "hbase":32},
{"os":64, "hbase":64}
]
index = {
cluster["ram"] <= 4: 0,
4 < cluster["ram"] <= 8: 1,
8 < cluster["ram"] <= 16: 2,
16 < cluster["ram"] <= 24: 3,
24 < cluster["ram"] <= 48: 4,
48 < cluster["ram"] <= 64: 5,
64 < cluster["ram"] <= 72: 6,
72 < cluster["ram"] <= 96: 7,
96 < cluster["ram"] <= 128: 8,
128 < cluster["ram"] <= 256: 9,
256 < cluster["ram"]: 10
}[1]
cluster["reservedRam"] = ramRecommendations[index]["os"]
cluster["hbaseRam"] = ramRecommendations[index]["hbase"]
cluster["minContainerSize"] = {
cluster["ram"] <= 4: 256,
4 < cluster["ram"] <= 8: 512,
8 < cluster["ram"] <= 24: 1024,
24 < cluster["ram"]: 2048
}[1]
totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
if cluster["hBaseInstalled"]:
totalAvailableRam -= cluster["hbaseRam"]
cluster["totalAvailableRam"] = max(2048, totalAvailableRam * 1024)
    # containers = max(3, min(2 * CORES, min(1.8 * DISKS, totalAvailableRam / minContainerSize)))
cluster["containers"] = round(max(3,
min(2 * cluster["cpu"],
min(ceil(1.8 * cluster["disk"]),
cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
    # ramPerContainer = totalAvailableRam / containers, with totalAvailableRam = max(2 GB, RAM - reservedRam - hbaseRam)
cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
    # if greater than 1 GB, round down to a multiple of 512 MB
if cluster["ramPerContainer"] > 1024:
cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
cluster["mapMemory"] = int(cluster["ramPerContainer"])
cluster["reduceMemory"] = cluster["ramPerContainer"]
cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
return cluster
def getConfigurationsValidationItems(self, services, hosts):
"""Returns array of Validation objects about issues with configuration values provided in services"""
items = []
recommendations = self.recommendConfigurations(services, hosts)
recommendedDefaults = recommendations["recommendations"]["blueprint"]["configurations"]
configurations = services["configurations"]
for service in services["services"]:
serviceName = service["StackServices"]["service_name"]
validator = self.validateServiceConfigurations(serviceName)
if validator is not None:
siteName = validator[0]
method = validator[1]
if siteName in recommendedDefaults:
siteProperties = getSiteProperties(configurations, siteName)
if siteProperties is not None:
resultItems = method(siteProperties, recommendedDefaults[siteName]["properties"], configurations)
items.extend(resultItems)
return items
def getServiceConfigurationValidators(self):
return {
"MAPREDUCE2": ["mapred-site", self.validateMapReduce2Configurations],
"YARN": ["yarn-site", self.validateYARNConfigurations]
}
def validateServiceConfigurations(self, serviceName):
return self.getServiceConfigurationValidators().get(serviceName, None)
def toConfigurationValidationProblems(self, validationProblems, siteName):
result = []
for validationProblem in validationProblems:
validationItem = validationProblem.get("item", None)
if validationItem is not None:
problem = {"type": 'configuration', "level": validationItem["level"], "message": validationItem["message"],
"config-type": siteName, "config-name": validationProblem["config-name"] }
result.append(problem)
return result
def getWarnItem(self, message):
return {"level": "WARN", "message": message}
def getErrorItem(self, message):
return {"level": "ERROR", "message": message}
def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
if not propertyName in properties:
return self.getErrorItem("Value should be set")
value = to_number(properties[propertyName])
if value is None:
return self.getErrorItem("Value should be integer")
defaultValue = to_number(recommendedDefaults[propertyName])
if defaultValue is None:
return None
if value < defaultValue:
return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue))
return None
def validateXmxValue(self, properties, recommendedDefaults, propertyName):
if not propertyName in properties:
return self.getErrorItem("Value should be set")
value = properties[propertyName]
defaultValue = recommendedDefaults[propertyName]
if defaultValue is None:
return self.getErrorItem("Config's default value can't be null or undefined")
if not checkXmxValueFormat(value):
return self.getErrorItem('Invalid value format')
valueInt = formatXmxSizeToBytes(getXmxSize(value))
defaultValueXmx = getXmxSize(defaultValue)
defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
if valueInt < defaultValueInt:
return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
return None
def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
{"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
{"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
{"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
{"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
{"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
{"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')} ]
return self.toConfigurationValidationProblems(validationItems, "mapred-site")
def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
{"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
{"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
return self.toConfigurationValidationProblems(validationItems, "yarn-site")
def getMastersWithMultipleInstances(self):
return ['ZOOKEEPER_SERVER', 'HBASE_MASTER']
def getNotValuableComponents(self):
return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
def getNotPreferableOnServerComponents(self):
return ['GANGLIA_SERVER']
def getCardinalitiesDict(self):
return {
'ZOOKEEPER_SERVER': {"min": 3},
'HBASE_MASTER': {"min": 1},
}
def getComponentLayoutSchemes(self):
return {
'NAMENODE': {"else": 0},
'SECONDARY_NAMENODE': {"else": 1},
'HBASE_MASTER': {6: 0, 31: 2, "else": 3},
'HISTORYSERVER': {31: 1, "else": 2},
'RESOURCEMANAGER': {31: 1, "else": 2},
'OOZIE_SERVER': {6: 1, 31: 2, "else": 3},
'HIVE_SERVER': {6: 1, 31: 2, "else": 4},
'HIVE_METASTORE': {6: 1, 31: 2, "else": 4},
'WEBHCAT_SERVER': {6: 1, 31: 2, "else": 4},
}
class BIGTOP08StackAdvisor(BaseBIGTOP08StackAdvisor):
def getServiceConfigurationRecommenderDict(self):
parentRecommendConfDict = super(BIGTOP08StackAdvisor, self).getServiceConfigurationRecommenderDict()
childRecommendConfDict = {
"OOZIE": self.recommendOozieConfigurations,
"HIVE": self.recommendHiveConfigurations,
"TEZ": self.recommendTezConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
if "FALCON_SERVER" in clusterData["components"]:
putMapredProperty = self.putProperty(configurations, "oozie-site")
putMapredProperty("oozie.services.ext",
"org.apache.oozie.service.JMSAccessorService," +
"org.apache.oozie.service.PartitionDependencyManagerService," +
"org.apache.oozie.service.HCatAccessorService")
def recommendHiveConfigurations(self, configurations, clusterData, services, hosts):
containerSize = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
containerSize = min(clusterData['containers'] * clusterData['ramPerContainer'], containerSize)
putHiveProperty = self.putProperty(configurations, "hive-site")
putHiveProperty('hive.auto.convert.join.noconditionaltask.size', int(round(containerSize / 3)) * 1048576)
putHiveProperty('hive.tez.java.opts', "-server -Xmx" + str(int(round((0.8 * containerSize) + 0.5)))
+ "m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC")
putHiveProperty('hive.tez.container.size', containerSize)
def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
putTezProperty = self.putProperty(configurations, "tez-site")
putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory']))
putTezProperty("tez.am.java.opts",
"-server -Xmx" + str(int(0.8 * clusterData["amMemory"]))
+ "m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC")
def getNotPreferableOnServerComponents(self):
return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER']
def getNotValuableComponents(self):
return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR', 'APP_TIMELINE_SERVER']
def getComponentLayoutSchemes(self):
parentSchemes = super(BIGTOP08StackAdvisor, self).getComponentLayoutSchemes()
childSchemes = {
'APP_TIMELINE_SERVER': {31: 1, "else": 2},
'FALCON_SERVER': {6: 1, 31: 2, "else": 3}
}
parentSchemes.update(childSchemes)
return parentSchemes
def getServiceConfigurationValidators(self):
parentValidators = super(BIGTOP08StackAdvisor, self).getServiceConfigurationValidators()
childValidators = {
"HIVE": ["hive-site", self.validateHiveConfigurations],
"TEZ": ["tez-site", self.validateTezConfigurations]
}
parentValidators.update(childValidators)
return parentValidators
def validateHiveConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'hive.tez.container.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.tez.container.size')},
{"config-name": 'hive.tez.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'hive.tez.java.opts')},
{"config-name": 'hive.auto.convert.join.noconditionaltask.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.auto.convert.join.noconditionaltask.size')} ]
return self.toConfigurationValidationProblems(validationItems, "hive-site")
def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
{"config-name": 'tez.am.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'tez.am.java.opts')} ]
return self.toConfigurationValidationProblems(validationItems, "tez-site")
# Validation helper methods
def getSiteProperties(configurations, siteName):
siteConfig = configurations.get(siteName)
if siteConfig is None:
return None
return siteConfig.get("properties")
def to_number(s):
try:
return int(re.sub("\D", "", s))
except ValueError:
return None
def checkXmxValueFormat(value):
p = re.compile('-Xmx(\d+)(b|k|m|g|p|t|B|K|M|G|P|T)?')
matches = p.findall(value)
return len(matches) == 1
def getXmxSize(value):
p = re.compile("-Xmx(\d+)(.?)")
result = p.findall(value)[0]
if len(result) > 1:
# result[1] - is a space or size formatter (b|k|m|g etc)
return result[0] + result[1].lower()
return result[0]
def formatXmxSizeToBytes(value):
value = value.lower()
if len(value) == 0:
return 0
modifier = value[-1]
if modifier == ' ' or modifier in "0123456789":
modifier = 'b'
m = {
modifier == 'b': 1,
modifier == 'k': 1024,
modifier == 'm': 1024 * 1024,
modifier == 'g': 1024 * 1024 * 1024,
modifier == 't': 1024 * 1024 * 1024 * 1024,
modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024
}[1]
return to_number(value) * m
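# Illustrative sketch (not part of the original advisor code): how the Xmx
# helpers above are meant to compose when validating a JVM options string.
# The options value below is a hypothetical hive.tez.java.opts setting.
def _example_xmx_opts_to_bytes():
  """Run checkXmxValueFormat -> getXmxSize -> formatXmxSizeToBytes on a sample value."""
  opts = "-server -Xmx2048m -Djava.net.preferIPv4Stack=true"
  if checkXmxValueFormat(opts):        # exactly one -Xmx token is present
    size = getXmxSize(opts)            # -> "2048m"
    return formatXmxSizeToBytes(size)  # -> 2048 * 1024 * 1024
  return None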
def getPort(address):
"""
Extracts port from the address like 0.0.0.0:1019
"""
if address is None:
return None
m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
if m is not None:
return int(m.group(2))
else:
return None
def isSecurePort(port):
"""
Returns True if the port is root-owned (privileged, below 1024) on *nix systems
"""
if port is not None:
return port < 1024
else:
return False
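# Illustrative sketch (not part of the original advisor code): getPort and
# isSecurePort are meant to be chained when checking bind addresses; the
# address below is a hypothetical dfs.datanode.address value.
def _example_is_secure_address():
  """Return True when the configured address uses a privileged (< 1024) port."""
  address = "0.0.0.0:1019"
  port = getPort(address)    # -> 1019
  return isSecurePort(port)  # -> True, since 1019 < 1024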
|
|
"""
Support for ISY994 binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.isy994/
"""
import asyncio
import logging
from datetime import timedelta
from typing import Callable # noqa
from homeassistant.core import callback
from homeassistant.components.binary_sensor import BinarySensorDevice, DOMAIN
from homeassistant.components.isy994 import (ISY994_NODES, ISY994_PROGRAMS,
ISYDevice)
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ISY_DEVICE_TYPES = {
'moisture': ['16.8', '16.13', '16.14'],
'opening': ['16.9', '16.6', '16.7', '16.2', '16.17', '16.20', '16.21'],
'motion': ['16.1', '16.4', '16.5', '16.3']
}
# pylint: disable=unused-argument
def setup_platform(hass, config: ConfigType,
add_devices: Callable[[list], None], discovery_info=None):
"""Set up the ISY994 binary sensor platform."""
devices = []
devices_by_nid = {}
child_nodes = []
for node in hass.data[ISY994_NODES][DOMAIN]:
if node.parent_node is None:
device = ISYBinarySensorDevice(node)
devices.append(device)
devices_by_nid[node.nid] = device
else:
# We'll process the child nodes last, to ensure all parent nodes
# have been processed
child_nodes.append(node)
for node in child_nodes:
try:
parent_device = devices_by_nid[node.parent_node.nid]
except KeyError:
_LOGGER.error("Node %s has a parent node %s, but no device "
"was created for the parent. Skipping.",
node.nid, node.parent_nid)
else:
device_type = _detect_device_type(node)
subnode_id = int(node.nid[-1])
if device_type == 'opening':
# Door/window sensors use an optional "negative" node
if subnode_id == 4:
# Subnode 4 is the heartbeat node, which we will represent
# as a separate binary_sensor
device = ISYBinarySensorHeartbeat(node, parent_device)
parent_device.add_heartbeat_device(device)
devices.append(device)
elif subnode_id == 2:
parent_device.add_negative_node(node)
elif device_type == 'moisture':
# Moisture nodes have a subnode 2, but we ignore it because it's
# just the inverse of the primary node.
if subnode_id == 4:
# Heartbeat node
device = ISYBinarySensorHeartbeat(node, parent_device)
parent_device.add_heartbeat_device(device)
devices.append(device)
else:
# We don't yet have any special logic for other sensor types,
# so add the nodes as individual devices
device = ISYBinarySensorDevice(node)
devices.append(device)
for name, status, _ in hass.data[ISY994_PROGRAMS][DOMAIN]:
devices.append(ISYBinarySensorProgram(name, status))
add_devices(devices)
def _detect_device_type(node) -> str:
try:
device_type = node.type
except AttributeError:
# The type attribute didn't exist in the ISY's API response
return None
split_type = device_type.split('.')
for device_class, ids in ISY_DEVICE_TYPES.items():
if '{}.{}'.format(split_type[0], split_type[1]) in ids:
return device_class
return None
def _is_val_unknown(val):
"""Determine if a number value represents UNKNOWN from PyISY."""
return val == -1*float('inf')
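# Illustrative sketch (not part of the upstream component): how hypothetical
# ISY type codes map to device classes via ISY_DEVICE_TYPES above. Real nodes
# come from PyISY; the stub below only exposes the attribute that
# _detect_device_type reads.
def _example_detect_device_type():
    """Return the device classes detected for a few sample type codes."""
    class _StubNode:
        def __init__(self, type_code):
            self.type = type_code
    return [
        _detect_device_type(_StubNode('16.1.35.0')),  # -> 'motion'
        _detect_device_type(_StubNode('16.8.1.0')),   # -> 'moisture'
        _detect_device_type(_StubNode('4.16.1.0')),   # -> None (no matching prefix)
    ]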
class ISYBinarySensorDevice(ISYDevice, BinarySensorDevice):
"""Representation of an ISY994 binary sensor device.
Often times, a single device is represented by multiple nodes in the ISY,
allowing for different nuances in how those devices report their on and
off events. This class turns those multiple nodes in to a single Hass
entity and handles both ways that ISY binary sensors can work.
"""
def __init__(self, node) -> None:
"""Initialize the ISY994 binary sensor device."""
super().__init__(node)
self._negative_node = None
self._heartbeat_device = None
self._device_class_from_type = _detect_device_type(self._node)
# pylint: disable=protected-access
if _is_val_unknown(self._node.status._val):
self._computed_state = None
else:
self._computed_state = bool(self._node.status._val)
@asyncio.coroutine
def async_added_to_hass(self) -> None:
"""Subscribe to the node and subnode event emitters."""
yield from super().async_added_to_hass()
self._node.controlEvents.subscribe(self._positive_node_control_handler)
if self._negative_node is not None:
self._negative_node.controlEvents.subscribe(
self._negative_node_control_handler)
def add_heartbeat_device(self, device) -> None:
"""Register a heartbeat device for this sensor.
The heartbeat node beats on its own, but we can gain a little
reliability by considering any node activity for this sensor
to be a heartbeat as well.
"""
self._heartbeat_device = device
def _heartbeat(self) -> None:
"""Send a heartbeat to our heartbeat device, if we have one."""
if self._heartbeat_device is not None:
self._heartbeat_device.heartbeat()
def add_negative_node(self, child) -> None:
"""Add a negative node to this binary sensor device.
The negative node is a node that can receive the 'off' events
for the sensor, depending on device configuration and type.
"""
self._negative_node = child
# pylint: disable=protected-access
if not _is_val_unknown(self._negative_node.status._val):
# If the negative node has a value, it means the negative node is
# in use for this device. Therefore, we cannot determine the state
# of the sensor until we receive our first ON event.
self._computed_state = None
def _negative_node_control_handler(self, event: object) -> None:
"""Handle an "On" control event from the "negative" node."""
if event == 'DON':
_LOGGER.debug("Sensor %s turning Off via the Negative node "
"sending a DON command", self.name)
self._computed_state = False
self.schedule_update_ha_state()
self._heartbeat()
def _positive_node_control_handler(self, event: object) -> None:
"""Handle On and Off control event coming from the primary node.
Depending on device configuration, sometimes only On events
will come to this node, with the negative node representing Off
events
"""
if event == 'DON':
_LOGGER.debug("Sensor %s turning On via the Primary node "
"sending a DON command", self.name)
self._computed_state = True
self.schedule_update_ha_state()
self._heartbeat()
if event == 'DOF':
_LOGGER.debug("Sensor %s turning Off via the Primary node "
"sending a DOF command", self.name)
self._computed_state = False
self.schedule_update_ha_state()
self._heartbeat()
# pylint: disable=unused-argument
def on_update(self, event: object) -> None:
"""Ignore primary node status updates.
We listen directly to the Control events on all nodes for this
device.
"""
pass
@property
def value(self) -> object:
"""Get the current value of the device.
Insteon leak sensors set their primary node to On when the state is
DRY, not WET, so we invert the binary state if the user indicates
that it is a moisture sensor.
"""
if self._computed_state is None:
# Do this first so we don't invert None on moisture sensors
return None
if self.device_class == 'moisture':
return not self._computed_state
return self._computed_state
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on.
Note: This method will return false if the current state is UNKNOWN
"""
return bool(self.value)
@property
def state(self):
"""Return the state of the binary sensor."""
if self._computed_state is None:
return None
return STATE_ON if self.is_on else STATE_OFF
@property
def device_class(self) -> str:
"""Return the class of this device.
This was discovered by parsing the device type code during init
"""
return self._device_class_from_type
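# Illustrative sketch (not part of the upstream component): the control-event
# handlers above reduce to this small state transition. A 'DON' on the primary
# node turns the sensor on, a 'DOF' on the primary node (or a 'DON' on the
# optional negative node) turns it off; any other event leaves it unchanged.
def _example_next_computed_state(current, event, from_negative_node=False):
    """Return the next computed state for a hypothetical control event."""
    if from_negative_node:
        return False if event == 'DON' else current
    if event == 'DON':
        return True
    if event == 'DOF':
        return False
    return current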
class ISYBinarySensorHeartbeat(ISYDevice, BinarySensorDevice):
"""Representation of the battery state of an ISY994 sensor."""
def __init__(self, node, parent_device) -> None:
"""Initialize the ISY994 binary sensor device."""
super().__init__(node)
self._computed_state = None
self._parent_device = parent_device
self._heartbeat_timer = None
@asyncio.coroutine
def async_added_to_hass(self) -> None:
"""Subscribe to the node and subnode event emitters."""
yield from super().async_added_to_hass()
self._node.controlEvents.subscribe(
self._heartbeat_node_control_handler)
# Start the timer on bootup, so we can change from UNKNOWN to ON
self._restart_timer()
def _heartbeat_node_control_handler(self, event: object) -> None:
"""Update the heartbeat timestamp when an On event is sent."""
if event == 'DON':
self.heartbeat()
def heartbeat(self):
"""Mark the device as online, and restart the 25 hour timer.
This gets called when the heartbeat node beats, but also when the
parent sensor sends any events, as we can trust that to mean the device
is online. This mitigates the risk of false positives due to a single
missed heartbeat event.
"""
self._computed_state = False
self._restart_timer()
self.schedule_update_ha_state()
def _restart_timer(self):
"""Restart the 25 hour timer."""
try:
self._heartbeat_timer()
self._heartbeat_timer = None
except TypeError:
# No heartbeat timer is active
pass
# pylint: disable=unused-argument
@callback
def timer_elapsed(now) -> None:
"""Heartbeat missed; set state to indicate dead battery."""
self._computed_state = True
self._heartbeat_timer = None
self.schedule_update_ha_state()
point_in_time = dt_util.utcnow() + timedelta(hours=25)
_LOGGER.debug("Timer starting. Now: %s Then: %s",
dt_util.utcnow(), point_in_time)
self._heartbeat_timer = async_track_point_in_utc_time(
self.hass, timer_elapsed, point_in_time)
# pylint: disable=unused-argument
def on_update(self, event: object) -> None:
"""Ignore node status updates.
We listen directly to the Control events for this device.
"""
pass
@property
def value(self) -> object:
"""Get the current value of this sensor."""
return self._computed_state
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on.
Note: This method will return false if the current state is UNKNOWN
"""
return bool(self.value)
@property
def state(self):
"""Return the state of the binary sensor."""
if self._computed_state is None:
return None
return STATE_ON if self.is_on else STATE_OFF
@property
def device_class(self) -> str:
"""Get the class of this device."""
return 'battery'
@property
def device_state_attributes(self):
"""Get the state attributes for the device."""
attr = super().device_state_attributes
attr['parent_entity_id'] = self._parent_device.entity_id
return attr
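# Illustrative sketch (not part of the upstream component): the heartbeat
# watchdog above is an instance of this generic Home Assistant pattern --
# schedule a callback 25 hours out and cancel/reschedule it whenever activity
# is seen. The 'hass' and 'on_missed_heartbeat' arguments are assumed to be
# supplied by the caller.
def _example_start_heartbeat_watchdog(hass, on_missed_heartbeat):
    """Return the cancel callable for a hypothetical 25 hour heartbeat timer."""
    point_in_time = dt_util.utcnow() + timedelta(hours=25)
    return async_track_point_in_utc_time(hass, on_missed_heartbeat, point_in_time)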
class ISYBinarySensorProgram(ISYDevice, BinarySensorDevice):
"""Representation of an ISY994 binary sensor program.
This does not need all of the subnode logic in the device version of binary
sensors.
"""
def __init__(self, name, node) -> None:
"""Initialize the ISY994 binary sensor program."""
super().__init__(node)
self._name = name
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on."""
return bool(self.value)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitConnectionsOperations:
"""ExpressRouteCircuitConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitConnection":
"""Gets the specified Express Route Circuit Connection from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs: Any
) -> "_models.ExpressRouteCircuitConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
express_route_circuit_connection_parameters: "_models.ExpressRouteCircuitConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitConnection"]:
"""Creates or updates a Express Route Circuit Connection in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters supplied to the create or update
express route circuit connection operation.
:type express_route_circuit_connection_parameters: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitConnectionListResult"]:
"""Gets all global reach connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections'} # type: ignore
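# Illustrative usage sketch (not part of the generated client): how a caller
# typically reaches these operations through the async management client. The
# resource group, circuit, peering and connection names are hypothetical, and
# client.express_route_circuit_connections is assumed to be the attribute the
# service client attaches for this operation group; only the operation names
# used below (get, list, begin_delete) come from this module.
async def _example_express_route_circuit_connections_usage(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, subscription_id) as client:
            ops = client.express_route_circuit_connections
            # Read a single connection.
            connection = await ops.get(
                "my-rg", "my-circuit", "AzurePrivatePeering", "my-connection")
            print(connection.name)
            # Page through every connection on the peering.
            async for item in ops.list("my-rg", "my-circuit", "AzurePrivatePeering"):
                print(item.name)
            # begin_delete returns an AsyncLROPoller; wait for completion.
            poller = await ops.begin_delete(
                "my-rg", "my-circuit", "AzurePrivatePeering", "my-connection")
            await poller.result()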
|