# AISim2.py
# Reversi (Othello)
"""
Reversi Simulation 1 - a computer vs. computer tile flipping game simulation
An example from Chapter 16 of
'Invent Your Own Computer Games with Python' by Al Sweigart
A.C. LoGreco
"""
import random
import sys
def drawBoard(board):
"""
This function prints out the board that it was passed. Returns None.
"""
HLINE = ' +---+---+---+---+---+---+---+---+'
VLINE = ' | | | | | | | | |'
print(' 1 2 3 4 5 6 7 8')
print(HLINE)
for y in range(8):
print(VLINE)
print(y+1, end=' ')
for x in range(8):
print('| %s' % (board[x][y]), end=' ')
print('|')
print(VLINE)
print(HLINE)
def resetBoard(board):
"""
Blanks out the board it is passed, except for the original starting position.
"""
for x in range(8):
for y in range(8):
board[x][y] = ' '
# Starting pieces:
board[3][3] = 'X'
board[3][4] = 'O'
board[4][3] = 'O'
board[4][4] = 'X'
def getNewBoard():
"""
Creates a brand new, blank board data structure.
"""
board = []
for i in range(8):
board.append([' '] * 8)
return board
def isValidMove(board, tile, xstart, ystart):
"""
Returns False if the player's move on space xstart, ystart is invalid.
If it is a valid move, returns a list of spaces that would become the
player's if they made a move here.
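    For example, from the standard starting position set up by resetBoard(),
    isValidMove(board, 'X', 3, 5) returns [[3, 4]]: the 'O' at (3, 4) is
    bracketed between the new 'X' at (3, 5) and the existing 'X' at (3, 3).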
"""
    if not isOnBoard(xstart, ystart) or board[xstart][ystart] != ' ':
return False
board[xstart][ystart] = tile # temporarily set the tile on the board.
if tile == 'X':
otherTile = 'O'
else:
otherTile = 'X'
tilesToFlip = []
for xdirection, ydirection in [[0, 1], [1, 1], [1, 0], [1, -1], [0, -1],
[-1, -1], [-1, 0], [-1, 1]]:
x, y = xstart, ystart
x += xdirection # first step in the direction
y += ydirection # first step in the direction
if isOnBoard(x, y) and board[x][y] == otherTile:
# There is a piece belonging to the other player next to our piece.
x += xdirection
y += ydirection
if not isOnBoard(x, y):
continue
while board[x][y] == otherTile:
x += xdirection
y += ydirection
if not isOnBoard(x, y):
# break out of while loop, then continue in for loop
break
if not isOnBoard(x, y):
continue
if board[x][y] == tile:
# There are pieces to flip over. Go in the reverse direction
# until we reach the original space, noting all the tiles
# along the way.
while True:
x -= xdirection
y -= ydirection
if x == xstart and y == ystart:
break
tilesToFlip.append([x, y])
board[xstart][ystart] = ' ' # restore the empty space
if len(tilesToFlip) == 0:
# If no tiles were flipped, this is not a valid move.
return False
return tilesToFlip
def isOnBoard(x, y):
"""
    Returns True if the coordinates are located on the board.
"""
return x >= 0 and x <= 7 and y >= 0 and y <= 7
def getBoardWithValidMoves(board, tile):
"""
Returns a new board with '.' marking the valid moves the given player
can make.
"""
dupeBoard = getBoardCopy(board)
for x, y in getValidMoves(dupeBoard, tile):
dupeBoard[x][y] = '.'
return dupeBoard
def getValidMoves(board, tile):
"""
Returns a list of [x,y] lists of valid moves for the given player on the
given board.
"""
validMoves = []
for x in range(8):
for y in range(8):
if isValidMove(board, tile, x, y) != False:
validMoves.append([x, y])
return validMoves
def getScoreOfBoard(board):
"""
Determine the score by counting the tiles. Returns a dictionary with
keys 'X' and 'O'.
"""
xscore = 0
oscore = 0
for x in range(8):
for y in range (8):
if board[x][y] == 'X':
xscore += 1
if board[x][y] == 'O':
oscore += 1
return {'X':xscore, 'O':oscore}
def enterPlayerTile():
"""
Lets the player type which tile they want to be.
Returns a list with the player's tile as the first item, and the
computer's tile as the second.
"""
tile = ''
while not (tile == 'X' or tile == 'O'):
print('Do you want to be X or O?')
tile = input().upper()
# the first element in the list is the player's tile, the second is the
# computer's tile.
if tile == 'X':
return ['X', 'O']
else:
return ['O', 'X']
def whoGoesFirst():
"""
Randomly choose the player who goes first.
"""
if random.randint(0, 1) == 0:
return 'computer'
else:
return 'player'
def playAgain():
"""
This function returns True if the player wants to play again,
otherwise it returns False.
"""
print('Do you want to play again? (yes or no)')
return input().lower().startswith('y')
def makeMove(board, tile, xstart, ystart):
"""
Place the tile on the board at xstart, ystart, and flip any of the
opponent's pieces.
Returns False if this is an invalid move, True if it is valid.
"""
tilesToFlip = isValidMove(board, tile, xstart, ystart)
if tilesToFlip == False:
return False
board[xstart][ystart] = tile
for x, y in tilesToFlip:
board[x][y] = tile
return True
def getBoardCopy(board):
"""
Make a duplicate of the board list and return the duplicate.
"""
dupeBoard = getNewBoard()
for x in range(8):
for y in range(8):
dupeBoard[x][y] = board[x][y]
return dupeBoard
def isOnCorner(x, y):
"""
Returns True if the position is in one of the four corners.
"""
return ((x == 0 and y == 0) or
(x == 7 and y == 0) or
(x == 0 and y == 7) or
(x == 7 and y == 7))
def getPlayerMove(board, playerTile):
"""
Let the player type in their move.
Returns the move as [x, y].
(or returns the strings 'hints' or 'quit')
"""
DIGITS1TO8 = '1 2 3 4 5 6 7 8'.split()
while True:
print('Enter your move, or type quit to end the game, or hints to turn off/on hints.')
move = input().lower()
if move == 'quit':
return 'quit'
if move == 'hints':
return 'hints'
if len(move) == 2 and move[0] in DIGITS1TO8 and move[1] in DIGITS1TO8:
x = int(move[0]) - 1
y = int(move[1]) - 1
if isValidMove(board, playerTile, x, y) == False:
continue
else:
break
else:
print('That is not a valid move. Type the x digit (1-8), then the y digit (1-8).')
print('For example, 81 will be the top-right corner.')
return [x, y]
def getComputerMove(board, computerTile):
"""
Given a board and the computer's tile, determine where to
move and return that move as a [x, y] list.
"""
possibleMoves = getValidMoves(board, computerTile)
# randomize the order of possible moves
random.shuffle(possibleMoves)
# always go for a corner if available.
for x, y in possibleMoves:
if isOnCorner(x, y):
return [x, y]
# Go through all the possible moves and remember the best scoring move.
bestScore = -1
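    # (-1 is below any real score, so the first move examined always becomes
    # the initial bestMove)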
for x, y in possibleMoves:
dupeBoard = getBoardCopy(board)
makeMove(dupeBoard, computerTile, x, y)
score = getScoreOfBoard(dupeBoard)[computerTile]
if score > bestScore:
bestMove = [x, y]
bestScore = score
return bestMove
def showPoints(playerTile, computerTile):
"""
Prints out the current score.
"""
scores = getScoreOfBoard(mainBoard)
print('You have %s points. The computer has %s points.' %
(scores[playerTile], scores[computerTile]))
# Main Game Loop
print('Welcome to Reversi!')
xwins = 0
owins = 0
ties = 0
numGames = int(input('Enter number of games to run: '))
for game in range(numGames):
print('Game #%s:' % (game), end=' ')
# Reset the board and game.
mainBoard = getNewBoard()
resetBoard(mainBoard)
if whoGoesFirst() == 'player':
turn = 'X'
else:
turn = 'O'
while True:
if turn == 'X':
# X's turn.
otherTile = 'O'
x, y = getComputerMove(mainBoard, 'X')
makeMove(mainBoard, 'X', x, y)
else:
# O's turn.
otherTile = 'X'
x, y = getComputerMove(mainBoard, 'O')
makeMove(mainBoard, 'O', x, y)
if getValidMoves(mainBoard, otherTile) == []:
break
else:
turn = otherTile
# Display the final score.
scores = getScoreOfBoard(mainBoard)
print('X scored %s points. O scored %s points.' %
(scores['X'], scores['O']))
if scores['X'] > scores['O']:
xwins += 1
elif scores['X'] < scores['O']:
owins += 1
else:
ties += 1
numGames = float(numGames)
xpercent = round(((xwins / numGames) * 100), 2)
opercent = round(((owins / numGames) * 100), 2)
tiepercent = round(((ties / numGames) * 100), 2)
print('X wins %s games (%s%%), O wins %s games (%s%%), ties for %s games (%s%%) of %s games total.' %
(xwins, xpercent, owins, opercent, ties, tiepercent, numGames))
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Implements a Windows version of a client responder. This should run with the
native Python for Windows.
Install on a Windows server:
Place the following lines in c:\autoexec.bat::
PATH=%PATH%;C:\Python26;C:\Python26\Scripts
Now run (all on one line)::
C:\Python26>python.exe %PYTHONLIB%\site-packages\pycopia\remote\WindowsServer.py
--username DOMAIN\Administrator --password xxxxxxxx install
OR, for system process that can interact with console::
C:\Python26>python.exe %PYTHONLIB%\site-packages\pycopia\remote\WindowsServer.py
--interactive install
Note: if you get an error about an account not existing, you may need
to supply the username like this:
.\Administrator
If a username was supplied to run as, go to the Service Manager from the
Windows control panel, and perform the following.
- Select "Remote Agent Server" from the list. Right-click and select "properties".
- Select the "Log On" tab.
- Click the "This account:" radio button.
- Enter DOMAIN\Administrator in the account box (or something else appropriate).
- Enter the proper password (twice).
- Click "Apply". You should confirm a message saying user is
enabled to log in as a service.
- Click "General" tab.
- You may now start the service.
You may also need to disable the Windows firewall for this to function
properly. This service is a massive security hole, so only run it on
a throw-away test machine on an isolated network.
"""
import os, sys, shutil, errno
import threading
# Pycopia imports
from pycopia.aid import IF
from pycopia.anypath import cygwin2nt, nt2cygwin
from pycopia import shparser
# returnable objects
from pycopia.remote.WindowsObjects import ExitStatus
# Windows stuff
import msvcrt
import win32api
import win32file
import win32net
import win32process
import win32event
# constants
import pywintypes
import win32con
import win32netcon
# some constants that the API forgot...
USE_WILDCARD = -1
USE_DISKDEV = 0
USE_SPOOLDEV = 1
USE_CHARDEV = 2
USE_IPC = 3
def setConfig():
Pyro.config.PYRO_STORAGE = "C:\\tmp\\"
Pyro.config.PYRO_LOGFILE = "C:\\tmp\\agent_svc.log"
Pyro.config.PYRO_TRACELEVEL=3
Pyro.config.PYRO_USER_LOGFILE = "C:\\tmp\\agent_user.log"
Pyro.config.PYRO_USER_TRACELEVEL = 3
Pyro.config.PYRO_PORT = 7867 # don't conflict with cygwin Pyro
import Pyro
import Pyro.util
setConfig()
Log=Pyro.util.Log
import Pyro.core
import Pyro.naming
from Pyro.ext.BasicNTService import BasicNTService, getRegistryParameters
_EXIT = False
UserLog = Pyro.util.UserLogger()
# msg, warn, or error methods
class WindowsFile(file):
"""A file object with some extra methods that match those in UserFile
(which has Posix extensions)."""
def locking(self, mode, nbytes):
return msvcrt.locking(self.fileno(), mode, nbytes)
def __repr__(self):
return "WindowsFile(%r, %r)" % (self.name, self.mode)
def lock_exclusive(self, length, start=0, whence=0, nb=0):
"""Locking method compatible with Posix files."""
if nb:
mode = msvcrt.LK_NBLCK
else:
mode = msvcrt.LK_LOCK
orig = self.tell()
self.seek(start, whence)
try:
msvcrt.locking(self.fileno(), mode, length)
finally:
self.seek(orig)
lock = lock_exclusive
def unlock(self, length, start=0, whence=0):
"""Posix compatible unlock."""
orig = self.tell()
self.seek(start, whence)
try:
msvcrt.locking(self.fileno(), msvcrt.LK_UNLCK, length)
finally:
self.seek(orig)
def get_osfhandle(self):
return msvcrt.get_osfhandle(self.fileno())
split_command_line = shparser.get_command_splitter()
# quick hack ... Windows sucks. No signal handling or anything useful, so it has to be faked.
class WindowsProcess(object):
def __init__(self, cmdline, logfile=None, env=None, callback=None, merge=True, pwent=None, async=False):
self.deadchild = False
self.exitstatus = None
self.cmdline = cmdline
self._callback = callback
self._buf = ""
self._log = logfile
if merge:
self.child_stdin, self.child_stdout = os.popen2(cmdline, "t", -1)
self.child_stderr = None
else:
self.child_stdin, self.child_stdout, self.child_stderr = os.popen3(cmdline, "t", -1)
self.childpid, self.handle = self._scan_for_self()
# since the Python popenX functions do not provide the PID, it must be
# scanned for in this ugly manner. 8-(
def _scan_for_self(self):
win32api.Sleep(2000) # sleep to give time for process to be seen in system table.
basename = self.cmdline.split()[0]
pids = win32process.EnumProcesses()
if not pids:
UserLog.warn("WindowsProcess", "no pids", pids)
for pid in pids:
try:
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION | win32con.PROCESS_VM_READ,
pywintypes.FALSE, pid)
except pywintypes.error as err:
UserLog.warn("WindowsProcess", str(err))
continue
try:
modlist = win32process.EnumProcessModules(handle)
except pywintypes.error as err:
UserLog.warn("WindowsProcess",str(err))
continue
for mod in modlist:
mname = win32process.GetModuleFileNameEx(handle, mod)
if mname.find(basename) >= 0:
return int(pid), handle
raise WindowsError("could not find process for %r" % (basename,))
def write(self, data):
return self.child_stdin.write(data)
def kill(self):
handle = win32api.OpenProcess(
win32con.PROCESS_VM_READ | win32con.PROCESS_TERMINATE, pywintypes.FALSE, self.childpid)
win32process.TerminateProcess(handle, 3)
def read(self, amt=1048576):
bs = len(self._buf)
while bs < amt:
c = self._read(4096)
if not c:
break
self._buf += c
bs = len(self._buf)
data = self._buf[:amt]
self._buf = self._buf[amt:]
return data
def readerr(self, amt=-1):
if self.child_stderr:
return self.child_stderr.read(amt)
def _read(self, amt):
data = self.child_stdout.read(amt)
if self._log:
self._log.write(data)
return data
def close(self):
if win32process.GetExitCodeProcess(self.handle) == win32con.STILL_ACTIVE:
self.kill()
self.child_stdin.close()
self.child_stdin = None
        if self.child_stderr:
            self.child_stderr.close()
            self.child_stderr = None
es = ExitStatus(self.cmdline, self.child_stdout.close())
if self.exitstatus is None:
self.exitstatus = es
self.child_stdout = None
self.dead()
return self.exitstatus
def poll(self):
es = win32process.GetExitCodeProcess(self.handle)
if es == win32con.STILL_ACTIVE:
return None
else:
self.exitstatus = ExitStatus(self.cmdline, es)
self.dead()
return self.exitstatus
    # called when the process is determined to be dead
def dead(self):
if not self.deadchild:
self.deadchild = True
if self._callback:
self._callback(self)
# check if still running
def alive(self):
es = win32process.GetExitCodeProcess(self.handle)
if es == win32con.STILL_ACTIVE:
return True
else:
return False
# wait until finished
def wait(self):
# let python read until EOF for a wait
try:
self._buf += self.child_stdout.read()
self.close()
except: # closed file?
pass
return self.exitstatus
def status(self):
return self.exitstatus
def isdead(self):
return self.deadchild
# considered true if child alive, false if child dead
def __bool__(self):
return not self.deadchild
# A server that performs filer client operations. This mostly delegates to the
# os module. But some special methods are provided for common functions.
class Win32Agent(Pyro.core.SynchronizedObjBase):
def __init__(self):
Pyro.core.SynchronizedObjBase.__init__(self)
self._files = {}
self._procs = {}
self._dirstack = []
def platform(self):
return sys.platform
def whatami(self):
"""Return agent implementation (class name)."""
return self.__class__.__name__
# Since file objects are not pickle-able, a handle is returned. Use the
# handle for subsequent file operations on f* methods.
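    # A minimal usage sketch (client side, through a Pyro proxy to this agent;
    # the path is only an illustration):
    #   handle = agent.fopen("/cygdrive/c/tmp/example.txt", "w")
    #   agent.fwrite(handle, "hello\n")
    #   agent.fclose(handle)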
def fopen(self, fname, mode="r", bufsize=-1):
"Opens a file object and returns a handle to it."
fname = cygwin2nt(fname)
fo = WindowsFile(fname, mode, bufsize)
UserLog.msg("fopen", fname)
handle = fo.fileno()
self._files[handle] = fo
return handle
def CreateFile(self, fname, mode="r", bufsize=-1):
"Open a file the same way a File Directory migration engine would."
fname = cygwin2nt(fname)
UserLog.msg("CreateFile", fname)
if mode == "r":
wmode = win32file.GENERIC_READ
elif mode == "w":
wmode = win32file.GENERIC_WRITE
elif mode in ( 'r+', 'w+', 'a+'):
wmode = win32file.GENERIC_READ | win32file.GENERIC_WRITE
else:
raise ValueError("invalid file mode")
h = win32file.CreateFile(
fname, # CTSTR lpFileName,
wmode, # DWORD dwDesiredAccess,
win32file.FILE_SHARE_DELETE | win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE, # DWORD dwShareMode,
None, # LPSECURITY_ATTRIBUTES lpSecurityAttributes,
win32file.OPEN_EXISTING, # DWORD dwCreationDisposition,
win32file.FILE_ATTRIBUTE_NORMAL, # DWORD dwFlagsAndAttributes,
0, # HANDLE hTemplateFile
)
self._files[int(h)] = h
return int(h)
def fclose(self, handle):
"Closes a file object given the handle."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
fo.close()
del self._files[handle]
else:
fo.Close() # pyHANDLE from CreateFile
def fread(self, handle, amt=-1):
"Reads from the file object given the handle and amount to read."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
return fo.read(amt)
else:
return win32file.ReadFile(fo, amt, None)
def fwrite(self, handle, data):
"Writes to a file object given the handle."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
return fo.write(data)
else:
return win32file.WriteFile(fo, data, None)
def fsync(self, handle):
"fsync the file object."
fo = self._files.get(handle, None)
if fo:
fo.flush()
return os.fsync(fo.fileno())
def fseek(self, handle, pos, how=0):
"Seek in the file object."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
return fo.seek(pos, how)
else:
win32file.SetFilePointer(fo, pos, how)
def ftell(self, handle):
"Tell where the seek pointer is in the file object."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
return fo.tell()
def fflush(self, handle):
"""Flush the file object buffer."""
fo = self._files.get(handle, None)
if fo:
return fo.flush()
def fileno(self, handle):
"Return the file objects file descriptor."
fo = self._files.get(handle, None)
if fo:
return fo.fileno()
def get_handle_info(self, handle):
fo = self._files.get(handle, None)
if fo:
return repr(fo) # XXX
else:
return None
def flock(self, handle, length=0, start=0, whence=0, nonblocking=False):
"""Lock the file with the given range."""
fo = self._files.get(handle, None)
if fo:
return fo.lock_exclusive(length, start, whence, nonblocking)
def funlock(self, handle, length, start=0, whence=0):
fo = self._files.get(handle, None)
if fo:
fo.unlock(length, start, whence)
def flist(self):
return list(self._files.keys())
def unlink(self, path):
"Unlink (delete) the given file."
path = cygwin2nt(path)
return os.unlink(path)
def rename(self, src, dst):
"Rename file from src to dst."
src = cygwin2nt(src)
dst = cygwin2nt(dst)
return os.rename(src, dst)
# directory methods
def mkdir(self, path, mode=0o777):
"Make a directory."
path = cygwin2nt(path)
return os.mkdir(path, mode)
def makedirs(self, path, mode=0o777):
"Make a full path."
path = cygwin2nt(path)
return os.makedirs(path, mode)
def chdir(self, path):
path = cygwin2nt(path)
return os.chdir(path)
def rmdir(self, path):
"Delete a directory."
path = cygwin2nt(path)
return os.rmdir(path)
def getcwd(self):
return os.getcwd()
def getcwdu(self):
return os.getcwd()
def pushd(self, path=None):
self._dirstack.append(os.getcwd())
if path:
path = cygwin2nt(path)
os.chdir(path)
def popd(self):
try:
path = self._dirstack.pop()
except IndexError:
return None
else:
os.chdir(path)
return path
def listdir(self, path):
path = cygwin2nt(path)
return os.listdir(path)
ls = listdir
def listfiles(self, path):
path = cygwin2nt(path)
isfile = os.path.isfile
pjoin = os.path.join
rv = []
for fname in os.listdir(path):
if isfile(pjoin(path, fname)):
rv.append(nt2cygwin(fname))
return rv
def chmod(self, path, mode):
path = cygwin2nt(path)
return os.chmod(path, mode)
def chown(self, path, uid, gid):
path = cygwin2nt(path)
return os.chown(path, uid, gid)
def stat(self, path):
path = cygwin2nt(path)
return os.stat(path)
def statvfs(self, path):
path = cygwin2nt(path)
return os.statvfs(path)
    # fd ops return the file descriptor as the handle (of course)
def open(self, fname, flags, mode=0o777):
        fd = os.open(fname, flags, mode)
return fd
def close(self, fd):
return os.close(fd)
def write(self, fd, data):
return os.write(fd, data)
def read(self, fd, n):
return os.read(fd, n)
# end fd ops
# shutil interface
def copyfile(self,src, dst):
return shutil.copyfile(src, dst)
def copymode(self, src, dst):
return shutil.copymode(src, dst)
def copystat(self, src, dst):
return shutil.copystat(src, dst)
def copy(self, src, dst):
return shutil.copy(src, dst)
def copy2(self, src, dst):
return shutil.copy2(src, dst)
def copytree(self, src, dst, symlinks=False):
return shutil.copytree(src, dst, symlinks)
def move(self, src, dst):
return win32file.MoveFile(str(src), str(dst))
def rmtree(self, path):
path = cygwin2nt(path)
for fname in os.listdir(path):
file_or_dir = os.path.join(path, fname)
if os.path.isdir(file_or_dir) and not os.path.islink(file_or_dir):
                self.rmtree(file_or_dir)  # it's a directory; recursively call the function again
else:
try:
os.remove(file_or_dir) #it's a file, delete it
except:
#probably failed because it is not a normal file
win32api.SetFileAttributes(file_or_dir, win32file.FILE_ATTRIBUTE_NORMAL)
os.remove(file_or_dir) #it's a file, delete it
os.rmdir(path) #delete the directory here
# os.path delegates
def exists(self, path):
path = cygwin2nt(path)
return os.path.exists(path)
def isabs(self, path):
path = cygwin2nt(path)
return os.path.isabs(path)
def isdir(self, path):
path = cygwin2nt(path)
return os.path.isdir(path)
def isfile(self, path):
path = cygwin2nt(path)
return os.path.isfile(path)
def islink(self, path):
path = cygwin2nt(path)
return os.path.islink(path)
def ismount(self, path):
path = cygwin2nt(path)
return os.path.ismount(path)
    # process control; these calls are synchronous (they block)
def system(self, cmd):
UserLog.msg("system", cmd)
return os.system(cmd) # remember, stdout is on the server
def run(self, cmd, user=None):
if user is None:
return self.pipe(cmd)
else:
return self.run_as(cmd, user.name, user.passwd)
def run_async(self, cmd, user=None):
UserLog.msg("run_async", cmd, str(user))
proc = WindowsProcess(cmd, pwent=user)
self._procs[proc.childpid] = proc
return proc.childpid
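    # Sketch of the intended asynchronous pattern (client side via a Pyro
    # proxy; the command string is only an illustration). poll(pid) returns
    # -errno.EAGAIN while the child runs and its ExitStatus once it exits:
    #   pid = agent.run_async("ipconfig /all")
    #   status = agent.waitpid(pid)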
def _get_process(self, pid):
return self._procs.get(pid, None)
def read_process(self, pid, N=-1):
proc = self._get_process(pid)
if proc:
return proc.read(N)
else:
return ''
def write_process(self, pid, data):
proc = self._get_process(pid)
if proc:
return proc.write(data)
def poll(self, pid):
"""Poll for async process. Returns exitstatus if done."""
try:
proc = self._procs[pid]
except KeyError:
return -errno.ENOENT
if proc.poll() is None:
return -errno.EAGAIN
else:
del self._procs[pid]
return proc.exitstatus
def waitpid(self, pid):
while True:
rv = self.poll(pid)
if rv == -errno.ENOENT:
return None
if rv == -errno.EAGAIN:
proc = self._procs[pid]
es = proc.wait()
del self._procs[pid]
return es
else: # already exited
del self._procs[pid]
return rv
def kill(self, pid):
"""Kills a process that was started by run_async."""
try:
proc = self._procs.pop(pid)
except KeyError:
return -errno.ENOENT
else:
proc.kill()
sts = proc.wait()
return sts
def killall(self):
rv = []
        for pid in list(self._procs.keys()):  # copy the keys; kill() removes entries
rv.append(self.kill(pid))
return rv
def plist(self):
return list(self._procs.keys())
def spawn(self, cmd, user=None, async=True):
# keep the "async" parameter for compatibility with the
# PosixServer.
if user:
cmd = ("runas /user:%s " % user) + cmd
UserLog.msg("spawn", cmd)
L = split_command_line(cmd)
pid = os.spawnv(os.P_DETACH, L[0], L)
return pid
def pipe(self, cmd):
UserLog.msg("pipe", cmd)
proc = os.popen(cmd, 'r')
text = proc.read()
sts = proc.close()
if sts is None:
sts = 0
return ExitStatus(cmd, sts), text
def python(self, snippet):
try:
code = compile(str(snippet) + '\n', '<WindowsServer>', 'eval')
rv = eval(code, globals(), vars(self))
except:
t, v, tb = sys.exc_info()
return '*** %s (%s)' % (t, v)
else:
return rv
def pyexec(self, snippet):
try:
code = compile(str(snippet) + '\n', '<WindowsServer>', 'exec')
exec(code, globals(), vars(self))
except:
t, v, tb = sys.exc_info()
return '*** %s (%s)' % (t, v)
else:
return
# method that exists just to check if everything is working
def alive(self):
return True
def suicide(self):
"Kill myself. The server manager will ressurect me. How nice."
global _EXIT
_EXIT = True
def clean(self):
self.chdir("C:\\tmp")
for f in self.flist():
try:
self.fclose(f)
except:
pass
for pid in self.plist():
try:
self.kill(pid)
except:
pass
def NetUseAdd(self, drive, share, username=None, domainname=None, password=None):
"""Calls Windows API to map a drive. Note that this does not automatically use DFS."""
ui2={}
ui2['local'] = "%s:" % drive[0].upper()
ui2['remote'] = str(share) # \\servername\sharename
ui2['asg_type'] = USE_DISKDEV
if username:
ui2['username'] = str(username)
if domainname:
ui2['domainname'] = str(domainname)
if password:
ui2['password'] = str(password)
return win32net.NetUseAdd(None,2,ui2)
def NetUseDelete(self, drive, forcelevel=0):
"""Remove a mapped drive."""
ui2 = win32net.NetUseGetInfo(None, "%s:" % drive[0].upper(), 2)
return win32net.NetUseDel(None, ui2['remote'], max(0, min(forcelevel, 3)))
#win32net.USE_NOFORCE
#win32net.USE_FORCE
#win32net.USE_LOTS_OF_FORCE
def net_use(self, drive, share, user=None, domainname=None, password=None):
"""Map a drive on a Windows client using the *net* command."""
cmd = "net use %s: %s %s" % (drive[0].upper(), share, IF(password, password, ""))
if user:
cmd += " /USER:%s%s" % (IF(domainname, "%s\\"%domainname, ""), user)
return self.pipe(cmd)
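    # For example (all values are placeholders),
    # net_use("Z", r"\\fs1\share", "tester", "DOM", "secret") runs the command:
    #   net use Z: \\fs1\share secret /USER:DOM\tester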
def net_use_delete(self, drive):
"""Unmap a drive on a Windows client using the *net* command."""
cmd = "net use %s: /delete /y" % (drive[0].upper(),)
return self.pipe(cmd)
def md5sums(self, path):
"""Reads the md5sums.txt file in path and returns the number of files
checked good, then number bad (failures), and a list of the failures."""
from pycopia import md5lib
failures = []
counter = Counter()
md5lib.check_md5sums(path, failures.append, counter)
return counter.good, counter.bad, failures
def _get_home(self):
try: # F&*#!&@ windows
HOME = os.environ['USERPROFILE']
except KeyError:
try:
HOME = os.path.join(os.environ["HOMEDRIVE"], os.environ["HOMEPATH"])
except KeyError:
HOME = "C:\\"
return HOME
def get_tarball(self, url):
self.pushd(self._get_home())
        # quietly fetch the tarball with wget
exitstatus, out = self.pipe('wget -q "%s"' % (url,))
self.popd()
return exitstatus
def run_script(self, script):
"""Runs a script from a shell."""
name = os.path.join("c:\\", "tmp", "clnt%d.bat" % (os.getpid(),))
sfile = open(name, "w")
sfile.write(str(script))
sfile.write("\n") # just in case string has no newline at the end
sfile.close()
try:
sts, out = self.pipe(name)
finally:
os.unlink(name)
return ExitStatus("cmd.exe", sts), out
# for PosixServer duck typing
def mount(self, host, export, mountpoint):
"""Map a drive on a client. Same as mount on NFS. The mountpoint should
be a drive letter (without the colon). """
return self.net_use(mountpoint, r"\\%s\%s" % (host, export))
def umount(self, mountpoint):
"""Unmap a drive on a client."""
return self.net_use_delete(mountpoint)
def run_as(self, cmd, user, password):
cmd = 'runas /user:%s %s' % (user, cmd)
return self.pipe(cmd)
def get_short_pathname(self, path):
"""Get the short file name of path."""
path = cygwin2nt(path)
return win32api.GetShortPathName(path)
def win32(self, funcname, *args, **kwargs):
"""Generic interface to win32. Calls a win32api function by name."""
f = getattr(win32api, funcname)
return f(*args, **kwargs)
def hostname(self):
"""Returns the client hosts name."""
return win32api.GetComputerName()
# Windows file API interface
def CopyFile(self, src, dst):
src = cygwin2nt(src)
dst = cygwin2nt(dst)
return win32file.CopyFile(src, dst, 1)
def GetFileAttributes(self, name):
name = cygwin2nt(name)
return win32file.GetFileAttributes(name)
def GetFileAttributeFlags(self):
return {
"ARCHIVE":win32file.FILE_ATTRIBUTE_ARCHIVE,
"COMPRESSED":win32file.FILE_ATTRIBUTE_COMPRESSED,
"DIRECTORY":win32file.FILE_ATTRIBUTE_DIRECTORY,
"HIDDEN":win32file.FILE_ATTRIBUTE_HIDDEN,
"NORMAL":win32file.FILE_ATTRIBUTE_NORMAL,
"OFFLINE":win32file.FILE_ATTRIBUTE_OFFLINE,
"READONLY":win32file.FILE_ATTRIBUTE_READONLY,
"SYSTEM":win32file.FILE_ATTRIBUTE_SYSTEM,
"TEMPORARY":win32file.FILE_ATTRIBUTE_TEMPORARY,
}
def SetFileAttributes(self, name, flags):
name = cygwin2nt(name)
return win32file.SetFileAttributes(name, flags)
def add_share(self, pathname):
"""Create a new share on this server. A directory is also created. """
drive, sharename = os.path.split(pathname)
if not os.path.isdir(pathname):
os.mkdir(pathname)
shinfo={} # shinfo struct
shinfo['netname'] = sharename
shinfo['type'] = win32netcon.STYPE_DISKTREE
shinfo['remark'] = 'Testing share %s' % (sharename,)
shinfo['permissions'] = 0
shinfo['max_uses'] = -1
shinfo['current_uses'] = 0
shinfo['path'] = pathname
shinfo['passwd'] = ''
win32net.NetShareAdd(None,2,shinfo)
return sharename
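    # For example (the path is a placeholder), add_share(r"C:\tmp\testshare")
    # creates C:\tmp\testshare if needed and exports it as the share
    # "testshare".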
def del_share(self, pathname):
"""Remove a share. Returns True if successful, False otherwise."""
drive, sharename = os.path.split(pathname)
try:
win32net.NetShareDel(None, sharename, 0)
except:
ex, val, tb = sys.exc_info()
UserLog.warn("del_share", str(ex), str(val))
return False
else:
return True
# md5sums callback for counting files
class Counter(object):
def __init__(self):
self.good = 0
self.bad = 0
def __call__(self, name, disp):
if disp:
self.good += 1
else:
self.bad += 1
######## main program #####
class AgentThread(threading.Thread):
""" Agent runs in this thread.
"""
def __init__(self, stopcallback):
threading.Thread.__init__(self)
Log.msg("Win32Agent", "initializing")
self._stopcallback = stopcallback
def run(self):
try:
run_server()
except Exception as x :
Log.error("NS daemon","COULD NOT START!!!",x)
raise SystemExit
self._stopcallback()
def run_server():
os.chdir(r"C:\tmp")
Pyro.core.initServer(banner=0, storageCheck=0)
ns=Pyro.naming.NameServerLocator().getNS()
daemon=Pyro.core.Daemon()
daemon.useNameServer(ns)
uri=daemon.connectPersistent(Win32Agent(),
"Agents.%s" % (win32api.GetComputerName().lower(),))
daemon.requestLoop(_checkexit)
daemon.shutdown()
def _checkexit():
global _EXIT
return not _EXIT
class RemoteAgentService(BasicNTService):
_svc_name_ = 'RemoteAgentService'
_svc_display_name_ = "Remote Agent Server"
_svc_description_ = 'Provides Windows remote control agent.'
def __init__(self, args):
super(RemoteAgentService, self).__init__(args)
if not os.path.isdir(Pyro.config.PYRO_STORAGE):
os.mkdir(Pyro.config.PYRO_STORAGE)
self._thread = AgentThread(self.SvcStop)
def _doRun(self):
self._thread.start()
def _doStop(self):
self._thread.join()
self._thread = None
if __name__ == '__main__':
RemoteAgentService.HandleCommandLine()
from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
def test_most_frequent_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
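    # class 0 carries weight 0.1 + 0.1 = 0.2 and class 1 carries 1.0, so the
    # fitted priors should be 0.2 / 1.2 and 1.0 / 1.2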
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1/3, decimal=1)
assert_almost_equal(p[2], 1/3, decimal=1)
assert_almost_equal(p[4], 1/3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
if __name__ == '__main__':
import nose
nose.runmodule()
"""IMAP4 client.
Based on RFC 2060.
Public class: IMAP4
Public variable: Debug
Public functions: Internaldate2tuple
Int2AP
ParseFlags
Time2Internaldate
"""
# Author: Piers Lauder <piers@cs.su.oz.au> December 1997.
#
# Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
# String method conversion by ESR, February 2001.
# GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
# IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
# GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
# PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
# GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005.
__version__ = "2.58"
import binascii, random, re, socket, subprocess, sys, time
__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple",
"Int2AP", "ParseFlags", "Time2Internaldate"]
# Globals
CRLF = '\r\n'
Debug = 0
IMAP4_PORT = 143
IMAP4_SSL_PORT = 993
AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first
# Commands
Commands = {
# name valid states
'APPEND': ('AUTH', 'SELECTED'),
'AUTHENTICATE': ('NONAUTH',),
'CAPABILITY': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
'CHECK': ('SELECTED',),
'CLOSE': ('SELECTED',),
'COPY': ('SELECTED',),
'CREATE': ('AUTH', 'SELECTED'),
'DELETE': ('AUTH', 'SELECTED'),
'DELETEACL': ('AUTH', 'SELECTED'),
'EXAMINE': ('AUTH', 'SELECTED'),
'EXPUNGE': ('SELECTED',),
'FETCH': ('SELECTED',),
'GETACL': ('AUTH', 'SELECTED'),
'GETANNOTATION':('AUTH', 'SELECTED'),
'GETQUOTA': ('AUTH', 'SELECTED'),
'GETQUOTAROOT': ('AUTH', 'SELECTED'),
'MYRIGHTS': ('AUTH', 'SELECTED'),
'LIST': ('AUTH', 'SELECTED'),
'LOGIN': ('NONAUTH',),
'LOGOUT': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
'LSUB': ('AUTH', 'SELECTED'),
'NAMESPACE': ('AUTH', 'SELECTED'),
'NOOP': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
'PARTIAL': ('SELECTED',), # NB: obsolete
'PROXYAUTH': ('AUTH',),
'RENAME': ('AUTH', 'SELECTED'),
'SEARCH': ('SELECTED',),
'SELECT': ('AUTH', 'SELECTED'),
'SETACL': ('AUTH', 'SELECTED'),
'SETANNOTATION':('AUTH', 'SELECTED'),
'SETQUOTA': ('AUTH', 'SELECTED'),
'SORT': ('SELECTED',),
'STATUS': ('AUTH', 'SELECTED'),
'STORE': ('SELECTED',),
'SUBSCRIBE': ('AUTH', 'SELECTED'),
'THREAD': ('SELECTED',),
'UID': ('SELECTED',),
'UNSUBSCRIBE': ('AUTH', 'SELECTED'),
}
# Patterns to match server responses
Continuation = re.compile(r'\+( (?P<data>.*))?')
Flags = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')
InternalDate = re.compile(r'.*INTERNALDATE "'
r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
r'"')
Literal = re.compile(r'.*{(?P<size>\d+)}$')
MapCRLF = re.compile(r'\r\n|\r|\n')
Response_code = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
Untagged_response = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
Untagged_status = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?')
class IMAP4:
"""IMAP4 client class.
Instantiate with: IMAP4([host[, port]])
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port).
All IMAP4rev1 commands are supported by methods of the same
name (in lower-case).
All arguments to commands are converted to strings, except for
AUTHENTICATE, and the last argument to APPEND which is passed as
an IMAP4 literal. If necessary (the string contains any
non-printing characters or white-space and isn't enclosed with
either parentheses or double quotes) each string is quoted.
However, the 'password' argument to the LOGIN command is always
quoted. If you want to avoid having an argument string quoted
(eg: the 'flags' argument to STORE) then enclose the string in
parentheses (eg: "(\Deleted)").
Each command returns a tuple: (type, [data, ...]) where 'type'
is usually 'OK' or 'NO', and 'data' is either the text from the
tagged response, or untagged results from command. Each 'data'
is either a string, or a tuple. If a tuple, then the first part
is the header of the response, and the second part contains
the data (ie: 'literal' value).
Errors raise the exception class <instance>.error("<reason>").
IMAP4 server errors raise <instance>.abort("<reason>"),
which is a sub-class of 'error'. Mailbox status changes
from READ-WRITE to READ-ONLY raise the exception class
<instance>.readonly("<reason>"), which is a sub-class of 'abort'.
"error" exceptions imply a program error.
"abort" exceptions imply the connection should be reset, and
the command re-tried.
"readonly" exceptions imply the command should be re-tried.
Note: to use this module, you must read the RFCs pertaining to the
IMAP4 protocol, as the semantics of the arguments to each IMAP4
command are left to the invoker, not to mention the results. Also,
most IMAP servers implement a sub-set of the commands available here.
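    A minimal session sketch (host name and credentials are placeholders):
        M = IMAP4('imap.example.com')
        M.login('user', 'secret')
        typ, data = M.select('INBOX')
        typ, data = M.search(None, 'ALL')
        M.logout()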
"""
class error(Exception): pass # Logical errors - debug required
class abort(error): pass # Service errors - close and retry
class readonly(abort): pass # Mailbox status changed to READ-ONLY
mustquote = re.compile(r"[^\w!#$%&'*+,.:;<=>?^`|~-]")
def __init__(self, host = '', port = IMAP4_PORT):
self.debug = Debug
self.state = 'LOGOUT'
self.literal = None # A literal argument to a command
self.tagged_commands = {} # Tagged commands awaiting response
self.untagged_responses = {} # {typ: [data, ...], ...}
self.continuation_response = '' # Last continuation response
self.is_readonly = False # READ-ONLY desired state
self.tagnum = 0
# Open socket to server.
self.open(host, port)
# Create unique tag for this session,
# and compile tagged response matcher.
self.tagpre = Int2AP(random.randint(4096, 65535))
self.tagre = re.compile(r'(?P<tag>'
+ self.tagpre
+ r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')
# Get server welcome message,
# request and store CAPABILITY response.
if __debug__:
self._cmd_log_len = 10
self._cmd_log_idx = 0
self._cmd_log = {} # Last `_cmd_log_len' interactions
if self.debug >= 1:
self._mesg('imaplib version %s' % __version__)
self._mesg('new IMAP4 connection, tag=%s' % self.tagpre)
self.welcome = self._get_response()
if 'PREAUTH' in self.untagged_responses:
self.state = 'AUTH'
elif 'OK' in self.untagged_responses:
self.state = 'NONAUTH'
else:
raise self.error(self.welcome)
typ, dat = self.capability()
if dat == [None]:
raise self.error('no CAPABILITY response from server')
self.capabilities = tuple(dat[-1].upper().split())
if __debug__:
if self.debug >= 3:
self._mesg('CAPABILITIES: %r' % (self.capabilities,))
for version in AllowedVersions:
if not version in self.capabilities:
continue
self.PROTOCOL_VERSION = version
return
raise self.error('server not IMAP4 compliant')
def __getattr__(self, attr):
# Allow UPPERCASE variants of IMAP4 command methods.
if attr in Commands:
return getattr(self, attr.lower())
raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
# Overridable methods
def open(self, host = '', port = IMAP4_PORT):
"""Setup connection to remote server on "host:port"
(default: localhost:standard IMAP4 port).
This connection will be used by the routines:
read, readline, send, shutdown.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port))
self.file = self.sock.makefile('rb')
def read(self, size):
"""Read 'size' bytes from remote."""
return self.file.read(size)
def readline(self):
"""Read line from remote."""
return self.file.readline()
def send(self, data):
"""Send data to remote."""
self.sock.sendall(data)
def shutdown(self):
"""Close I/O established in "open"."""
self.file.close()
self.sock.close()
def socket(self):
"""Return socket instance used to connect to IMAP4 server.
socket = <instance>.socket()
"""
return self.sock
# Utility methods
def recent(self):
"""Return most recent 'RECENT' responses if any exist,
else prompt server for an update using the 'NOOP' command.
(typ, [data]) = <instance>.recent()
'data' is None if no new messages,
else list of RECENT responses, most recent last.
"""
name = 'RECENT'
typ, dat = self._untagged_response('OK', [None], name)
if dat[-1]:
return typ, dat
typ, dat = self.noop() # Prod server for response
return self._untagged_response(typ, dat, name)
def response(self, code):
"""Return data for response 'code' if received, or None.
Old value for response 'code' is cleared.
(code, [data]) = <instance>.response(code)
"""
return self._untagged_response(code, [None], code.upper())
# IMAP4 commands
def append(self, mailbox, flags, date_time, message):
"""Append message to named mailbox.
(typ, [data]) = <instance>.append(mailbox, flags, date_time, message)
All args except `message' can be None.
"""
name = 'APPEND'
if not mailbox:
mailbox = 'INBOX'
if flags:
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags
else:
flags = None
if date_time:
date_time = Time2Internaldate(date_time)
else:
date_time = None
self.literal = MapCRLF.sub(CRLF, message)
return self._simple_command(name, mailbox, flags, date_time)
def authenticate(self, mechanism, authobject):
"""Authenticate command - requires response processing.
'mechanism' specifies which authentication mechanism is to
be used - it must appear in <instance>.capabilities in the
form AUTH=<mechanism>.
'authobject' must be a callable object:
data = authobject(response)
It will be called to process server continuation responses.
It should return data that will be encoded and sent to server.
It should return None if the client abort response '*' should
be sent instead.
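        A minimal sketch of a PLAIN authobject (credentials are placeholders;
        the server must advertise AUTH=PLAIN, and the NUL-separated string
        returned here is encoded and sent by this method):
            def plain_auth(response):
                return chr(0) + 'user' + chr(0) + 'secret'
            imap.authenticate('PLAIN', plain_auth)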
"""
mech = mechanism.upper()
# XXX: shouldn't this code be removed, not commented out?
#cap = 'AUTH=%s' % mech
#if not cap in self.capabilities: # Let the server decide!
# raise self.error("Server doesn't allow %s authentication." % mech)
self.literal = _Authenticator(authobject).process
typ, dat = self._simple_command('AUTHENTICATE', mech)
if typ != 'OK':
raise self.error(dat[-1])
self.state = 'AUTH'
return typ, dat
def capability(self):
"""(typ, [data]) = <instance>.capability()
Fetch capabilities list from server."""
name = 'CAPABILITY'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
def check(self):
"""Checkpoint mailbox on server.
(typ, [data]) = <instance>.check()
"""
return self._simple_command('CHECK')
def close(self):
"""Close currently selected mailbox.
Deleted messages are removed from writable mailbox.
This is the recommended command before 'LOGOUT'.
(typ, [data]) = <instance>.close()
"""
try:
typ, dat = self._simple_command('CLOSE')
finally:
self.state = 'AUTH'
return typ, dat
def copy(self, message_set, new_mailbox):
"""Copy 'message_set' messages onto end of 'new_mailbox'.
(typ, [data]) = <instance>.copy(message_set, new_mailbox)
"""
return self._simple_command('COPY', message_set, new_mailbox)
def create(self, mailbox):
"""Create new mailbox.
(typ, [data]) = <instance>.create(mailbox)
"""
return self._simple_command('CREATE', mailbox)
def delete(self, mailbox):
"""Delete old mailbox.
(typ, [data]) = <instance>.delete(mailbox)
"""
return self._simple_command('DELETE', mailbox)
def deleteacl(self, mailbox, who):
"""Delete the ACLs (remove any rights) set for who on mailbox.
(typ, [data]) = <instance>.deleteacl(mailbox, who)
"""
return self._simple_command('DELETEACL', mailbox, who)
def expunge(self):
"""Permanently remove deleted items from selected mailbox.
Generates 'EXPUNGE' response for each deleted message.
(typ, [data]) = <instance>.expunge()
'data' is list of 'EXPUNGE'd message numbers in order received.
"""
name = 'EXPUNGE'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
def fetch(self, message_set, message_parts):
"""Fetch (parts of) messages.
(typ, [data, ...]) = <instance>.fetch(message_set, message_parts)
'message_parts' should be a string of selected parts
enclosed in parentheses, eg: "(UID BODY[TEXT])".
'data' are tuples of message part envelope and data.
"""
name = 'FETCH'
typ, dat = self._simple_command(name, message_set, message_parts)
return self._untagged_response(typ, dat, name)
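# Usage sketch (illustrative only): fetching the flags and full text of
# message 1 from the currently selected mailbox.
#
#   typ, data = M.fetch('1', '(FLAGS RFC822)')
#   # data[0] is an (envelope, literal) tuple holding the message text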
def getacl(self, mailbox):
"""Get the ACLs for a mailbox.
(typ, [data]) = <instance>.getacl(mailbox)
"""
typ, dat = self._simple_command('GETACL', mailbox)
return self._untagged_response(typ, dat, 'ACL')
def getannotation(self, mailbox, entry, attribute):
"""(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute)
Retrieve ANNOTATIONs."""
typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute)
return self._untagged_response(typ, dat, 'ANNOTATION')
def getquota(self, root):
"""Get the quota root's resource usage and limits.
Part of the IMAP4 QUOTA extension defined in rfc2087.
(typ, [data]) = <instance>.getquota(root)
"""
typ, dat = self._simple_command('GETQUOTA', root)
return self._untagged_response(typ, dat, 'QUOTA')
def getquotaroot(self, mailbox):
"""Get the list of quota roots for the named mailbox.
(typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox)
"""
typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
typ, quota = self._untagged_response(typ, dat, 'QUOTA')
typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
return typ, [quotaroot, quota]
def list(self, directory='""', pattern='*'):
"""List mailbox names in directory matching pattern.
(typ, [data]) = <instance>.list(directory='""', pattern='*')
'data' is list of LIST responses.
"""
name = 'LIST'
typ, dat = self._simple_command(name, directory, pattern)
return self._untagged_response(typ, dat, name)
def login(self, user, password):
"""Identify client using plaintext password.
(typ, [data]) = <instance>.login(user, password)
NB: 'password' will be quoted.
"""
typ, dat = self._simple_command('LOGIN', user, self._quote(password))
if typ != 'OK':
raise self.error(dat[-1])
self.state = 'AUTH'
return typ, dat
def login_cram_md5(self, user, password):
""" Force use of CRAM-MD5 authentication.
(typ, [data]) = <instance>.login_cram_md5(user, password)
"""
self.user, self.password = user, password
return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH)
def _CRAM_MD5_AUTH(self, challenge):
""" Authobject to use with CRAM-MD5 authentication. """
import hmac
return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest()
def logout(self):
"""Shutdown connection to server.
(typ, [data]) = <instance>.logout()
Returns server 'BYE' response.
"""
self.state = 'LOGOUT'
try: typ, dat = self._simple_command('LOGOUT')
except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
self.shutdown()
if 'BYE' in self.untagged_responses:
return 'BYE', self.untagged_responses['BYE']
return typ, dat
def lsub(self, directory='""', pattern='*'):
"""List 'subscribed' mailbox names in directory matching pattern.
(typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*')
'data' is a list of LSUB responses.
"""
name = 'LSUB'
typ, dat = self._simple_command(name, directory, pattern)
return self._untagged_response(typ, dat, name)
def myrights(self, mailbox):
"""Show my ACLs for a mailbox (i.e. the rights that I have on mailbox).
(typ, [data]) = <instance>.myrights(mailbox)
"""
typ,dat = self._simple_command('MYRIGHTS', mailbox)
return self._untagged_response(typ, dat, 'MYRIGHTS')
def namespace(self):
""" Returns IMAP namespaces ala rfc2342
(typ, [data, ...]) = <instance>.namespace()
"""
name = 'NAMESPACE'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
def noop(self):
"""Send NOOP command.
(typ, [data]) = <instance>.noop()
"""
if __debug__:
if self.debug >= 3:
self._dump_ur(self.untagged_responses)
return self._simple_command('NOOP')
def partial(self, message_num, message_part, start, length):
"""Fetch truncated part of a message.
(typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length)
'data' is tuple of message part envelope and data.
"""
name = 'PARTIAL'
typ, dat = self._simple_command(name, message_num, message_part, start, length)
return self._untagged_response(typ, dat, 'FETCH')
def proxyauth(self, user):
"""Assume authentication as "user".
Allows an authorised administrator to proxy into any user's
mailbox.
(typ, [data]) = <instance>.proxyauth(user)
"""
name = 'PROXYAUTH'
return self._simple_command('PROXYAUTH', user)
def rename(self, oldmailbox, newmailbox):
"""Rename old mailbox name to new.
(typ, [data]) = <instance>.rename(oldmailbox, newmailbox)
"""
return self._simple_command('RENAME', oldmailbox, newmailbox)
def search(self, charset, *criteria):
"""Search mailbox for matching messages.
(typ, [data]) = <instance>.search(charset, criterion, ...)
'data' is space separated list of matching message numbers.
"""
name = 'SEARCH'
if charset:
typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria)
else:
typ, dat = self._simple_command(name, *criteria)
return self._untagged_response(typ, dat, name)
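# Usage sketch (illustrative only): searching the selected mailbox without an
# explicit charset; the result is a single space-separated string of message
# numbers.
#
#   typ, data = M.search(None, 'UNSEEN', 'SUBJECT', '"report"')
#   message_numbers = data[0].split()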
def select(self, mailbox='INBOX', readonly=False):
"""Select a mailbox.
Flush all untagged responses.
(typ, [data]) = <instance>.select(mailbox='INBOX', readonly=False)
'data' is count of messages in mailbox ('EXISTS' response).
Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
other responses should be obtained via <instance>.response('FLAGS') etc.
"""
self.untagged_responses = {} # Flush old responses.
self.is_readonly = readonly
if readonly:
name = 'EXAMINE'
else:
name = 'SELECT'
typ, dat = self._simple_command(name, mailbox)
if typ != 'OK':
self.state = 'AUTH' # Might have been 'SELECTED'
return typ, dat
self.state = 'SELECTED'
if 'READ-ONLY' in self.untagged_responses \
and not readonly:
if __debug__:
if self.debug >= 1:
self._dump_ur(self.untagged_responses)
raise self.readonly('%s is not writable' % mailbox)
return typ, self.untagged_responses.get('EXISTS', [None])
def setacl(self, mailbox, who, what):
"""Set a mailbox acl.
(typ, [data]) = <instance>.setacl(mailbox, who, what)
"""
return self._simple_command('SETACL', mailbox, who, what)
def setannotation(self, *args):
"""(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+)
Set ANNOTATIONs."""
typ, dat = self._simple_command('SETANNOTATION', *args)
return self._untagged_response(typ, dat, 'ANNOTATION')
def setquota(self, root, limits):
"""Set the quota root's resource limits.
(typ, [data]) = <instance>.setquota(root, limits)
"""
typ, dat = self._simple_command('SETQUOTA', root, limits)
return self._untagged_response(typ, dat, 'QUOTA')
def sort(self, sort_criteria, charset, *search_criteria):
"""IMAP4rev1 extension SORT command.
(typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...)
"""
name = 'SORT'
#if not name in self.capabilities: # Let the server decide!
# raise self.error('unimplemented extension command: %s' % name)
if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
sort_criteria = '(%s)' % sort_criteria
typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria)
return self._untagged_response(typ, dat, name)
def status(self, mailbox, names):
"""Request named status conditions for mailbox.
(typ, [data]) = <instance>.status(mailbox, names)
"""
name = 'STATUS'
#if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide!
# raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name)
typ, dat = self._simple_command(name, mailbox, names)
return self._untagged_response(typ, dat, name)
def store(self, message_set, command, flags):
"""Alters flag dispositions for messages in mailbox.
(typ, [data]) = <instance>.store(message_set, command, flags)
"""
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags # Avoid quoting the flags
typ, dat = self._simple_command('STORE', message_set, command, flags)
return self._untagged_response(typ, dat, 'FETCH')
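# Usage sketch (illustrative only): marking message 1 as deleted and then
# expunging it from the selected mailbox.
#
#   M.store('1', '+FLAGS', '\\Deleted')
#   M.expunge()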
def subscribe(self, mailbox):
"""Subscribe to new mailbox.
(typ, [data]) = <instance>.subscribe(mailbox)
"""
return self._simple_command('SUBSCRIBE', mailbox)
def thread(self, threading_algorithm, charset, *search_criteria):
"""IMAPrev1 extension THREAD command.
(type, [data]) = <instance>.thread(threading_algorithm, charset, search_criteria, ...)
"""
name = 'THREAD'
typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria)
return self._untagged_response(typ, dat, name)
def uid(self, command, *args):
"""Execute "command arg ..." with messages identified by UID,
rather than message number.
(typ, [data]) = <instance>.uid(command, arg1, arg2, ...)
Returns response appropriate to 'command'.
"""
command = command.upper()
if not command in Commands:
raise self.error("Unknown IMAP4 UID command: %s" % command)
if self.state not in Commands[command]:
raise self.error("command %s illegal in state %s, "
"only allowed in states %s" %
(command, self.state,
', '.join(Commands[command])))
name = 'UID'
typ, dat = self._simple_command(name, command, *args)
if command in ('SEARCH', 'SORT', 'THREAD'):
name = command
else:
name = 'FETCH'
return self._untagged_response(typ, dat, name)
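# Usage sketch (illustrative only): addressing messages by UID rather than by
# sequence number.
#
#   typ, data = M.uid('SEARCH', None, 'ALL')
#   uids = data[0].split()
#   typ, data = M.uid('FETCH', uids[-1], '(RFC822.HEADER)')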
def unsubscribe(self, mailbox):
"""Unsubscribe from old mailbox.
(typ, [data]) = <instance>.unsubscribe(mailbox)
"""
return self._simple_command('UNSUBSCRIBE', mailbox)
def xatom(self, name, *args):
"""Allow simple extension commands
notified by server in CAPABILITY response.
Assumes command is legal in current state.
(typ, [data]) = <instance>.xatom(name, arg, ...)
Returns response appropriate to extension command `name'.
"""
name = name.upper()
#if not name in self.capabilities: # Let the server decide!
# raise self.error('unknown extension command: %s' % name)
if not name in Commands:
Commands[name] = (self.state,)
return self._simple_command(name, *args)
# Private methods
def _append_untagged(self, typ, dat):
if dat is None: dat = ''
ur = self.untagged_responses
if __debug__:
if self.debug >= 5:
self._mesg('untagged_responses[%s] %s += ["%s"]' %
(typ, len(ur.get(typ,'')), dat))
if typ in ur:
ur[typ].append(dat)
else:
ur[typ] = [dat]
def _check_bye(self):
bye = self.untagged_responses.get('BYE')
if bye:
raise self.abort(bye[-1])
def _command(self, name, *args):
if self.state not in Commands[name]:
self.literal = None
raise self.error("command %s illegal in state %s, "
"only allowed in states %s" %
(name, self.state,
', '.join(Commands[name])))
for typ in ('OK', 'NO', 'BAD'):
if typ in self.untagged_responses:
del self.untagged_responses[typ]
if 'READ-ONLY' in self.untagged_responses \
and not self.is_readonly:
raise self.readonly('mailbox status changed to READ-ONLY')
tag = self._new_tag()
data = '%s %s' % (tag, name)
for arg in args:
if arg is None: continue
data = '%s %s' % (data, self._checkquote(arg))
literal = self.literal
if literal is not None:
self.literal = None
if type(literal) is type(self._command):
literator = literal
else:
literator = None
data = '%s {%s}' % (data, len(literal))
if __debug__:
if self.debug >= 4:
self._mesg('> %s' % data)
else:
self._log('> %s' % data)
try:
self.send('%s%s' % (data, CRLF))
except (socket.error, OSError), val:
raise self.abort('socket error: %s' % val)
if literal is None:
return tag
while 1:
# Wait for continuation response
while self._get_response():
if self.tagged_commands[tag]: # BAD/NO?
return tag
# Send literal
if literator:
literal = literator(self.continuation_response)
if __debug__:
if self.debug >= 4:
self._mesg('write literal size %s' % len(literal))
try:
self.send(literal)
self.send(CRLF)
except (socket.error, OSError), val:
raise self.abort('socket error: %s' % val)
if not literator:
break
return tag
def _command_complete(self, name, tag):
self._check_bye()
try:
typ, data = self._get_tagged_response(tag)
except self.abort, val:
raise self.abort('command: %s => %s' % (name, val))
except self.error, val:
raise self.error('command: %s => %s' % (name, val))
self._check_bye()
if typ == 'BAD':
raise self.error('%s command error: %s %s' % (name, typ, data))
return typ, data
def _get_response(self):
# Read response and store.
#
# Returns None for continuation responses,
# otherwise first response line received.
resp = self._get_line()
# Command completion response?
if self._match(self.tagre, resp):
tag = self.mo.group('tag')
if not tag in self.tagged_commands:
raise self.abort('unexpected tagged response: %s' % resp)
typ = self.mo.group('type')
dat = self.mo.group('data')
self.tagged_commands[tag] = (typ, [dat])
else:
dat2 = None
# '*' (untagged) responses?
if not self._match(Untagged_response, resp):
if self._match(Untagged_status, resp):
dat2 = self.mo.group('data2')
if self.mo is None:
# Only other possibility is '+' (continuation) response...
if self._match(Continuation, resp):
self.continuation_response = self.mo.group('data')
return None # NB: indicates continuation
raise self.abort("unexpected response: '%s'" % resp)
typ = self.mo.group('type')
dat = self.mo.group('data')
if dat is None: dat = '' # Null untagged response
if dat2: dat = dat + ' ' + dat2
# Is there a literal to come?
while self._match(Literal, dat):
# Read literal direct from connection.
size = int(self.mo.group('size'))
if __debug__:
if self.debug >= 4:
self._mesg('read literal size %s' % size)
data = self.read(size)
# Store response with literal as tuple
self._append_untagged(typ, (dat, data))
# Read trailer - possibly containing another literal
dat = self._get_line()
self._append_untagged(typ, dat)
# Bracketed response information?
if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat):
self._append_untagged(self.mo.group('type'), self.mo.group('data'))
if __debug__:
if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'):
self._mesg('%s response: %s' % (typ, dat))
return resp
def _get_tagged_response(self, tag):
while 1:
result = self.tagged_commands[tag]
if result is not None:
del self.tagged_commands[tag]
return result
# Some have reported "unexpected response" exceptions.
# Note that ignoring them here causes loops.
# Instead, send me details of the unexpected response and
# I'll update the code in `_get_response()'.
try:
self._get_response()
except self.abort, val:
if __debug__:
if self.debug >= 1:
self.print_log()
raise
def _get_line(self):
line = self.readline()
if not line:
raise self.abort('socket error: EOF')
# Protocol mandates all lines terminated by CRLF
if not line.endswith('\r\n'):
raise self.abort('socket error: unterminated line')
line = line[:-2]
if __debug__:
if self.debug >= 4:
self._mesg('< %s' % line)
else:
self._log('< %s' % line)
return line
def _match(self, cre, s):
# Run compiled regular expression match method on 's'.
# Save result, return success.
self.mo = cre.match(s)
if __debug__:
if self.mo is not None and self.debug >= 5:
self._mesg("\tmatched r'%s' => %r" % (cre.pattern, self.mo.groups()))
return self.mo is not None
def _new_tag(self):
tag = '%s%s' % (self.tagpre, self.tagnum)
self.tagnum = self.tagnum + 1
self.tagged_commands[tag] = None
return tag
def _checkquote(self, arg):
# Must quote command args if non-alphanumeric chars present,
# and not already quoted.
if type(arg) is not type(''):
return arg
if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')):
return arg
if arg and self.mustquote.search(arg) is None:
return arg
return self._quote(arg)
def _quote(self, arg):
arg = arg.replace('\\', '\\\\')
arg = arg.replace('"', '\\"')
return '"%s"' % arg
def _simple_command(self, name, *args):
return self._command_complete(name, self._command(name, *args))
def _untagged_response(self, typ, dat, name):
if typ == 'NO':
return typ, dat
if not name in self.untagged_responses:
return typ, [None]
data = self.untagged_responses.pop(name)
if __debug__:
if self.debug >= 5:
self._mesg('untagged_responses[%s] => %s' % (name, data))
return typ, data
if __debug__:
def _mesg(self, s, secs=None):
if secs is None:
secs = time.time()
tm = time.strftime('%M:%S', time.localtime(secs))
sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s))
sys.stderr.flush()
def _dump_ur(self, dict):
# Dump untagged responses (in `dict').
l = dict.items()
if not l: return
t = '\n\t\t'
l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
def _log(self, line):
# Keep log of last `_cmd_log_len' interactions for debugging.
self._cmd_log[self._cmd_log_idx] = (line, time.time())
self._cmd_log_idx += 1
if self._cmd_log_idx >= self._cmd_log_len:
self._cmd_log_idx = 0
def print_log(self):
self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log))
i, n = self._cmd_log_idx, self._cmd_log_len
while n:
try:
self._mesg(*self._cmd_log[i])
except:
pass
i += 1
if i >= self._cmd_log_len:
i = 0
n -= 1
try:
import ssl
except ImportError:
pass
else:
class IMAP4_SSL(IMAP4):
"""IMAP4 client class over SSL connection
Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile]]]])
host - host's name (default: localhost);
port - port number (default: standard IMAP4 SSL port).
keyfile - PEM formatted file that contains your private key (default: None);
certfile - PEM formatted certificate chain file (default: None);
for more documentation see the docstring of the parent class IMAP4.
"""
def __init__(self, host = '', port = IMAP4_SSL_PORT, keyfile = None, certfile = None):
self.keyfile = keyfile
self.certfile = certfile
IMAP4.__init__(self, host, port)
def open(self, host = '', port = IMAP4_SSL_PORT):
"""Setup connection to remote server on "host:port".
(default: localhost:standard IMAP4 SSL port).
This connection will be used by the routines:
read, readline, send, shutdown.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port))
self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile)
def read(self, size):
"""Read 'size' bytes from remote."""
# sslobj.read() sometimes returns < size bytes
chunks = []
read = 0
while read < size:
data = self.sslobj.read(min(size-read, 16384))
read += len(data)
chunks.append(data)
return ''.join(chunks)
def readline(self):
"""Read line from remote."""
line = []
while 1:
char = self.sslobj.read(1)
line.append(char)
if char in ("\n", ""): return ''.join(line)
def send(self, data):
"""Send data to remote."""
bytes = len(data)
while bytes > 0:
sent = self.sslobj.write(data)
if sent == bytes:
break # avoid copy
data = data[sent:]
bytes = bytes - sent
def shutdown(self):
"""Close I/O established in "open"."""
self.sock.close()
def socket(self):
"""Return socket instance used to connect to IMAP4 server.
socket = <instance>.socket()
"""
return self.sock
def ssl(self):
"""Return SSLObject instance used to communicate with the IMAP4 server.
ssl = ssl.wrap_socket(<instance>.socket)
"""
return self.sslobj
__all__.append("IMAP4_SSL")
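# Usage sketch (illustrative only): connecting over SSL; the host name and
# credentials are placeholders.
#
#   M = IMAP4_SSL('imap.example.com')
#   M.login('user', 'password')
#   M.select()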
class IMAP4_stream(IMAP4):
"""IMAP4 client class over a stream
Instantiate with: IMAP4_stream(command)
where "command" is a string that can be passed to subprocess.Popen()
for more documentation see the docstring of the parent class IMAP4.
"""
def __init__(self, command):
self.command = command
IMAP4.__init__(self)
def open(self, host = None, port = None):
"""Setup a stream connection.
This connection will be used by the routines:
read, readline, send, shutdown.
"""
self.host = None # For compatibility with parent class
self.port = None
self.sock = None
self.file = None
self.process = subprocess.Popen(self.command,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
shell=True, close_fds=True)
self.writefile = self.process.stdin
self.readfile = self.process.stdout
def read(self, size):
"""Read 'size' bytes from remote."""
return self.readfile.read(size)
def readline(self):
"""Read line from remote."""
return self.readfile.readline()
def send(self, data):
"""Send data to remote."""
self.writefile.write(data)
self.writefile.flush()
def shutdown(self):
"""Close I/O established in "open"."""
self.readfile.close()
self.writefile.close()
self.process.wait()
class _Authenticator:
"""Private class to provide en/decoding
for base64-based authentication conversation.
"""
def __init__(self, mechinst):
self.mech = mechinst # Callable object to provide/process data
def process(self, data):
ret = self.mech(self.decode(data))
if ret is None:
return '*' # Abort conversation
return self.encode(ret)
def encode(self, inp):
#
# Invoke binascii.b2a_base64 iteratively with
# short even length buffers, strip the trailing
# line feed from the result and append. "Even"
# means a buffer length whose bit count is a
# multiple of both 6 and 8, so the end of the
# 8-bit input leaves no partial 6-bit group.
#
oup = ''
while inp:
if len(inp) > 48:
t = inp[:48]
inp = inp[48:]
else:
t = inp
inp = ''
e = binascii.b2a_base64(t)
if e:
oup = oup + e[:-1]
return oup
def decode(self, inp):
if not inp:
return ''
return binascii.a2b_base64(inp)
Mon2num = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
def Internaldate2tuple(resp):
"""Convert IMAP4 INTERNALDATE to UT.
Returns Python time module tuple.
"""
mo = InternalDate.match(resp)
if not mo:
return None
mon = Mon2num[mo.group('mon')]
zonen = mo.group('zonen')
day = int(mo.group('day'))
year = int(mo.group('year'))
hour = int(mo.group('hour'))
min = int(mo.group('min'))
sec = int(mo.group('sec'))
zoneh = int(mo.group('zoneh'))
zonem = int(mo.group('zonem'))
# INTERNALDATE timezone must be subtracted to get UT
zone = (zoneh*60 + zonem)*60
if zonen == '-':
zone = -zone
tt = (year, mon, day, hour, min, sec, -1, -1, -1)
utc = time.mktime(tt)
# Following is necessary because the time module has no 'mkgmtime'.
# 'mktime' assumes arg in local timezone, so adds timezone/altzone.
lt = time.localtime(utc)
if time.daylight and lt[-1]:
zone = zone + time.altzone
else:
zone = zone + time.timezone
return time.localtime(utc - zone)
def Int2AP(num):
"""Convert integer to A-P string representation."""
val = ''; AP = 'ABCDEFGHIJKLMNOP'
num = int(abs(num))
while num:
num, mod = divmod(num, 16)
val = AP[mod] + val
return val
def ParseFlags(resp):
"""Convert IMAP4 flags response to python tuple."""
mo = Flags.match(resp)
if not mo:
return ()
return tuple(mo.group('flags').split())
def Time2Internaldate(date_time):
"""Convert 'date_time' to IMAP4 INTERNALDATE representation.
Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'
"""
if isinstance(date_time, (int, float)):
tt = time.localtime(date_time)
elif isinstance(date_time, (tuple, time.struct_time)):
tt = date_time
elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
return date_time # Assume in correct format
else:
raise ValueError("date_time not of a known type")
dt = time.strftime("%d-%b-%Y %H:%M:%S", tt)
if dt[0] == '0':
dt = ' ' + dt[1:]
if time.daylight and tt[-1]:
zone = -time.altzone
else:
zone = -time.timezone
return '"' + dt + " %+03d%02d" % divmod(zone//60, 60) + '"'
if __name__ == '__main__':
# To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]'
# or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"'
# to test the IMAP4_stream class
import getopt, getpass
try:
optlist, args = getopt.getopt(sys.argv[1:], 'd:s:')
except getopt.error, val:
optlist, args = (), ()
stream_command = None
for opt,val in optlist:
if opt == '-d':
Debug = int(val)
elif opt == '-s':
stream_command = val
if not args: args = (stream_command,)
if not args: args = ('',)
host = args[0]
USER = getpass.getuser()
PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost"))
test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'}
test_seq1 = (
('login', (USER, PASSWD)),
('create', ('/tmp/xxx 1',)),
('rename', ('/tmp/xxx 1', '/tmp/yyy')),
('CREATE', ('/tmp/yyz 2',)),
('append', ('/tmp/yyz 2', None, None, test_mesg)),
('list', ('/tmp', 'yy*')),
('select', ('/tmp/yyz 2',)),
('search', (None, 'SUBJECT', 'test')),
('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')),
('store', ('1', 'FLAGS', '(\Deleted)')),
('namespace', ()),
('expunge', ()),
('recent', ()),
('close', ()),
)
test_seq2 = (
('select', ()),
('response',('UIDVALIDITY',)),
('uid', ('SEARCH', 'ALL')),
('response', ('EXISTS',)),
('append', (None, None, None, test_mesg)),
('recent', ()),
('logout', ()),
)
def run(cmd, args):
M._mesg('%s %s' % (cmd, args))
typ, dat = getattr(M, cmd)(*args)
M._mesg('%s => %s %s' % (cmd, typ, dat))
if typ == 'NO': raise dat[0]
return dat
try:
if stream_command:
M = IMAP4_stream(stream_command)
else:
M = IMAP4(host)
if M.state == 'AUTH':
test_seq1 = test_seq1[1:] # Login not needed
M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION)
M._mesg('CAPABILITIES = %r' % (M.capabilities,))
for cmd,args in test_seq1:
run(cmd, args)
for ml in run('list', ('/tmp/', 'yy%')):
mo = re.match(r'.*"([^"]+)"$', ml)
if mo: path = mo.group(1)
else: path = ml.split()[-1]
run('delete', (path,))
for cmd,args in test_seq2:
dat = run(cmd, args)
if (cmd,args) != ('uid', ('SEARCH', 'ALL')):
continue
uid = dat[-1].split()
if not uid: continue
run('uid', ('FETCH', '%s' % uid[-1],
'(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)'))
print '\nAll tests OK.'
except:
print '\nTests failed.'
if not Debug:
print '''
If you would like to see debugging output,
try: %s -d5
''' % sys.argv[0]
raise
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesOperations(object):
"""VpnSitesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
"""Retrieves the details of a VPN site.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being retrieved.
:type vpn_site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnSite, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.VpnSite
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
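# Usage sketch (illustrative only, not generated code): retrieving a VpnSite
# through a NetworkManagementClient, assuming this operation group is exposed
# as the `vpn_sites` attribute; the credential, subscription id, resource
# group and site name below are placeholders.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   site = client.vpn_sites.get("my-rg", "my-vpn-site")
#   print(site.name)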
def _create_or_update_initial(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.VpnSite"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_site_parameters, 'VpnSite')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnSite', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.VpnSite"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnSite"]
"""Creates a VpnSite resource if it doesn't exist else updates the existing VpnSite.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being created or updated.
:type vpn_site_name: str
:param vpn_site_parameters: Parameters supplied to create or update VpnSite.
:type vpn_site_parameters: ~azure.mgmt.network.v2020_06_01.models.VpnSite
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnSite or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.VpnSite]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vpn_site_name=vpn_site_name,
vpn_site_parameters=vpn_site_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
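# Usage sketch (illustrative only, not generated code): starting the
# long-running create/update and blocking for its result; `client` and the
# VpnSite body (shown here as a plain dict) are placeholders.
#
#   poller = client.vpn_sites.begin_create_or_update(
#       "my-rg", "my-vpn-site",
#       {"location": "westus", "address_space": {"address_prefixes": ["10.0.0.0/24"]}},
#   )
#   vpn_site = poller.result()  # waits for the LRO to complete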
def update_tags(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnSite"
"""Updates VpnSite tags.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being updated.
:type vpn_site_name: str
:param vpn_site_parameters: Parameters supplied to update VpnSite tags.
:type vpn_site_parameters: ~azure.mgmt.network.v2020_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnSite, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.VpnSite
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_site_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VpnSite.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite being deleted.
:type vpn_site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vpn_site_name=vpn_site_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}'} # type: ignore
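# Usage sketch (illustrative only, not generated code): deleting a VpnSite and
# waiting for the long-running operation to finish; `client` is a placeholder.
#
#   poller = client.vpn_sites.begin_delete("my-rg", "my-vpn-site")
#   poller.wait()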
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnSitesResult"]
"""Lists all the vpnSites in a resource group.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either ListVpnSitesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.ListVpnSitesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSitesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSitesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites'} # type: ignore
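# Usage sketch (illustrative only, not generated code): iterating the paged
# result; the returned ItemPaged object fetches subsequent pages transparently.
#
#   for site in client.vpn_sites.list_by_resource_group("my-rg"):
#       print(site.name)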
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnSitesResult"]
"""Lists all the VpnSites in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either ListVpnSitesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.ListVpnSitesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSitesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSitesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnSites'} # type: ignore
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron import context
from neutron.extensions import l3
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.db.l3 import ha_db
from networking_cisco.plugins.cisco.extensions import ha
from networking_cisco.plugins.cisco.extensions import routerhostingdevice
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
from networking_cisco.plugins.cisco.extensions import routertypeawarescheduler
from networking_cisco.tests.unit.cisco.l3 import (
test_ha_l3_router_appliance_plugin as cisco_ha_test)
from networking_cisco.tests.unit.cisco.l3 import (
test_l3_routertype_aware_schedulers as cisco_test_case)
_uuid = uuidutils.generate_uuid
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
AGENT_TYPE_L3_CFG = cisco_constants.AGENT_TYPE_L3_CFG
ROUTER_ROLE_GLOBAL = cisco_constants.ROUTER_ROLE_GLOBAL
ROUTER_ROLE_LOGICAL_GLOBAL = cisco_constants.ROUTER_ROLE_LOGICAL_GLOBAL
ROUTER_ROLE_HA_REDUNDANCY = cisco_constants.ROUTER_ROLE_HA_REDUNDANCY
LOGICAL_ROUTER_ROLE_NAME = cisco_constants.LOGICAL_ROUTER_ROLE_NAME
ROUTER_ROLE_ATTR = routerrole.ROUTER_ROLE_ATTR
HOSTING_DEVICE_ATTR = routerhostingdevice.HOSTING_DEVICE_ATTR
AUTO_SCHEDULE_ATTR = routertypeawarescheduler.AUTO_SCHEDULE_ATTR
class Asr1kRouterTypeDriverTestCase(
cisco_test_case.L3RoutertypeAwareHostingDeviceSchedulerTestCaseBase):
# Why use a Nexus router type for ASR1k driver tests?
# - Because the router type does not matter here, and there is only one
#   hosting device for that router type in the test setup, which makes
#   scheduling deterministic.
router_type = 'Nexus_ToR_Neutron_router'
def _verify_created_routers(self, router_ids, hd_id):
# tenant routers
q_p = '%s=None' % ROUTER_ROLE_ATTR
r_ids = {r['id'] for r in self._list(
'routers', query_params=q_p)['routers']}
self.assertEqual(len(r_ids), len(router_ids))
for r_id in r_ids:
self.assertIn(r_id, router_ids)
# global router on hosting device
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
g_rtrs = self._list('routers', query_params=q_p)['routers']
self.assertEqual(len(g_rtrs), 1)
g_rtr = g_rtrs[0]
self.assertEqual(g_rtr['name'].endswith(
hd_id[-cisco_constants.ROLE_ID_LEN:]), True)
# logical global router for global routers HA
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
g_l_rtrs = self._list('routers', query_params=q_p)['routers']
self.assertEqual(len(g_l_rtrs), 1)
g_l_rtr = g_l_rtrs[0]
self.assertEqual(g_l_rtr['name'], LOGICAL_ROUTER_ROLE_NAME)
self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
# ensure first routers_updated notification was for global router
notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
notify_call = notifier.method_calls[0]
self.assertEqual(notify_call[0], 'routers_updated')
updated_routers = notify_call[1][1]
self.assertEqual(len(updated_routers), 1)
self.assertEqual(updated_routers[0]['id'], g_rtr['id'])
# ensure *no* update notifications were sent for the logical global router
for call in notifier.method_calls:
self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
[ROUTER_ROLE_LOGICAL_GLOBAL])
def _test_gw_router_create_adds_global_router(self, set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
ext_gw = {'network_id': s['subnet']['network_id']}
with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
set_context=set_context) as router1:
r1 = router1['router']
self.plugin._process_backlogged_routers()
r1_after = self._show('routers', r1['id'])['router']
hd_id = r1_after[HOSTING_DEVICE_ATTR]
# should have one global router now
self._verify_created_routers({r1['id']}, hd_id)
with self.router(name='router2', tenant_id=tenant_id,
external_gateway_info=ext_gw,
set_context=set_context) as router2:
r2 = router2['router']
self.plugin._process_backlogged_routers()
# should still have only one global router
self._verify_created_routers({r1['id'], r2['id']}, hd_id)
def test_gw_router_create_adds_global_router(self):
self._test_gw_router_create_adds_global_router()
def test_gw_router_create_adds_global_router_non_admin(self):
self._test_gw_router_create_adds_global_router(True)
def _test_router_create_adds_no_global_router(self, set_context=False):
with self.router(set_context=set_context) as router:
r = router['router']
self.plugin._process_backlogged_routers()
# tenant routers
q_p = '%s=None' % ROUTER_ROLE_ATTR
t_rtrs = self._list('routers', query_params=q_p)['routers']
self.assertEqual(len(t_rtrs), 1)
t_rtr = t_rtrs[0]
self.assertEqual(t_rtr['id'], r['id'])
# global router
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
g_rtrs = self._list('routers', query_params=q_p)['routers']
self.assertEqual(len(g_rtrs), 0)
# logical global router for global routers HA
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
g_l_rtrs = self._list('routers', query_params=q_p)['routers']
self.assertEqual(len(g_l_rtrs), 0)
notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
# ensure *no* update notifications were sent for the global
# router (as there should be none) or the logical global router
for call in notifier.method_calls:
if call[0] != 'router_deleted':
self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
[ROUTER_ROLE_GLOBAL,
ROUTER_ROLE_LOGICAL_GLOBAL])
def test_router_create_adds_no_global_router(self):
self._test_router_create_adds_no_global_router()
def test_router_create_adds_no_global_router_non_admin(self):
self._test_router_create_adds_no_global_router(True)
def _verify_updated_routers(self, router_ids, hd_id=None, call_index=1):
# tenant routers
q_p = '%s=None' % ROUTER_ROLE_ATTR
r_ids = {r['id'] for r in self._list(
'routers', query_params=q_p)['routers']}
self.assertEqual(len(r_ids), len(router_ids))
for r_id in r_ids:
self.assertIn(r_id, router_ids)
# global routers
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
g_rtrs = self._list('routers', query_params=q_p)['routers']
# logical global router for global routers HA
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
g_l_rtrs = self._list('routers', query_params=q_p)['routers']
notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
if hd_id:
self.assertEqual(len(g_rtrs), 1)
g_rtr = g_rtrs[0]
self.assertEqual(
g_rtr['name'].endswith(hd_id[-cisco_constants.ROLE_ID_LEN:]),
True)
self.assertEqual(len(g_l_rtrs), 1)
g_l_rtr = g_l_rtrs[0]
self.assertEqual(g_l_rtr['name'], LOGICAL_ROUTER_ROLE_NAME)
self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
# routers_updated notification call_index is for global router
notify_call = notifier.method_calls[call_index]
self.assertEqual(notify_call[0], 'routers_updated')
updated_routers = notify_call[1][1]
self.assertEqual(len(updated_routers), 1)
self.assertEqual(updated_routers[0]['id'], g_rtr['id'])
else:
self.assertEqual(len(g_rtrs), 0)
self.assertEqual(len(g_l_rtrs), 0)
# ensure *no* update notifications were sent for the logical global router
for call in notifier.method_calls:
if call[0] != 'router_deleted':
self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
[ROUTER_ROLE_LOGICAL_GLOBAL])
def _test_router_update_set_gw_adds_global_router(self, set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
with self.router(tenant_id=tenant_id,
set_context=set_context) as router1,\
self.router(name='router2', tenant_id=tenant_id,
set_context=set_context) as router2:
r1 = router1['router']
r2 = router2['router']
# backlog processing will trigger one routers_updated
# notification containing r1 and r2
self.plugin._process_backlogged_routers()
# should have no global router yet
r_ids = {r1['id'], r2['id']}
self._verify_updated_routers(r_ids)
ext_gw = {'network_id': s['subnet']['network_id']}
r_spec = {'router': {l3.EXTERNAL_GW_INFO: ext_gw}}
r1_after = self._update('routers', r1['id'], r_spec)['router']
hd_id = r1_after[HOSTING_DEVICE_ATTR]
# should now have one global router
self._verify_updated_routers(r_ids, hd_id)
self._update('routers', r2['id'], r_spec)
# should still have only one global router
self._verify_updated_routers(r_ids, hd_id)
def test_router_update_set_gw_adds_global_router(self):
self._test_router_update_set_gw_adds_global_router()
def test_router_update_set_gw_adds_global_router_non_admin(self):
self._test_router_update_set_gw_adds_global_router(True)
def _test_router_update_unset_gw_keeps_global_router(self,
set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
ext_gw = {'network_id': s['subnet']['network_id']}
with self.router(tenant_id=tenant_id,
external_gateway_info=ext_gw,
set_context=set_context) as router1,\
self.router(name='router2', tenant_id=tenant_id,
external_gateway_info=ext_gw,
set_context=set_context) as router2:
r1 = router1['router']
r2 = router2['router']
# backlog processing will trigger one routers_updated
# notification containing r1 and r2
self.plugin._process_backlogged_routers()
r1_after = self._show('routers', r1['id'])['router']
hd_id = r1_after[HOSTING_DEVICE_ATTR]
r_ids = {r1['id'], r2['id']}
# should have one global router now
self._verify_updated_routers(r_ids, hd_id, 0)
r_spec = {'router': {l3.EXTERNAL_GW_INFO: None}}
self._update('routers', r1['id'], r_spec)
# should still have one global router
self._verify_updated_routers(r_ids, hd_id, 0)
self._update('routers', r2['id'], r_spec)
# should have no global router now
self._verify_updated_routers(r_ids)
def test_router_update_unset_gw_keeps_global_router(self):
self._test_router_update_unset_gw_keeps_global_router()
def test_router_update_unset_gw_keeps_global_router_non_admin(self):
self._test_router_update_unset_gw_keeps_global_router(True)
def _verify_deleted_routers(self, hd_id=None, id_global_router=None):
# global routers
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
g_rtrs = self._list('routers', query_params=q_p)['routers']
if hd_id:
self.assertEqual(len(g_rtrs), 1)
g_rtr = g_rtrs[0]
self.assertEqual(g_rtr['name'].endswith(
hd_id[-cisco_constants.ROLE_ID_LEN:]), True)
return g_rtrs[0]['id']
else:
self.assertEqual(len(g_rtrs), 0)
notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
# ensure last router_deleted notification was for global router
notify_call = notifier.method_calls[-1]
self.assertEqual(notify_call[0], 'router_deleted')
deleted_router = notify_call[1][1]
self.assertEqual(deleted_router['id'], id_global_router)
def _test_gw_router_delete_removes_global_router(self, set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
ext_gw = {'network_id': s['subnet']['network_id']}
with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
set_context=set_context) as router1,\
self.router(name='router2', tenant_id=tenant_id,
external_gateway_info=ext_gw,
set_context=set_context) as router2:
r1 = router1['router']
r2 = router2['router']
self.plugin._process_backlogged_routers()
r1_after = self._show('routers', r1['id'])['router']
hd_id = r1_after[HOSTING_DEVICE_ATTR]
self._delete('routers', r1['id'])
# should still have the global router
id_global_router = self._verify_deleted_routers(hd_id)
self._delete('routers', r2['id'])
# should be no global router now
self._verify_deleted_routers(id_global_router=id_global_router)
def test_gw_router_delete_removes_global_router(self):
self._test_gw_router_delete_removes_global_router()
def test_gw_router_delete_removes_global_router_non_admin(self):
self._test_gw_router_delete_removes_global_router(True)
def _test_router_delete_removes_no_global_router(self, set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
ext_gw = {'network_id': s['subnet']['network_id']}
with self.router(tenant_id=tenant_id,
set_context=set_context) as router1,\
self.router(name='router2', tenant_id=tenant_id,
external_gateway_info=ext_gw,
set_context=set_context) as router2:
r1 = router1['router']
r2 = router2['router']
self.plugin._process_backlogged_routers()
r1_after = self._show('routers', r1['id'])['router']
hd_id = r1_after[HOSTING_DEVICE_ATTR]
self._delete('routers', r1['id'])
# should still have the global router
id_global_router = self._verify_deleted_routers(hd_id)
self._delete('routers', r2['id'])
# should be no global router now
self._verify_deleted_routers(id_global_router=id_global_router)
def test_router_delete_removes_no_global_router(self):
self._test_router_delete_removes_no_global_router()
def test_router_delete_removes_no_global_router_non_admin(self):
self._test_router_delete_removes_no_global_router(True)
class Asr1kHARouterTypeDriverTestCase(
Asr1kRouterTypeDriverTestCase,
cisco_ha_test.HAL3RouterTestsMixin):
# For the HA tests we need more than one hosting device
router_type = 'ASR1k_Neutron_router'
_is_ha_tests = True
def setUp(self, core_plugin=None, l3_plugin=None, dm_plugin=None,
ext_mgr=None):
if l3_plugin is None:
l3_plugin = cisco_test_case.HA_L3_PLUGIN_KLASS
if ext_mgr is None:
ext_mgr = (cisco_test_case.
TestHASchedulingL3RouterApplianceExtensionManager())
cfg.CONF.set_override('default_ha_redundancy_level', 1, group='ha')
super(Asr1kHARouterTypeDriverTestCase, self).setUp(
l3_plugin=l3_plugin, ext_mgr=ext_mgr)
def _verify_ha_created_routers(self, router_ids, num_redundancy=1,
has_gw=None):
if has_gw is None:
has_gw = [True for r_id in router_ids]
temp = {}
for i in range(len(router_ids)):
temp[router_ids[i]] = has_gw[i]
has_gw = temp
# tenant HA user_visible routers
q_p = '%s=None' % ROUTER_ROLE_ATTR
uv_routers = self._list('routers', query_params=q_p)['routers']
uv_r_ids = {r['id'] for r in uv_routers}
self.assertEqual(len(uv_r_ids), len(router_ids))
for uv_r_id in uv_r_ids:
self.assertIn(uv_r_id, router_ids)
# tenant HA redundancy routers
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_HA_REDUNDANCY)
rr_id_to_rr = {
r['id']: r for r in self._list('routers',
query_params=q_p)['routers']}
all_rr_ids = rr_id_to_rr.keys()
num_rr_ids = 0
hd_ids = set()
for uv_r in uv_routers:
uv_r_hd_id = uv_r[HOSTING_DEVICE_ATTR]
if has_gw[uv_r['id']] is True:
self.assertIsNotNone(uv_r[EXTERNAL_GW_INFO])
hd_ids.add(uv_r_hd_id)
else:
self.assertIsNone(uv_r[EXTERNAL_GW_INFO])
rr_ids = [rr_info['id']
for rr_info in uv_r[ha.DETAILS][ha.REDUNDANCY_ROUTERS]]
num = len(rr_ids)
num_rr_ids += num
self.assertEqual(num, num_redundancy)
for rr_id in rr_ids:
self.assertIn(rr_id, all_rr_ids)
rr = rr_id_to_rr[rr_id]
rr_hd_id = rr[HOSTING_DEVICE_ATTR]
# redundancy router must not be hosted on the same device as its
# user visible router since that defeats HA
self.assertFalse(uv_r_hd_id == rr_hd_id)
if has_gw[uv_r['id']] is True:
self.assertIsNotNone(rr[EXTERNAL_GW_INFO])
hd_ids.add(rr_hd_id)
else:
self.assertIsNone(rr[EXTERNAL_GW_INFO])
self.assertEqual(num_rr_ids, len(all_rr_ids))
# we should have a global router on all hosting devices that host
# a router (user visible or redundancy router) with the gateway set
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
g_rtrs = self._list('routers', query_params=q_p)['routers']
self.assertEqual(len(g_rtrs), len(hd_ids))
g_rtr_ids = set()
for g_rtr in g_rtrs:
self.assertIn(g_rtr[HOSTING_DEVICE_ATTR], hd_ids)
g_rtr_ids.add(g_rtr['id'])
# logical global router for global routers HA
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
g_l_rtrs = self._list('routers', query_params=q_p)['routers']
if g_l_rtrs:
self.assertEqual(len(g_l_rtrs), 1)
g_l_rtr = g_l_rtrs[0]
self.assertEqual(g_l_rtr['name'], LOGICAL_ROUTER_ROLE_NAME)
self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
else:
self.assertEqual(len(g_l_rtrs), 0)
notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
if g_l_rtrs:
# ensure first routers_updated notifications were
# for global routers
for i in range(len(hd_ids)):
notify_call = notifier.method_calls[i]
self.assertEqual(notify_call[0], 'routers_updated')
updated_routers = notify_call[1][1]
self.assertEqual(len(updated_routers), 1)
self.assertIn(updated_routers[0]['id'], g_rtr_ids)
g_rtr_ids.remove(updated_routers[0]['id'])
else:
# ensure *no* update notifications were sent for global routers
for call in notifier.method_calls:
self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
[ROUTER_ROLE_GLOBAL])
# ensure *no* update notifications were sent for the logical global router
for call in notifier.method_calls:
self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
[ROUTER_ROLE_LOGICAL_GLOBAL])
def _test_gw_router_create_adds_global_router(self, set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
ext_gw = {'network_id': s['subnet']['network_id']}
with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
set_context=set_context) as router1:
r = router1['router']
self.plugin._process_backlogged_routers()
# should now have one user-visible router, its single
# redundancy router and two global routers (one for each of
# the hosting devices of the aforementioned routers)
self._verify_ha_created_routers([r['id']])
def _test_router_create_adds_no_global_router(self, set_context=False):
with self.router(set_context=set_context) as router:
r = router['router']
self.plugin._process_backlogged_routers()
self._verify_ha_created_routers([r['id']], 1, has_gw=[False])
def _verify_ha_updated_router(self, router_id, hd_ids=None, call_index=1,
num_redundancy=1, has_gw=True):
# ids of hosting devices hosting routers with gateway set
hd_ids = hd_ids or set()
if router_id:
# tenant router
uv_r = self._show('routers', router_id)['router']
uv_r_hd_id = uv_r[HOSTING_DEVICE_ATTR]
if has_gw is True:
self.assertIsNotNone(uv_r[EXTERNAL_GW_INFO])
hd_ids.add(uv_r_hd_id)
else:
self.assertIsNone(uv_r[EXTERNAL_GW_INFO])
rr_ids = [rr_info['id']
for rr_info in uv_r[ha.DETAILS][ha.REDUNDANCY_ROUTERS]]
# tenant HA redundancy routers
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_HA_REDUNDANCY)
rr_id_to_rr = {
r['id']: r for r in self._list('routers',
query_params=q_p)['routers']}
all_rr_ids = rr_id_to_rr.keys()
self.assertEqual(len(rr_ids), num_redundancy)
for rr_id in rr_ids:
self.assertIn(rr_id, all_rr_ids)
rr = rr_id_to_rr[rr_id]
rr_hd_id = rr[HOSTING_DEVICE_ATTR]
# redundancy router must not be hosted on the same device as its
# user visible router since that defeats HA
self.assertFalse(uv_r_hd_id == rr_hd_id)
if has_gw is True:
self.assertIsNotNone(rr[EXTERNAL_GW_INFO])
hd_ids.add(rr_hd_id)
else:
self.assertIsNone(rr[EXTERNAL_GW_INFO])
# we should have a global router on all hosting devices that host
# a router (user visible or redundancy router) with the gateway set
num_devices_hosting_gateway_routers = len(hd_ids)
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
g_rtrs = self._list('routers', query_params=q_p)['routers']
self.assertEqual(len(g_rtrs), num_devices_hosting_gateway_routers)
g_rtr_ids = set()
for g_rtr in g_rtrs:
self.assertIn(g_rtr[HOSTING_DEVICE_ATTR], hd_ids)
g_rtr_ids.add(g_rtr['id'])
# logical global router for global routers HA
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
g_l_rtrs = self._list('routers', query_params=q_p)['routers']
if num_devices_hosting_gateway_routers > 0:
self.assertEqual(len(g_l_rtrs), 1)
g_l_rtr = g_l_rtrs[0]
self.assertEqual(g_l_rtr['name'], LOGICAL_ROUTER_ROLE_NAME)
self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
else:
self.assertEqual(len(g_l_rtrs), 0)
# global routers
notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
# the routers_updated notification at call_index is for the global router
notify_call = notifier.method_calls[call_index]
self.assertEqual(notify_call[0], 'routers_updated')
updated_routers = notify_call[1][1]
self.assertEqual(len(updated_routers), 1)
self.assertEqual(updated_routers[0][ROUTER_ROLE_ATTR],
ROUTER_ROLE_GLOBAL)
# ensure *no* update notifications were sent for the logical global router
for call in notifier.method_calls:
if call[0] != 'router_deleted':
self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
[ROUTER_ROLE_LOGICAL_GLOBAL])
return hd_ids
def _test_router_update_set_gw_adds_global_router(self, set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
with self.router(tenant_id=tenant_id,
set_context=set_context) as router1,\
self.router(name='router2', tenant_id=tenant_id,
set_context=set_context) as router2:
r1 = router1['router']
r2 = router2['router']
# backlog processing to schedule the routers
self.plugin._process_backlogged_routers()
# should have no global router yet
r_ids = [r1['id'], r2['id']]
self._verify_ha_created_routers(r_ids, 1, has_gw=[False,
False])
ext_gw = {'network_id': s['subnet']['network_id']}
r_spec = {'router': {l3.EXTERNAL_GW_INFO: ext_gw}}
self._update('routers', r1['id'], r_spec)
# should now have two global routers, one for hosting device
# of user visible router r1 and one for the hosting device r1's
# redundancy router
hd_ids = self._verify_ha_updated_router(r1['id'])
self._update('routers', r2['id'], r_spec)
self._verify_ha_updated_router(r2['id'], hd_ids)
def _test_router_update_unset_gw_keeps_global_router(self,
set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
ext_gw = {'network_id': s['subnet']['network_id']}
with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
set_context=set_context) as router1,\
self.router(name='router2', tenant_id=tenant_id,
external_gateway_info=ext_gw,
set_context=set_context) as router2:
r1 = router1['router']
r2 = router2['router']
# make sure we have only two eligible hosting devices
# in this test
qp = "template_id=00000000-0000-0000-0000-000000000005"
hds = self._list('hosting_devices', query_params=qp)
self._delete('hosting_devices',
hds['hosting_devices'][1]['id'])
# backlog processing to schedule the routers
self.plugin._process_backlogged_routers()
self._verify_ha_created_routers([r1['id'], r2['id']])
r_spec = {'router': {l3.EXTERNAL_GW_INFO: None}}
self._update('routers', r1['id'], r_spec)
# should still have two global routers, we verify using r2
self._verify_ha_updated_router(r2['id'])
self._update('routers', r2['id'], r_spec)
# should have no global routers now, we verify using r2
self._verify_ha_updated_router(r2['id'], has_gw=False)
def _test_gw_router_delete_removes_global_router(self, set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
ext_gw = {'network_id': s['subnet']['network_id']}
with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
set_context=set_context) as router1,\
self.router(name='router2', tenant_id=tenant_id,
external_gateway_info=ext_gw,
set_context=set_context) as router2:
r1 = router1['router']
r2 = router2['router']
# make sure we have only two eligible hosting devices
# in this test
qp = "template_id=00000000-0000-0000-0000-000000000005"
hds = self._list('hosting_devices', query_params=qp)
self._delete('hosting_devices',
hds['hosting_devices'][1]['id'])
# backlog processing to schedule the routers
self.plugin._process_backlogged_routers()
self._verify_ha_created_routers([r1['id'], r2['id']])
self._delete('routers', r1['id'])
# should still have two global routers, we verify using r2
self._verify_ha_updated_router(r2['id'])
self._delete('routers', r2['id'])
# should have no global routers now
self._verify_ha_updated_router(None)
def _test_router_delete_removes_no_global_router(self, set_context=False):
tenant_id = _uuid()
with self.network(tenant_id=tenant_id) as n_external:
res = self._create_subnet(self.fmt, n_external['network']['id'],
cidr='10.0.1.0/24', tenant_id=tenant_id)
s = self.deserialize(self.fmt, res)
self._set_net_external(s['subnet']['network_id'])
ext_gw = {'network_id': s['subnet']['network_id']}
with self.router(tenant_id=tenant_id,
set_context=set_context) as router1,\
self.router(name='router2', tenant_id=tenant_id,
external_gateway_info=ext_gw,
set_context=set_context) as router2:
r1 = router1['router']
r2 = router2['router']
# make sure we have only two eligible hosting devices
# in this test
qp = "template_id=00000000-0000-0000-0000-000000000005"
hds = self._list('hosting_devices', query_params=qp)
self._delete('hosting_devices',
hds['hosting_devices'][1]['id'])
self.plugin._process_backlogged_routers()
self._verify_ha_created_routers([r1['id'], r2['id']],
has_gw=[False, True])
self._delete('routers', r1['id'])
# should still have two global routers, we verify using r2
self._verify_ha_updated_router(r2['id'])
self._delete('routers', r2['id'])
# should have no global routers now
self._verify_ha_updated_router(None)
class L3CfgAgentAsr1kRouterTypeDriverTestCase(
cisco_test_case.L3RoutertypeAwareHostingDeviceSchedulerTestCaseBase,
cisco_ha_test.HAL3RouterTestsMixin):
_is_ha_tests = True
def setUp(self, core_plugin=None, l3_plugin=None, dm_plugin=None,
ext_mgr=None):
if l3_plugin is None:
l3_plugin = cisco_test_case.HA_L3_PLUGIN_KLASS
if ext_mgr is None:
ext_mgr = (cisco_test_case.
TestHASchedulingL3RouterApplianceExtensionManager())
cfg.CONF.set_override('default_ha_redundancy_level', 1, group='ha')
super(L3CfgAgentAsr1kRouterTypeDriverTestCase, self).setUp(
l3_plugin=l3_plugin, ext_mgr=ext_mgr)
self.orig_get_sync_data = self.plugin.get_sync_data
self.plugin.get_sync_data = self.plugin.get_sync_data_ext
def tearDown(self):
self.plugin.get_sync_data = self.orig_get_sync_data
super(L3CfgAgentAsr1kRouterTypeDriverTestCase, self).tearDown()
def _verify_sync_data(self, context, ids_colocated_routers, g_l_rtr,
g_l_rtr_rr_ids, ha_settings):
routers = self.plugin.get_sync_data_ext(context,
ids_colocated_routers)
self.assertEqual(len(routers), 2)
global_router = [r for r in routers if
r[ROUTER_ROLE_ATTR] == ROUTER_ROLE_GLOBAL][0]
# verify that global router has HA information from logical
# global router, in particular VIP address for the gw port
# comes from the gw port of the logical global router
ha_info = global_router['gw_port']['ha_info']
ha_port_id = ha_info['ha_port']['id']
vip_address = g_l_rtr[l3.EXTERNAL_GW_INFO][
'external_fixed_ips'][0]['ip_address']
self.assertEqual(
ha_info['ha_port']['fixed_ips'][0]['ip_address'],
vip_address)
self.assertEqual(global_router['gw_port_id'] == ha_port_id,
False)
self._verify_ha_settings(global_router, ha_settings)
rr_info_list = global_router[ha.DETAILS][ha.REDUNDANCY_ROUTERS]
self.assertEqual(len(rr_info_list), len(g_l_rtr_rr_ids))
for rr_info in rr_info_list:
self.assertIn(rr_info['id'], g_l_rtr_rr_ids)
def test_l3_cfg_agent_query_global_router_info(self):
with self.subnet(cidr='10.0.1.0/24') as s_ext:
self._set_net_external(s_ext['subnet']['network_id'])
ext_gw = {'network_id': s_ext['subnet']['network_id']}
with self.router(external_gateway_info=ext_gw) as router:
r = router['router']
self.plugin._process_backlogged_routers()
r_after = self._show('routers', r['id'])['router']
hd_id = r_after[HOSTING_DEVICE_ATTR]
id_r_ha_backup = r_after[ha.DETAILS][
ha.REDUNDANCY_ROUTERS][0]['id']
r_ha_backup_after = self._show('routers',
id_r_ha_backup)['router']
ha_backup_hd_id = r_ha_backup_after[HOSTING_DEVICE_ATTR]
# logical global router for global routers HA
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
g_l_rtrs = self._list('routers', query_params=q_p)['routers']
# should be only one logical global router
self.assertEqual(len(g_l_rtrs), 1)
g_l_rtr = g_l_rtrs[0]
g_l_rtr_rr_ids = {r_info['id'] for r_info in g_l_rtr[
ha.DETAILS][ha.REDUNDANCY_ROUTERS]}
self.assertEqual(g_l_rtr[ha.ENABLED], True)
self.assertEqual(g_l_rtr[routertype.TYPE_ATTR],
r[routertype.TYPE_ATTR])
# no auto-scheduling to ensure logical global router is never
# instantiated (unless an admin does some bad thing...)
self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
# global router on hosting devices
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
g_rtrs = {g_r[HOSTING_DEVICE_ATTR]: g_r for g_r in self._list(
'routers', query_params=q_p)['routers']}
self.assertEqual(len(g_rtrs), 2)
for g_r in g_rtrs.values():
self.assertEqual(g_r[routertype.TYPE_ATTR],
r[routertype.TYPE_ATTR])
# global routers should have HA disabled in db
self.assertEqual(g_r[ha.ENABLED], False)
# global routers should never be auto-scheduled as that
# can result in them being moved to another hosting device
self.assertEqual(g_r[AUTO_SCHEDULE_ATTR], False)
# global router should be redundancy router of the logical
# global router for this router type
self.assertIn(g_r['id'], g_l_rtr_rr_ids)
e_context = context.get_admin_context()
# global routers should here have HA setup information from
# the logical global router
ha_settings = self._get_ha_defaults(
ha_type=cfg.CONF.ha.default_ha_mechanism,
redundancy_level=2, priority=ha_db.DEFAULT_MASTER_PRIORITY)
# verify global router co-located with the user visible router
ids_colocated_routers = [r['id'], g_rtrs[hd_id]['id']]
self._verify_sync_data(e_context, ids_colocated_routers,
g_l_rtr, g_l_rtr_rr_ids, ha_settings)
# verify global router co-located with the HA backup
# router of the user visible router
ids_colocated_routers = [r_ha_backup_after['id'],
g_rtrs[ha_backup_hd_id]['id']]
self._verify_sync_data(e_context, ids_colocated_routers,
g_l_rtr, g_l_rtr_rr_ids, ha_settings)
|
|
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
import imp
import os
import six
from ansible import constants
from ansible.errors import AnsibleError
try:
# Try to import the Ansible 2 module first, it's the future-proof one
from ansible.parsing.splitter import split_args
except ImportError:
# Fallback on the Ansible 1.9 module
from ansible.module_utils.splitter import split_args
import yaml
from yaml.composer import Composer
from yaml.constructor import Constructor
try:
from ansible.utils import parse_yaml_from_file
from ansible.utils import path_dwim
from ansible.utils.template import template as ansible_template
from ansible.utils import module_finder
module_loader = module_finder
ANSIBLE_VERSION = 1
except ImportError:
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.constructor import AnsibleConstructor
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.errors import AnsibleParserError
ANSIBLE_VERSION = 2
# ansible-lint doesn't need/want to know about encrypted secrets, but
# Ansible 2.3+ allows encrypted secrets within yaml files, so we pass a string
# as the password to enable such yaml files to be opened and parsed successfully.
DEFAULT_VAULT_PASSWORD = 'x'
def parse_yaml_from_file(filepath):
dl = DataLoader()
if hasattr(dl, 'set_vault_password'):
dl.set_vault_password(DEFAULT_VAULT_PASSWORD)
return dl.load_from_file(filepath)
def path_dwim(basedir, given):
dl = DataLoader()
dl.set_basedir(basedir)
return dl.path_dwim(given)
def ansible_template(basedir, varname, templatevars, **kwargs):
dl = DataLoader()
dl.set_basedir(basedir)
templar = Templar(dl, variables=templatevars)
return templar.template(varname, **kwargs)
try:
from ansible.plugins import module_loader
except ImportError:
from ansible.plugins.loader import module_loader
LINE_NUMBER_KEY = '__line__'
FILENAME_KEY = '__file__'
VALID_KEYS = [
'name', 'action', 'when', 'async', 'poll', 'notify',
'first_available_file', 'include', 'import_playbook',
'tags', 'register', 'ignore_errors', 'delegate_to',
'local_action', 'transport', 'remote_user', 'sudo',
'sudo_user', 'sudo_pass', 'when', 'connection', 'environment', 'args', 'always_run',
'any_errors_fatal', 'changed_when', 'failed_when', 'check_mode', 'delay',
'retries', 'until', 'su', 'su_user', 'su_pass', 'no_log', 'run_once',
'become', 'become_user', 'become_method', FILENAME_KEY,
]
BLOCK_NAME_TO_ACTION_TYPE_MAP = {
'tasks': 'task',
'handlers': 'handler',
'pre_tasks': 'task',
'post_tasks': 'task',
'block': 'meta',
'rescue': 'meta',
'always': 'meta',
}
def load_plugins(directory):
result = []
fh = None
for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')):
pluginname = os.path.basename(pluginfile.replace('.py', ''))
try:
fh, filename, desc = imp.find_module(pluginname, [directory])
mod = imp.load_module(pluginname, fh, filename, desc)
obj = getattr(mod, pluginname)()
result.append(obj)
finally:
if fh:
fh.close()
return result
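# Illustrative note (added comment, not part of the original module): load_plugins
# assumes each plugin file defines a class with the same name as the file, e.g. a
# hypothetical directory/SomeRule.py defining `class SomeRule` would be imported
# and an instance appended via getattr(mod, 'SomeRule')().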
def tokenize(line):
tokens = line.lstrip().split(" ")
if tokens[0] == '-':
tokens = tokens[1:]
if tokens[0] == 'action:' or tokens[0] == 'local_action:':
tokens = tokens[1:]
command = tokens[0].replace(":", "")
args = list()
kwargs = dict()
nonkvfound = False
for arg in tokens[1:]:
if "=" in arg and not nonkvfound:
kv = arg.split("=", 1)
kwargs[kv[0]] = kv[1]
else:
nonkvfound = True
args.append(arg)
return (command, args, kwargs)
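# Illustrative examples (added comments, not part of the original module):
# tokenize('copy: src=a.txt dest=/tmp') would return
#     ('copy', [], {'src': 'a.txt', 'dest': '/tmp'})
# while tokenize('- local_action: shell echo hello') strips the leading '-' and the
# 'local_action:' prefix and returns ('shell', ['echo', 'hello'], {}).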
def _playbook_items(pb_data):
if isinstance(pb_data, dict):
return pb_data.items()
elif not pb_data:
return []
else:
return [item for play in pb_data for item in play.items()]
def find_children(playbook, playbook_dir):
if not os.path.exists(playbook[0]):
return []
if playbook[1] == 'role':
playbook_ds = {'roles': [{'role': playbook[0]}]}
else:
try:
playbook_ds = parse_yaml_from_file(playbook[0])
except AnsibleError as e:
raise SystemExit(str(e))
results = []
basedir = os.path.dirname(playbook[0])
items = _playbook_items(playbook_ds)
for item in items:
for child in play_children(basedir, item, playbook[1], playbook_dir):
if "$" in child['path'] or "{{" in child['path']:
continue
valid_tokens = list()
for token in split_args(child['path']):
if '=' in token:
break
valid_tokens.append(token)
path = ' '.join(valid_tokens)
results.append({
'path': path_dwim(basedir, path),
'type': child['type']
})
return results
def template(basedir, value, vars, fail_on_undefined=False, **kwargs):
try:
value = ansible_template(os.path.abspath(basedir), value, vars,
**dict(kwargs, fail_on_undefined=fail_on_undefined))
# Hack to skip the following exception when using to_json filter on a variable.
# I guess the filter doesn't like empty vars...
except (AnsibleError, ValueError):
# templating failed, so just keep value as is.
pass
return value
def play_children(basedir, item, parent_type, playbook_dir):
delegate_map = {
'tasks': _taskshandlers_children,
'pre_tasks': _taskshandlers_children,
'post_tasks': _taskshandlers_children,
'block': _taskshandlers_children,
'include': _include_children,
'import_playbook': _include_children,
'roles': _roles_children,
'dependencies': _roles_children,
'handlers': _taskshandlers_children,
}
(k, v) = item
play_library = os.path.join(os.path.abspath(basedir), 'library')
_load_library_if_exists(play_library)
if k in delegate_map:
if v:
v = template(os.path.abspath(basedir),
v,
dict(playbook_dir=os.path.abspath(basedir)),
fail_on_undefined=False)
return delegate_map[k](basedir, k, v, parent_type)
return []
def _include_children(basedir, k, v, parent_type):
# handle include: filename.yml tags=blah
(command, args, kwargs) = tokenize("{0}: {1}".format(k, v))
result = path_dwim(basedir, args[0])
if not os.path.exists(result) and not basedir.endswith('tasks'):
result = path_dwim(os.path.join(basedir, '..', 'tasks'), v)
return [{'path': result, 'type': parent_type}]
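# Sketch of expected behaviour (added comment, hypothetical values): for an entry
# like `include: common.yml tags=setup` found under 'tasks',
# _include_children(basedir, 'include', 'common.yml tags=setup', 'tasks') resolves
# 'common.yml' relative to basedir (falling back to a '../tasks' lookup when the
# file is not found there) and returns [{'path': <resolved path>, 'type': 'tasks'}].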
def _taskshandlers_children(basedir, k, v, parent_type):
results = []
for th in v:
if 'include' in th:
append_children(th['include'], basedir, k, parent_type, results)
elif 'include_tasks' in th:
append_children(th['include_tasks'], basedir, k, parent_type, results)
elif 'import_playbook' in th:
append_children(th['import_playbook'], basedir, k, parent_type, results)
elif 'import_tasks' in th:
append_children(th['import_tasks'], basedir, k, parent_type, results)
elif 'import_role' in th:
results.extend(_roles_children(basedir, k, [th['import_role'].get('name')], parent_type,
main=th['import_role'].get('tasks_from', 'main')))
elif 'include_role' in th:
results.extend(_roles_children(basedir, k, [th['include_role'].get('name')],
parent_type,
main=th['include_role'].get('tasks_from', 'main')))
elif 'block' in th:
results.extend(_taskshandlers_children(basedir, k, th['block'], parent_type))
if 'rescue' in th:
results.extend(_taskshandlers_children(basedir, k, th['rescue'], parent_type))
if 'always' in th:
results.extend(_taskshandlers_children(basedir, k, th['always'], parent_type))
return results
def append_children(taskhandler, basedir, k, parent_type, results):
# when _taskshandlers_children is called for playbooks, the
# actual type of the included tasks is the section containing the
# include, e.g. tasks, pre_tasks, or handlers.
if parent_type == 'playbook':
playbook_section = k
else:
playbook_section = parent_type
results.append({
'path': path_dwim(basedir, taskhandler),
'type': playbook_section
})
def _roles_children(basedir, k, v, parent_type, main='main'):
results = []
for role in v:
if isinstance(role, dict):
if 'role' in role or 'name' in role:
if 'tags' not in role or 'skip_ansible_lint' not in role['tags']:
results.extend(_look_for_role_files(basedir,
role.get('role', role.get('name')),
main=main))
else:
raise SystemExit('role dict {0} does not contain a "role" '
'or "name" key'.format(role))
else:
results.extend(_look_for_role_files(basedir, role, main=main))
return results
def _load_library_if_exists(path):
if os.path.exists(path):
module_loader.add_directory(path)
def _rolepath(basedir, role):
role_path = None
possible_paths = [
# if included from a playbook
path_dwim(basedir, os.path.join('roles', role)),
path_dwim(basedir, role),
# if included from roles/[role]/meta/main.yml
path_dwim(
basedir, os.path.join('..', '..', '..', 'roles', role)
),
path_dwim(basedir, os.path.join('..', '..', role))
]
if constants.DEFAULT_ROLES_PATH:
search_locations = constants.DEFAULT_ROLES_PATH
if isinstance(search_locations, six.string_types):
search_locations = search_locations.split(os.pathsep)
for loc in search_locations:
loc = os.path.expanduser(loc)
possible_paths.append(path_dwim(loc, role))
for path_option in possible_paths:
if os.path.isdir(path_option):
role_path = path_option
break
if role_path:
_load_library_if_exists(os.path.join(role_path, 'library'))
return role_path
def _look_for_role_files(basedir, role, main='main'):
role_path = _rolepath(basedir, role)
if not role_path:
return []
results = []
for th in ['tasks', 'handlers', 'meta']:
for ext in ('.yml', '.yaml'):
thpath = os.path.join(role_path, th, main + ext)
if os.path.exists(thpath):
results.append({'path': thpath, 'type': th})
break
return results
def rolename(filepath):
idx = filepath.find('roles/')
if idx < 0:
return ''
role = filepath[idx+6:]
role = role[:role.find('/')]
return role
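# Added examples for clarity (not in the original module):
# rolename('roles/common/tasks/main.yml') returns 'common', while a path that does
# not contain 'roles/' (e.g. 'site.yml') returns ''.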
def _kv_to_dict(v):
(command, args, kwargs) = tokenize(v)
return (dict(__ansible_module__=command, __ansible_arguments__=args, **kwargs))
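# Added example (not in the original module): _kv_to_dict('copy src=a.txt dest=/tmp')
# would yield {'__ansible_module__': 'copy', '__ansible_arguments__': [],
# 'src': 'a.txt', 'dest': '/tmp'}.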
def normalize_task_v2(task):
'''Ensures tasks have an action key and strings are converted to python objects'''
result = dict()
mod_arg_parser = ModuleArgsParser(task)
try:
action, arguments, result['delegate_to'] = mod_arg_parser.parse()
except AnsibleParserError as e:
try:
task_info = "%s:%s" % (task[FILENAME_KEY], task[LINE_NUMBER_KEY])
del task[FILENAME_KEY]
del task[LINE_NUMBER_KEY]
except KeyError:
task_info = "Unknown"
try:
import pprint
pp = pprint.PrettyPrinter(indent=2)
task_pprint = pp.pformat(task)
except ImportError:
task_pprint = task
raise SystemExit("Couldn't parse task at %s (%s)\n%s" % (task_info, e.message, task_pprint))
# denormalize shell -> command conversion
if '_uses_shell' in arguments:
action = 'shell'
del(arguments['_uses_shell'])
for (k, v) in list(task.items()):
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action:
# we don't want to re-assign these values, which were
# determined by the ModuleArgsParser() above
continue
else:
result[k] = v
result['action'] = dict(__ansible_module__=action)
if '_raw_params' in arguments:
result['action']['__ansible_arguments__'] = arguments['_raw_params'].split(' ')
del(arguments['_raw_params'])
else:
result['action']['__ansible_arguments__'] = list()
result['action'].update(arguments)
return result
def normalize_task_v1(task):
result = dict()
for (k, v) in task.items():
if k in VALID_KEYS or k.startswith('with_'):
if k == 'local_action' or k == 'action':
if not isinstance(v, dict):
v = _kv_to_dict(v)
v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
result['action'] = v
else:
result[k] = v
else:
if isinstance(v, six.string_types):
v = _kv_to_dict(k + ' ' + v)
elif not v:
v = dict(__ansible_module__=k)
else:
if isinstance(v, dict):
v.update(dict(__ansible_module__=k))
else:
if k == '__line__':
# Keep the line number stored
result[k] = v
continue
else:
# Tasks that include playbooks (rather than task files)
# can get here
# https://github.com/willthames/ansible-lint/issues/138
raise RuntimeError("Was not expecting value %s of type %s for key %s\n"
"Task: %s. Check the syntax of your playbook using "
"ansible-playbook --syntax-check" %
(str(v), type(v), k, str(task)))
v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
result['action'] = v
if 'module' in result['action']:
# this happens when a task uses
# local_action:
# module: ec2
# etc...
result['action']['__ansible_module__'] = result['action']['module']
del(result['action']['module'])
if 'args' in result:
result['action'].update(result.get('args'))
del(result['args'])
return result
def normalize_task(task, filename):
ansible_action_type = task.get('__ansible_action_type__', 'task')
if '__ansible_action_type__' in task:
del(task['__ansible_action_type__'])
if ANSIBLE_VERSION < 2:
task = normalize_task_v1(task)
else:
task = normalize_task_v2(task)
task[FILENAME_KEY] = filename
task['__ansible_action_type__'] = ansible_action_type
return task
def task_to_str(task):
name = task.get("name")
if name:
return name
action = task.get("action")
args = " ".join([u"{0}={1}".format(k, v) for (k, v) in action.items()
if k not in ["__ansible_module__", "__ansible_arguments__"]] +
action.get("__ansible_arguments__"))
return u"{0} {1}".format(action["__ansible_module__"], args)
def extract_from_list(blocks, candidates):
results = list()
for block in blocks:
for candidate in candidates:
if isinstance(block, dict) and candidate in block:
if isinstance(block[candidate], list):
results.extend(add_action_type(block[candidate], candidate))
elif block[candidate] is not None:
raise RuntimeError(
"Key '%s' defined, but bad value: '%s'" %
(candidate, str(block[candidate])))
return results
def add_action_type(actions, action_type):
results = list()
for action in actions:
action['__ansible_action_type__'] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type]
results.append(action)
return results
def get_action_tasks(yaml, file):
tasks = list()
if file['type'] in ['tasks', 'handlers']:
tasks = add_action_type(yaml, file['type'])
else:
tasks.extend(extract_from_list(yaml, ['tasks', 'handlers', 'pre_tasks', 'post_tasks']))
# Add sub-elements of block/rescue/always to tasks list
tasks.extend(extract_from_list(tasks, ['block', 'rescue', 'always']))
# Remove block/rescue/always elements from tasks list
block_rescue_always = ('block', 'rescue', 'always')
tasks[:] = [task for task in tasks if all(k not in task for k in block_rescue_always)]
return [task for task in tasks if
set(['include', 'include_tasks',
'import_playbook', 'import_tasks']).isdisjoint(task.keys())]
def get_normalized_tasks(yaml, file):
tasks = get_action_tasks(yaml, file)
res = []
for task in tasks:
# An empty `tags` block causes `None` to be returned if
# the `or []` is not present - `task.get('tags', [])`
# does not suffice.
if 'skip_ansible_lint' in (task.get('tags') or []):
# No need to normalize_task if we are skipping it.
continue
res.append(normalize_task(task, file['path']))
return res
def parse_yaml_linenumbers(data, filename):
"""Parses yaml as ansible.utils.parse_yaml but with linenumbers.
The line numbers are stored in each node's LINE_NUMBER_KEY key.
"""
def compose_node(parent, index):
# the line number where the previous token has ended (plus empty lines)
line = loader.line
node = Composer.compose_node(loader, parent, index)
node.__line__ = line + 1
return node
def construct_mapping(node, deep=False):
if ANSIBLE_VERSION < 2:
mapping = Constructor.construct_mapping(loader, node, deep=deep)
else:
mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
if hasattr(node, '__line__'):
mapping[LINE_NUMBER_KEY] = node.__line__
else:
mapping[LINE_NUMBER_KEY] = mapping._line_number
mapping[FILENAME_KEY] = filename
return mapping
try:
if ANSIBLE_VERSION < 2:
loader = yaml.Loader(data)
else:
import inspect
kwargs = {}
if 'vault_password' in inspect.getargspec(AnsibleLoader.__init__).args:
kwargs['vault_password'] = DEFAULT_VAULT_PASSWORD
loader = AnsibleLoader(data, **kwargs)
loader.compose_node = compose_node
loader.construct_mapping = construct_mapping
data = loader.get_single_data()
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
raise SystemExit("Failed to parse YAML in %s: %s" % (filename, str(e)))
return data
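# Added note (not part of the original module): after parsing, every mapping in the
# returned data also carries the bookkeeping keys, e.g. a task parsed from the first
# line of a file would contain something like
#     {'name': 'demo', ..., '__line__': 1, '__file__': filename}
# which later lets rules report the file and line of each offending task.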
|
|
import os
import os.path
import sys
import string
from datetime import date
import itertools
import operator
import osgeo
from osgeo import ogr
import utils
# These determine default isochrone time maximums and increments
DEF_ISO_MAX=40
DEF_ISO_INC=10
CORRECT_NODATA_VALUE_BYTE=128
def make_average_raster(save_path, save_suffix, loc_name, datestr, timestr,
nearby_minutes, num_each_side, **kwargs):
# N.B. :- there may be other kwargs passed in, not relevant here, which we ignore,
# hence kwargs on the end.
mins_diffs = utils.get_nearby_min_diffs(nearby_minutes, num_each_side)
date_time_str_set = utils.get_date_time_string_set(datestr, timestr,
mins_diffs)
fnames = utils.get_raster_filenames(loc_name, date_time_str_set, save_path,
save_suffix)
# This VRT step is necessary because, for this kind of 'raster algebra',
# the original type is Byte, and it won't hold a value
# of over 128 properly. So we temporarily transform to Float32 using the
# "VRT" feature of GDAL before doing the calculation. See:
# http://gis.stackexchange.com/questions/33152/why-do-i-get-different-results-with-gdal-calc-within-the-osgeo4w-shell-and-qgis
vrtnames = []
for fname in fnames:
vrtname = os.path.splitext(fname)[0]+".vrt"
vrtnames.append(vrtname)
for fname in fnames:
editcmd = "gdal_edit.py -a_nodata %d %s" \
% (CORRECT_NODATA_VALUE_BYTE, fname)
print "Running %s:" % editcmd
os.system(editcmd)
for ii in range(len(fnames)):
transcmd = "gdal_translate -ot Float32 -of VRT %s %s" \
% (fnames[ii], vrtnames[ii])
print "Running %s:" % transcmd
os.system(transcmd)
# Now do the big average
avg_fname = utils.avgRasterName(loc_name, datestr, timestr, save_path,
save_suffix, num_each_side)
caps = string.ascii_uppercase[:len(fnames)]
vrts_str = " ".join(["-%s %s" % (a, b) for a, b in zip(caps, vrtnames)])
calc_str = "("+"+".join(caps)+")"+"/"+str(len(vrtnames))
calccmd = 'gdal_calc.py %s --outfile=%s --NoDataValue=%d --calc="%s" '\
'--type=Byte --overwrite' \
% (vrts_str, avg_fname, CORRECT_NODATA_VALUE_BYTE, calc_str)
print "Running %s:" % calccmd
os.system(calccmd)
return
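# Added note (not part of the original script): with three input rasters the
# constructed command would look roughly like
#   gdal_calc.py -A r1.vrt -B r2.vrt -C r3.vrt --outfile=<avg> \
#       --NoDataValue=128 --calc="(A+B+C)/3" --type=Byte --overwrite
# i.e. a plain per-cell average of the nearby-time rasters (filenames hypothetical).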
def make_contours_isobands(save_path, save_suffix, loc_name, datestr, timestr,
num_each_side, iso_max, iso_inc, **kwargs):
# N.B. :- again kwargs is needed at the end to ignore unneeded args.
iso_timeset = range(0, iso_max+1, iso_inc)[1:]
avg_fname = utils.avgRasterName(loc_name, datestr, timestr,
save_path, save_suffix, num_each_side)
ctr_fname = utils.isoContoursName(loc_name, datestr, timestr,
save_path, save_suffix, num_each_side)
if os.path.exists(ctr_fname):
os.unlink(ctr_fname)
timeset_str = " ".join([str(tval+0.1) for tval in iso_timeset])
contourcmd = 'gdal_contour -a %s %s %s -nln isochrones -fl %s' \
% (utils.TIME_FIELD, avg_fname, ctr_fname, timeset_str)
print "Running %s:" % contourcmd
os.system(contourcmd)
isob_fname = utils.isoBandsName(loc_name, datestr, timestr,
save_path, save_suffix, num_each_side)
isob_all_fname = utils.isoBandsAllName(loc_name, datestr, timestr,
save_path, save_suffix, num_each_side)
# Line below calls script that relies on Matplotlib
# Sourced from:
# https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
isobandscmd = 'isobands_matplotlib.py -up True -a %s %s %s '\
'-nln isochrones -i %d' \
% (utils.TIME_FIELD, avg_fname, isob_all_fname, iso_inc)
print "Running %s:" % isobandscmd
os.system(isobandscmd)
# These isobands will include all isobands up to OTP's max (128 mins).
# For the sake of this project we just want a subset, defined by
# our timeset list.
# Thanks to https://github.com/dwtkns/gdal-cheat-sheet for this
if os.path.exists(isob_fname):
os.unlink(isob_fname)
# Watch out that ogr2ogr takes _dest_ file before _src_ file.
isobandssubsetcmd = 'ogr2ogr -where "%s <= %d" %s %s' \
% (utils.TIME_FIELD, iso_timeset[-1], isob_fname, isob_all_fname)
print "Running %s:" % isobandssubsetcmd
os.system(isobandssubsetcmd)
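# Added note (not part of the original script): with the defaults DEF_ISO_MAX=40 and
# DEF_ISO_INC=10, iso_timeset is [10, 20, 30, 40], so gdal_contour is passed the
# fixed levels '10.1 20.1 30.1 40.1' (the +0.1 nudges each contour just above the
# whole-minute boundary) and the final ogr2ogr step keeps only isobands with
# travel time <= 40 minutes.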
def extract_and_smooth_isobands(save_path, save_suffix, loc_name, datestr,
timestr, num_each_side, iso_max, iso_inc, **kwargs):
isob_fname = utils.isoBandsName(loc_name, datestr, timestr, save_path,
save_suffix, num_each_side)
if not os.path.exists(isob_fname):
print "Warning: need pre-existing vector isobands file to extract "\
"from and smooth, generating now ..."
make_contours_isobands(save_path, save_suffix, loc_name, datestr,
timestr, num_each_side, iso_max, iso_inc, **kwargs)
# Import these here in case user doesn't have shapely installed.
import shapely_smoother
import get_polys_at_level
print "Beginning extracting and smoothing isoband vectors ..."
for iso_level in range(iso_inc, iso_max+1, iso_inc):
polys_fname = utils.polysIsoBandsName(loc_name, datestr,
timestr, save_path, save_suffix, num_each_side, iso_level)
print "Extract polygons for iso level %d to %s:" % \
(iso_level, polys_fname)
get_polys_at_level.get_polys_at_level(isob_fname, polys_fname,
utils.TIME_FIELD, iso_level)
smoothed_fname = utils.smoothedIsoBandsName(loc_name, datestr,
timestr, save_path, save_suffix, num_each_side, iso_level)
print "Smoothing these and saving to file %s:" % \
(smoothed_fname)
shapely_smoother.smooth_all_polygons(polys_fname, smoothed_fname)
print "Done."
def combine_smoothed_isoband_files(save_path, save_suffix, loc_name,
datestr, timestr, num_each_side, iso_max, iso_inc, **kwargs):
#print "Combining smoothed isoband files into a single file:"
combined_smoothed_fname = utils.smoothedIsoBandsNameCombined(loc_name,
datestr, timestr, save_path, save_suffix, num_each_side)
# Open the first of the smoothed isobands files to get EPSG
first_smoothed_iso_fname = utils.smoothedIsoBandsName(loc_name, datestr,
timestr, save_path, save_suffix, num_each_side, iso_inc)
source = ogr.Open(first_smoothed_iso_fname)
if not source:
print "Error:- can't open individual smoothed isochrone shapefiles "\
"for location '%s' - file %s ." \
% (loc_name, first_smoothed_iso_fname)
sys.exit(1)
in_layer = source.GetLayer(0)
smoothed_srs = in_layer.GetSpatialRef()
# Create new file
import shapely_smoother
comb_ds, comb_layer = shapely_smoother.create_smoothed_isobands_shpfile(
combined_smoothed_fname, smoothed_srs, time_field=True)
comb_defn = comb_layer.GetLayerDefn()
in_layer = None
feat_id = 0
for iso_level in range(iso_inc, iso_max+1, iso_inc):
smoothed_iso_fname = utils.smoothedIsoBandsName(loc_name, datestr,
timestr, save_path, save_suffix, num_each_side, iso_level)
source = ogr.Open(smoothed_iso_fname)
in_layer = source.GetLayer(0)
# TODO:- potentially need to subtract polygons in previous layers
# during the process below, to support transparencies in visualisation
# etc.
for poly in in_layer:
# We need to adjust multipolygon IDs for use in the combined file.
new_poly = ogr.Feature(comb_defn)
new_poly.SetField(utils.ID_FIELD, feat_id)
feat_id += 1
new_poly.SetField(utils.TIME_FIELD, iso_level)
poly_geom = poly.GetGeometryRef()
new_poly.SetGeometry(poly_geom)
comb_layer.CreateFeature(new_poly)
in_layer = None
# Close, save the new shapefile.
comb_layer = None
comb_ds = None
#print "...done."
print "combined smoothed isochrones saved to file %s ." \
% combined_smoothed_fname
return
def generate_avg_rasters_and_isobands(multi_graph_iso_set):
for server_url, otp_router_id, save_path, save_suffix, isos_spec in \
multi_graph_iso_set:
datestr = isos_spec['date']
for loc_name in \
itertools.imap(operator.itemgetter(0), isos_spec['locations']):
for timestr in isos_spec['times']:
make_average_raster(save_path, save_suffix, loc_name, datestr,
timestr, **isos_spec)
make_contours_isobands(save_path, save_suffix, loc_name,
datestr, timestr, **isos_spec)
def generate_smoothed_isobands(multi_graph_iso_set):
for server_url, otp_router_id, save_path, save_suffix, isos_spec in \
multi_graph_iso_set:
datestr = isos_spec['date']
for loc_name in \
itertools.imap(operator.itemgetter(0), isos_spec['locations']):
for timestr in isos_spec['times']:
extract_and_smooth_isobands(save_path, save_suffix, loc_name,
datestr, timestr, **isos_spec)
def combine_smoothed_isobands_files(multi_graph_iso_set):
"""Combines all the separate smoothed isoband files (with e.g. -10, -20
extensions) into a single Shapefile with multiple multi-polygon shapes
defined (which makes consistent styling etc easier)."""
for server_url, otp_router_id, save_path, save_suffix, isos_spec in \
multi_graph_iso_set:
datestr = isos_spec['date']
print "Creating combined smoothed isochrones shapefiles for "\
"results from graph '%s', at date '%s', times %s:" \
% (otp_router_id, datestr, isos_spec['times'])
for loc_name in \
itertools.imap(operator.itemgetter(0), isos_spec['locations']):
print "Creating combined smoothed isochrones shapefile for "\
"location '%s':" % loc_name
for timestr in isos_spec['times']:
print "Creating combined shapefile at date, time %s - %s" % \
(datestr, timestr)
combine_smoothed_isoband_files(save_path, save_suffix, loc_name,
datestr, timestr, **isos_spec)
return
|
|
"""Populate measurement info."""
# Author: Eric Larson <larson.eric.d<gmail.com>
#
# License: BSD (3-clause)
from time import strptime
from calendar import timegm
import os.path as op
import numpy as np
from ...utils import logger, warn, _clean_names
from ...transforms import (apply_trans, _coord_frame_name, invert_transform,
combine_transforms)
from ...annotations import Annotations
from ..meas_info import _empty_info
from ..write import get_new_file_id
from ..ctf_comp import _add_kind, _calibrate_comp
from ..constants import FIFF
from .constants import CTF
_ctf_to_fiff = {CTF.CTFV_COIL_LPA: FIFF.FIFFV_POINT_LPA,
CTF.CTFV_COIL_RPA: FIFF.FIFFV_POINT_RPA,
CTF.CTFV_COIL_NAS: FIFF.FIFFV_POINT_NASION}
def _pick_isotrak_and_hpi_coils(res4, coils, t):
"""Pick the HPI coil locations given in device coordinates."""
if coils is None:
return list(), list()
dig = list()
hpi_result = dict(dig_points=list())
n_coil_dev = 0
n_coil_head = 0
for p in coils:
if p['valid']:
if p['kind'] in [CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA,
CTF.CTFV_COIL_NAS]:
kind = FIFF.FIFFV_POINT_CARDINAL
ident = _ctf_to_fiff[p['kind']]
else: # CTF.CTFV_COIL_SPARE
kind = FIFF.FIFFV_POINT_HPI
ident = p['kind']
if p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE:
if t is None or t['t_ctf_dev_dev'] is None:
raise RuntimeError('No coordinate transformation '
'available for HPI coil locations')
d = dict(kind=kind, ident=ident,
r=apply_trans(t['t_ctf_dev_dev'], p['r']),
coord_frame=FIFF.FIFFV_COORD_UNKNOWN)
hpi_result['dig_points'].append(d)
n_coil_dev += 1
elif p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
if t is None or t['t_ctf_head_head'] is None:
raise RuntimeError('No coordinate transformation '
'available for (virtual) Polhemus data')
d = dict(kind=kind, ident=ident,
r=apply_trans(t['t_ctf_head_head'], p['r']),
coord_frame=FIFF.FIFFV_COORD_HEAD)
dig.append(d)
n_coil_head += 1
if n_coil_head > 0:
logger.info(' Polhemus data for %d HPI coils added' % n_coil_head)
if n_coil_dev > 0:
logger.info(' Device coordinate locations for %d HPI coils added'
% n_coil_dev)
return dig, [hpi_result]
def _convert_time(date_str, time_str):
"""Convert date and time strings to float time."""
for fmt in ("%d/%m/%Y", "%d-%b-%Y", "%a, %b %d, %Y"):
try:
date = strptime(date_str.strip(), fmt)
except ValueError:
pass
else:
break
else:
raise RuntimeError(
'Illegal date: %s.\nIf the language of the date does not '
'correspond to your local machine\'s language try to set the '
'locale to the language of the date string:\n'
'locale.setlocale(locale.LC_ALL, "en_US")' % date_str)
for fmt in ('%H:%M:%S', '%H:%M'):
try:
time = strptime(time_str, fmt)
except ValueError:
pass
else:
break
else:
raise RuntimeError('Illegal time: %s' % time_str)
# MNE-C uses mktime, which relies on local time. Here we instead decouple
# the conversion from the local machine's timezone and assume that the
# acquisition was in GMT. This will be wrong for most sites, but at least
# the value we obtain here won't depend on the geographical location
# where the file was converted.
res = timegm((date.tm_year, date.tm_mon, date.tm_mday,
time.tm_hour, time.tm_min, time.tm_sec,
date.tm_wday, date.tm_yday, date.tm_isdst))
return res
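# Added example (not part of the original module): _convert_time('11/12/2013',
# '10:23:15') matches the '%d/%m/%Y' and '%H:%M:%S' formats (11 December 2013,
# 10:23:15) and returns the corresponding number of seconds since the Unix epoch,
# computed as if the acquisition clock had been in GMT.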
def _get_plane_vectors(ez):
"""Get two orthogonal vectors orthogonal to ez (ez will be modified)."""
assert ez.shape == (3,)
ez_len = np.sqrt(np.sum(ez * ez))
if ez_len == 0:
raise RuntimeError('Zero length normal. Cannot proceed.')
if np.abs(ez_len - np.abs(ez[2])) < 1e-5: # ez already in z-direction
ex = np.array([1., 0., 0.])
else:
ex = np.zeros(3)
if ez[1] < ez[2]:
ex[0 if ez[0] < ez[1] else 1] = 1.
else:
ex[0 if ez[0] < ez[2] else 2] = 1.
ez /= ez_len
ex -= np.dot(ez, ex) * ez
ex /= np.sqrt(np.sum(ex * ex))
ey = np.cross(ez, ex)
return ex, ey
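# Added sanity-check note (not part of the original module): for any non-zero ez the
# returned ex, ey together with the (now normalized, modified in place) ez form a
# right-handed orthonormal triple, i.e. np.allclose(np.cross(ex, ey), ez) holds.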
def _at_origin(x):
"""Determine if a vector is at the origin."""
return (np.sum(x * x) < 1e-8)
def _check_comp_ch(cch, kind, desired=None):
if desired is None:
desired = cch['grad_order_no']
if cch['grad_order_no'] != desired:
raise RuntimeError('%s channel with inconsistent compensation '
'grade %s, should be %s'
% (kind, cch['grad_order_no'], desired))
return desired
def _convert_channel_info(res4, t, use_eeg_pos):
"""Convert CTF channel information to fif format."""
nmeg = neeg = nstim = nmisc = nref = 0
chs = list()
this_comp = None
for k, cch in enumerate(res4['chs']):
cal = float(1. / (cch['proper_gain'] * cch['qgain']))
ch = dict(scanno=k + 1, range=1., cal=cal, loc=np.full(12, np.nan),
unit_mul=FIFF.FIFF_UNITM_NONE, ch_name=cch['ch_name'][:15],
coil_type=FIFF.FIFFV_COIL_NONE)
del k
chs.append(ch)
# Create the channel position information
if cch['sensor_type_index'] in (CTF.CTFV_REF_MAG_CH,
CTF.CTFV_REF_GRAD_CH,
CTF.CTFV_MEG_CH):
# Extra check for a valid MEG channel
if np.sum(cch['coil']['pos'][0] ** 2) < 1e-6 or \
np.sum(cch['coil']['norm'][0] ** 2) < 1e-6:
nmisc += 1
ch.update(logno=nmisc, coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V)
text = 'MEG'
if cch['sensor_type_index'] != CTF.CTFV_MEG_CH:
text += ' ref'
warn('%s channel %s did not have position assigned, so '
'it was changed to a MISC channel'
% (text, ch['ch_name']))
continue
ch['unit'] = FIFF.FIFF_UNIT_T
# Set up the local coordinate frame
r0 = cch['coil']['pos'][0].copy()
ez = cch['coil']['norm'][0].copy()
# It turns out that positive proper_gain requires swapping
# of the normal direction
if cch['proper_gain'] > 0.0:
ez *= -1
# Check how the other vectors should be defined
off_diag = False
# Default: ex and ey are arbitrary in the plane normal to ez
if cch['sensor_type_index'] == CTF.CTFV_REF_GRAD_CH:
# The off-diagonal gradiometers are an exception:
#
# We use the same convention for ex as for Neuromag planar
# gradiometers: ex pointing in the positive gradient direction
diff = cch['coil']['pos'][0] - cch['coil']['pos'][1]
size = np.sqrt(np.sum(diff * diff))
if size > 0.:
diff /= size
# Is ez normal to the line joining the coils?
if np.abs(np.dot(diff, ez)) < 1e-3:
off_diag = True
# Handle the off-diagonal gradiometer coordinate system
r0 -= size * diff / 2.0
ex = diff
ey = np.cross(ez, ex)
else:
ex, ey = _get_plane_vectors(ez)
else:
ex, ey = _get_plane_vectors(ez)
# Transform into a Neuromag-like device coordinate system
ch['loc'] = np.concatenate([
apply_trans(t['t_ctf_dev_dev'], r0),
apply_trans(t['t_ctf_dev_dev'], ex, move=False),
apply_trans(t['t_ctf_dev_dev'], ey, move=False),
apply_trans(t['t_ctf_dev_dev'], ez, move=False)])
del r0, ex, ey, ez
# Set the coil type
if cch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH:
ch['kind'] = FIFF.FIFFV_REF_MEG_CH
ch['coil_type'] = FIFF.FIFFV_COIL_CTF_REF_MAG
nref += 1
ch['logno'] = nref
elif cch['sensor_type_index'] == CTF.CTFV_REF_GRAD_CH:
ch['kind'] = FIFF.FIFFV_REF_MEG_CH
if off_diag:
ch['coil_type'] = FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD
else:
ch['coil_type'] = FIFF.FIFFV_COIL_CTF_REF_GRAD
nref += 1
ch['logno'] = nref
else:
this_comp = _check_comp_ch(cch, 'Gradiometer', this_comp)
ch['kind'] = FIFF.FIFFV_MEG_CH
ch['coil_type'] = FIFF.FIFFV_COIL_CTF_GRAD
nmeg += 1
ch['logno'] = nmeg
# Encode the software gradiometer order
ch['coil_type'] = int(
ch['coil_type'] | (cch['grad_order_no'] << 16))
ch['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
elif cch['sensor_type_index'] == CTF.CTFV_EEG_CH:
coord_frame = FIFF.FIFFV_COORD_HEAD
if use_eeg_pos:
# EEG electrode coordinates may be present but in the
# CTF head frame
ch['loc'][:3] = cch['coil']['pos'][0]
if not _at_origin(ch['loc'][:3]):
if t['t_ctf_head_head'] is None:
warn('EEG electrode (%s) location omitted because of '
'missing HPI information' % ch['ch_name'])
ch['loc'].fill(np.nan)
coord_frame = FIFF.FIFFV_COORD_CTF_HEAD
else:
ch['loc'][:3] = apply_trans(
t['t_ctf_head_head'], ch['loc'][:3])
neeg += 1
ch.update(logno=neeg, kind=FIFF.FIFFV_EEG_CH,
unit=FIFF.FIFF_UNIT_V, coord_frame=coord_frame,
coil_type=FIFF.FIFFV_COIL_EEG)
elif cch['sensor_type_index'] == CTF.CTFV_STIM_CH:
nstim += 1
ch.update(logno=nstim, coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
kind=FIFF.FIFFV_STIM_CH, unit=FIFF.FIFF_UNIT_V)
else:
nmisc += 1
ch.update(logno=nmisc, coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V)
return chs
def _comp_sort_keys(c):
"""Sort the compensation data."""
return (int(c['coeff_type']), int(c['scanno']))
def _check_comp(comp):
"""Check that conversion to named matrices is possible."""
ref_sens = None
kind = -1
for k, c_k in enumerate(comp):
if c_k['coeff_type'] != kind:
c_ref = c_k
ref_sens = c_ref['sensors']
kind = c_k['coeff_type']
elif not c_k['sensors'] == ref_sens:
raise RuntimeError('Cannot use an uneven compensation matrix')
def _conv_comp(comp, first, last, chs):
"""Add a new converted compensation data item."""
ch_names = [c['ch_name'] for c in chs]
n_col = comp[first]['ncoeff']
col_names = comp[first]['sensors'][:n_col]
row_names = [comp[p]['sensor_name'] for p in range(first, last + 1)]
mask = np.in1d(col_names, ch_names) # missing channels excluded
col_names = np.array(col_names)[mask].tolist()
n_col = len(col_names)
n_row = len(row_names)
ccomp = dict(ctfkind=np.array([comp[first]['coeff_type']]),
save_calibrated=False)
_add_kind(ccomp)
data = np.empty((n_row, n_col))
for ii, coeffs in enumerate(comp[first:last + 1]):
# Pick the elements to the matrix
data[ii, :] = coeffs['coeffs'][mask]
ccomp['data'] = dict(row_names=row_names, col_names=col_names,
data=data, nrow=len(row_names), ncol=len(col_names))
mk = ('proper_gain', 'qgain')
_calibrate_comp(ccomp, chs, row_names, col_names, mult_keys=mk, flip=True)
return ccomp
def _convert_comp_data(res4):
"""Convert the compensation data into named matrices."""
if res4['ncomp'] == 0:
return
# Sort the coefficients in our favorite order
res4['comp'] = sorted(res4['comp'], key=_comp_sort_keys)
# Check that all items for a given compensation type have the correct
# number of channels
_check_comp(res4['comp'])
# Create named matrices
first = 0
kind = -1
comps = list()
for k in range(len(res4['comp'])):
if res4['comp'][k]['coeff_type'] != kind:
if k > 0:
comps.append(_conv_comp(res4['comp'], first, k - 1,
res4['chs']))
kind = res4['comp'][k]['coeff_type']
first = k
comps.append(_conv_comp(res4['comp'], first, k, res4['chs']))
return comps
def _pick_eeg_pos(c):
"""Pick EEG positions."""
eeg = dict(coord_frame=FIFF.FIFFV_COORD_HEAD, assign_to_chs=False,
labels=list(), ids=list(), rr=list(), kinds=list(), np=0)
for ch in c['chs']:
if ch['kind'] == FIFF.FIFFV_EEG_CH and not _at_origin(ch['loc'][:3]):
eeg['labels'].append(ch['ch_name'])
eeg['ids'].append(ch['logno'])
eeg['rr'].append(ch['loc'][:3])
eeg['kinds'].append(FIFF.FIFFV_POINT_EEG)
eeg['np'] += 1
if eeg['np'] == 0:
return None
logger.info('Picked positions of %d EEG channels from channel info'
% eeg['np'])
return eeg
def _add_eeg_pos(eeg, t, c):
"""Pick the (virtual) EEG position data."""
if eeg is None:
return
if t is None or t['t_ctf_head_head'] is None:
raise RuntimeError('No coordinate transformation available for EEG '
'position data')
eeg_assigned = 0
if eeg['assign_to_chs']:
for k in range(eeg['np']):
# Look for a channel name match
for ch in c['chs']:
if ch['ch_name'].lower() == eeg['labels'][k].lower():
r0 = ch['loc'][:3]
r0[:] = eeg['rr'][k]
if eeg['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
r0[:] = apply_trans(t['t_ctf_head_head'], r0)
elif eeg['coord_frame'] != FIFF.FIFFV_COORD_HEAD:
raise RuntimeError(
'Illegal coordinate frame for EEG electrode '
'positions : %s'
% _coord_frame_name(eeg['coord_frame']))
# Use the logical channel number as an identifier
eeg['ids'][k] = ch['logno']
eeg['kinds'][k] = FIFF.FIFFV_POINT_EEG
eeg_assigned += 1
break
# Add these to the Polhemus data
fid_count = eeg_count = extra_count = 0
for k in range(eeg['np']):
d = dict(r=eeg['rr'][k].copy(), kind=eeg['kinds'][k],
ident=eeg['ids'][k], coord_frame=FIFF.FIFFV_COORD_HEAD)
c['dig'].append(d)
if eeg['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
d['r'] = apply_trans(t['t_ctf_head_head'], d['r'])
elif eeg['coord_frame'] != FIFF.FIFFV_COORD_HEAD:
raise RuntimeError('Illegal coordinate frame for EEG electrode '
'positions: %s'
% _coord_frame_name(eeg['coord_frame']))
if eeg['kinds'][k] == FIFF.FIFFV_POINT_CARDINAL:
fid_count += 1
elif eeg['kinds'][k] == FIFF.FIFFV_POINT_EEG:
eeg_count += 1
else:
extra_count += 1
if eeg_assigned > 0:
logger.info(' %d EEG electrode locations assigned to channel info.'
% eeg_assigned)
for count, kind in zip((fid_count, eeg_count, extra_count),
('fiducials', 'EEG locations', 'extra points')):
if count > 0:
logger.info(' %d %s added to Polhemus data.' % (count, kind))
_filt_map = {CTF.CTFV_FILTER_LOWPASS: 'lowpass',
CTF.CTFV_FILTER_HIGHPASS: 'highpass'}
def _compose_meas_info(res4, coils, trans, eeg):
"""Create meas info from CTF data."""
info = _empty_info(res4['sfreq'])
# Collect all the necessary data from the structures read
info['meas_id'] = get_new_file_id()
info['meas_id']['usecs'] = 0
info['meas_id']['secs'] = _convert_time(res4['data_date'],
res4['data_time'])
info['meas_date'] = (info['meas_id']['secs'], info['meas_id']['usecs'])
info['experimenter'] = res4['nf_operator']
info['subject_info'] = dict(his_id=res4['nf_subject_id'])
for filt in res4['filters']:
if filt['type'] in _filt_map:
info[_filt_map[filt['type']]] = filt['freq']
info['dig'], info['hpi_results'] = _pick_isotrak_and_hpi_coils(
res4, coils, trans)
if trans is not None:
if len(info['hpi_results']) > 0:
info['hpi_results'][0]['coord_trans'] = trans['t_ctf_head_head']
if trans['t_dev_head'] is not None:
info['dev_head_t'] = trans['t_dev_head']
info['dev_ctf_t'] = combine_transforms(
trans['t_dev_head'],
invert_transform(trans['t_ctf_head_head']),
FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_MNE_COORD_CTF_HEAD)
if trans['t_ctf_head_head'] is not None:
info['ctf_head_t'] = trans['t_ctf_head_head']
info['chs'] = _convert_channel_info(res4, trans, eeg is None)
info['comps'] = _convert_comp_data(res4)
if eeg is None:
# Pick EEG locations from chan info if not read from a separate file
eeg = _pick_eeg_pos(info)
_add_eeg_pos(eeg, trans, info)
logger.info(' Measurement info composed.')
info._update_redundant()
return info
def _read_bad_chans(directory, info):
"""Read Bad channel list and match to internal names."""
fname = op.join(directory, 'BadChannels')
if not op.exists(fname):
return []
mapping = dict(zip(_clean_names(info['ch_names']), info['ch_names']))
with open(fname, 'r') as fid:
bad_chans = [mapping[f.strip()] for f in fid.readlines()]
return bad_chans
def _annotate_bad_segments(directory, start_time, meas_date):
"""Read the bad.segments file and return Annotations (or None)."""
fname = op.join(directory, 'bad.segments')
if not op.exists(fname):
return None
# read in bad segment file
onsets = []
durations = []
desc = []
with open(fname, 'r') as fid:
for f in fid.readlines():
tmp = f.strip().split()
desc.append('bad_%s' % tmp[0])
onsets.append(np.float64(tmp[1]) - start_time)
durations.append(np.float64(tmp[2]) - np.float64(tmp[1]))
# return None if there are no bad segments
if len(onsets) == 0:
return None
return Annotations(onsets, durations, desc, meas_date)
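# Illustrative sketch (not part of the original reader): bad.segments is parsed
# above as whitespace-separated lines whose first token becomes the label
# suffix and whose second/third tokens are the segment start/stop in seconds.
# The helper below mirrors that parsing on an in-memory string; the sample
# contents are made up.
def _example_parse_bad_segments_text(text, start_time=0.0):
    onsets, durations, desc = [], [], []
    for line in text.strip().splitlines():
        label, start, stop = line.split()
        desc.append('bad_%s' % label)
        onsets.append(float(start) - start_time)
        durations.append(float(stop) - float(start))
    return onsets, durations, desc
# _example_parse_bad_segments_text("muscle 1.5 2.0\nblink 10.0 10.25")
# -> ([1.5, 10.0], [0.5, 0.25], ['bad_muscle', 'bad_blink'])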
|
|
import argparse
import numpy as np
from rstools.utils.batch_utils import iterate_minibatches
from tqdm import trange
from PG.reinforce import ReinforceAgent
from common.networks import activations
from wrappers.gym_wrappers import Transition
from wrappers.run_wrappers import typical_args, typical_argsparse, run_wrapper, update_wraper, \
epsilon_greedy_policy, play_session
def update(sess, reinforce_agent, transitions, initial_state=None,
discount_factor=0.99, reward_norm=1.0, batch_size=32, time_major=True):
policy_targets = []
state_history = []
action_history = []
cumulative_reward = np.zeros_like(transitions[-1].reward)
for transition in reversed(transitions):
cumulative_reward = reward_norm * transition.reward + \
np.invert(transition.done) * discount_factor * cumulative_reward
policy_targets.append(cumulative_reward)
state_history.append(transition.state)
action_history.append(transition.action)
# time-major
policy_targets = np.array(policy_targets[::-1])
state_history = np.array(state_history[::-1])
action_history = np.array(action_history[::-1])
time_len = state_history.shape[0]
policy_loss = 0.0
for state_axis, action_axis, policy_target_axis in \
zip(state_history, action_history, policy_targets):
axis_len = state_axis.shape[0]
axis_policy_loss = 0.0
state_axis = iterate_minibatches(state_axis, batch_size)
action_axis = iterate_minibatches(action_axis, batch_size)
policy_target_axis = iterate_minibatches(policy_target_axis, batch_size)
for state_batch, action_batch, policy_target in \
zip(state_axis, action_axis, policy_target_axis):
run_params = [
reinforce_agent.policy_net.loss,
reinforce_agent.policy_net.train_op,
reinforce_agent.feature_net.train_op]
feed_params = {
reinforce_agent.feature_net.states: state_batch,
reinforce_agent.feature_net.is_training: True,
reinforce_agent.policy_net.actions: action_batch,
reinforce_agent.policy_net.cumulative_rewards: policy_target,
reinforce_agent.policy_net.is_training: True
}
run_result = sess.run(
run_params,
feed_dict=feed_params)
batch_loss_policy = run_result[0]
axis_policy_loss += batch_loss_policy
policy_loss += axis_policy_loss / axis_len
return policy_loss / time_len
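# Minimal sketch (illustrative only) of the backward discounted-return
# recursion used in update() above: G_t = r_t + gamma * (1 - done_t) * G_{t+1}.
# The rewards/dones below are made up; only numpy is needed.
def _example_discounted_returns(rewards, dones, discount_factor=0.99):
    returns = []
    cumulative = np.zeros_like(rewards[-1])
    for reward, done in zip(reversed(rewards), reversed(dones)):
        cumulative = reward + np.invert(done) * discount_factor * cumulative
        returns.append(cumulative)
    return np.array(returns[::-1])
# _example_discounted_returns(
#     [np.array([1.0]), np.array([0.0]), np.array([1.0])],
#     [np.array([False]), np.array([False]), np.array([True])])
# -> approximately [[1.9801], [0.99], [1.0]]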
def generate_sessions(sess, a3c_agent, env_pool, update_fn, t_max=1000):
total_reward = 0.0
total_games = 0.0
transitions = []
states = env_pool.pool_states()
for t in range(t_max):
actions = epsilon_greedy_policy(a3c_agent, sess, states)
next_states, rewards, dones, _ = env_pool.step(actions)
transitions.append(Transition(
state=states, action=actions, reward=rewards, next_state=next_states, done=dones))
states = next_states
total_reward += rewards.sum()
total_games += dones.sum()
if env_pool.n_envs == 1 and total_games > 0:
break
total_policy_loss = update_fn(sess, a3c_agent, transitions)
return total_reward / env_pool.n_envs, \
total_policy_loss, \
t / (total_games / env_pool.n_envs)
def reinforce_learning(
sess, agent, env, update_fn,
n_epochs=1000, n_sessions=100, t_max=1000):
tr = trange(
n_epochs,
desc="",
leave=True)
history = {
"reward": np.zeros(n_epochs),
"policy_loss": np.zeros(n_epochs),
"steps": np.zeros(n_epochs),
}
for i in tr:
sessions = [
generate_sessions(sess, agent, env, update_fn, t_max)
for _ in range(n_sessions)]
session_rewards, session_policy_loss, session_steps = \
map(np.array, zip(*sessions))
history["reward"][i] = np.mean(session_rewards)
history["policy_loss"][i] = np.mean(session_policy_loss)
history["steps"][i] = np.mean(session_steps)
desc = "\t".join(
["{} = {:.3f}".format(key, value[i]) for key, value in history.items()])
tr.set_description(desc)
return history
def run(env_name, make_env_fn, agent_cls,
run_args, update_args, agent_args,
log_dir=None, episode_limit=None,
plot_stats=False, api_key=None,
load=False, gpu_option=0.4,
n_games=10):
run_wrapper(
n_games, reinforce_learning, update_wraper(update, **update_args),
play_session, epsilon_greedy_policy,
env_name, make_env_fn, agent_cls,
run_args, agent_args,
log_dir=log_dir, episode_limit=episode_limit,
plot_stats=plot_stats, api_key=api_key,
load=load, gpu_option=gpu_option)
def _parse_args():
parser = argparse.ArgumentParser(description='Reinforce Agent Learning')
# typical params
parser = typical_args(parser)
# agent special params & optimization
parser.add_argument(
'--policy_lr',
type=float,
default=1e-5,
help='Learning rate for policy network. (default: %(default)s)')
parser.add_argument(
'--entropy_factor',
type=float,
default=1e-2,
help='Entropy factor for policy network. (default: %(default)s)')
args = parser.parse_args()
return args
def main():
args = _parse_args()
assert args.time_major, "Please use the time_major flag for updates"
network, run_args, update_args, optimization_params, make_env_fn = typical_argsparse(args)
policy_optimization_params = {
**optimization_params,
**{"initial_lr": args.policy_lr}
}
policy_net_params = {
"entropy_factor": args.entropy_factor
}
agent_cls = ReinforceAgent
special = {
"policy_net": policy_net_params,
"hidden_size": args.hidden_size,
"hidden_activation": activations[args.hidden_activation],
"feature_net_optimization": optimization_params,
"hidden_state_optimization": optimization_params,
"policy_net_optimization": policy_optimization_params,
}
agent_args = {
"network": network,
"special": special
}
run(args.env, make_env_fn, agent_cls,
run_args, update_args, agent_args,
args.log_dir, args.episode_limit,
args.plot_history, args.api_key,
args.load, args.gpu_option,
args.n_games)
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# Copyright 2020 The Learning-to-Prompt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""General utility functions."""
import functools
import os
import time
from typing import Any, Dict, Sequence
from absl import logging
from clu import checkpoint
from clu import platform
import flax
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
from libml import utils_vit
import scipy
# get ViT specific loading
POS_EMBED = "PositionEmbedding" # Match the class name of PositionEmbedding
HEAD = "Dense"
def compute_flops(model_cls: Any,
variables: Dict[str, Any],
input_shape: Sequence[int],
fuse_multiply_add: bool = True) -> str:
"""Performs static analysis of the graph to compute theoretical FLOPs."""
if input_shape[0] != 1:
raise ValueError("FLOP test requires batch size dim is 1.")
model = model_cls(train=False)
def apply_fn(x):
return model.apply(variables, x, mutable=False)
model_input = jnp.ones(input_shape, dtype=jnp.float32)
# jax.xla_computation requires a function that takes only the input array.
m = jax.xla_computation(apply_fn)(model_input).as_hlo_module()
client = jax.lib.xla_bridge.get_backend()
analysis = jax.lib.xla_client._xla.hlo_module_cost_analysis(client, m) # pylint: disable=protected-access
flops = analysis["flops"]
if fuse_multiply_add:
flops = flops / 2
gflops = flops / (10**9)
logging.info("Module: GFLOPs %0.3f for input shape %s", gflops, input_shape)
message = "GFLOPS: %0.3f" % gflops
return message
def log_throughput(model_cls: Any,
variables: Dict[str, Any],
input_shape: Sequence[int],
iterations: int = 500) -> str:
"""Log throughput of models."""
model = model_cls(train=False)
inputs = jnp.ones(input_shape, jnp.float32)
batch_size = inputs.shape[0]
logging.info("Start to compute throughput for input %s...", input_shape)
apply_fn = jax.jit(functools.partial(model.apply, mutable=False))
# Let it compile first with a few warm-up runs.
for _ in range(10):
y = apply_fn(variables, inputs)
start = time.time()
for _ in range(iterations):
y = apply_fn(variables, inputs)
y.block_until_ready()
total_time = time.time() - start
logging.info("Cuda time cost per iteration %.3f", total_time / iterations)
message = "Throughput: %.3f image/s" % (iterations * batch_size / total_time)
logging.info(message)
return message
def cosine_decay(lr: float, step: float, total_steps: int):
ratio = jnp.maximum(0., step / total_steps)
mult = 0.5 * (1. + jnp.cos(jnp.pi * ratio))
return mult * lr
def linear_decay(lr: float, step: float, total_steps: int):
ratio = jnp.maximum(0., step / total_steps)
return lr * (1 - ratio)
def get_learning_rate(step: int,
*,
base_learning_rate: float,
steps_per_epoch: int,
num_epochs: int,
schedule: str = "cosine",
warmup_epochs: int = 5,
min_learning_rate: float = 0.):
"""Cosine learning rate schedule."""
logging.info(
"get_learning_rate(step=%s, base_learning_rate=%s, steps_per_epoch=%s, num_epochs=%s",
step, base_learning_rate, steps_per_epoch, num_epochs)
if steps_per_epoch <= 0:
raise ValueError(f"steps_per_epoch should be a positive integer but was "
f"{steps_per_epoch}.")
if warmup_epochs >= num_epochs:
raise ValueError(f"warmup_epochs should be smaller than num_epochs. "
f"Currently warmup_epochs is {warmup_epochs}, "
f"and num_epochs is {num_epochs}.")
epoch = step / steps_per_epoch
if schedule == "cosine":
lr = cosine_decay(base_learning_rate, epoch - warmup_epochs,
num_epochs - warmup_epochs)
elif schedule == "linear":
lr = linear_decay(base_learning_rate, epoch - warmup_epochs,
num_epochs - warmup_epochs)
elif schedule == "constant":
lr = jnp.array(base_learning_rate)
warmup = jnp.minimum(1., epoch / warmup_epochs)
return jnp.where(warmup < 1, lr * warmup,
jnp.maximum(lr * warmup, min_learning_rate))
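# Illustrative usage sketch (hyperparameters are made up): with a 5-epoch
# linear warmup the schedule returns 0 at step 0 and reaches the full base
# learning rate exactly when the warmup ends.
#
#     lr_fn = functools.partial(
#         get_learning_rate, base_learning_rate=0.1, steps_per_epoch=100,
#         num_epochs=90, schedule="cosine", warmup_epochs=5)
#     float(lr_fn(0))    # -> 0.0 (start of warmup)
#     float(lr_fn(500))  # -> 0.1 (warmup finished, cosine decay starts here)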
def _reshape_position_embeddings(pa: jnp.ndarray, ratio: float) -> jnp.ndarray:
"""Resizes position embeddings with scipy zoom like ViT."""
b, n, s, d = pa.shape
h = w = int(np.sqrt(s))
# Two dimension spline interpolation.
pa = jnp.reshape(pa, (b, n, h, w, d))
newh = neww = int(jnp.ceil(h * ratio))
pa_new_numpy = scipy.ndimage.zoom(
np.array(pa), (1, 1, newh / h, neww / w, 1), order=1)
pa_new = jax.numpy.asarray(pa_new_numpy)
pa_new = jnp.reshape(pa_new, (b, n, newh * neww, d))
return pa_new
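# Illustrative shape example (made-up numbers): a position-embedding tensor of
# shape (1, 1, 196, 768) corresponds to a 14x14 token grid; with ratio=2.0 the
# spline interpolation above produces a 28x28 grid, i.e. an output of shape
# (1, 1, 784, 768).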
def _load_and_custom_init_checkpoint(init_state: Any,
checkpoint_path: str,
*,
resize_posembed: float = 1.0,
reinit_head: str = None) -> Any:
"""Load checkpoint for finetuing task."""
def _find_var_names(s):
return [i for i in s.keys()]
logging.info("Load finetune checkpoint from %s", checkpoint_path)
# 1) Copy model params into init_param_dict.
state = checkpoint.load_state_dict(os.path.split(checkpoint_path)[0])
init_param_dict = state["optimizer"]["target"]
state_params = flax.core.freeze(init_param_dict)
if resize_posembed != 1:
# resize_posembed represents the image size ratio (new size / original
# size in the checkpoint).
# 2) Resize POS_EMBED variables and update to init_param_dict
for pkey in init_param_dict.keys():
# POS_EMBED is assumed to exist in the first level of init_param_dict.
if POS_EMBED in pkey:
# Find variable name for POS_EMBED.
var_names = _find_var_names(init_param_dict[pkey])
assert len(var_names) == 1
var_name = var_names[0]
pa = state_params[pkey][var_name]
pa_new = _reshape_position_embeddings(pa, resize_posembed)
init_param_dict[pkey][var_name] = pa_new
pa_expected_shape = init_state.optimizer.target[pkey][var_name].shape
assert jnp.array_equal(pa_expected_shape, pa_new.shape)
logging.info("Reshape %s.%s from %s to %s", pkey, var_name, pa.shape,
pa_new.shape)
if reinit_head:
count = 0
# 3) Re-init classification head parameters.
for pkey in init_param_dict.keys():
# kernel/bias are assumed to exist in the first level of init_param_dict.
if HEAD in pkey:
var_names = _find_var_names(init_param_dict[pkey])
for var_name in var_names:
count += 1
pa = state_params[pkey][var_name]
if reinit_head == "zero_all":
pa_new = jnp.zeros_like(init_state.optimizer.target[pkey][var_name])
else:
raise NotImplementedError(
f"reinit_head mode {reinit_head} not found.")
init_param_dict[pkey][var_name] = pa_new
logging.info("Zero init %s.%s (%s)", pkey, var_name, pa_new.shape)
assert count, "Does not found head parameters"
# 4): Copy model params to init_state.
optimizer = init_state.optimizer.replace(
target=flax.core.freeze(init_param_dict))
init_state = init_state.replace(optimizer=optimizer)
return init_state
def load_and_custom_init_checkpoint(*, config: ml_collections.ConfigDict,
init_state: Any) -> Any:
"""Load checkpoint for continual learning."""
model_type = config.model_name
checkpoint_path = config.init_checkpoint
if model_type.startswith("ViT"):
restored_params = utils_vit.load_pretrained(
pretrained_path=checkpoint_path,
init_params=init_state.optimizer.target,
model_config=config.model_config)
# Copy model params to init_state.
optimizer = init_state.optimizer.replace(target=restored_params)
init_state = init_state.replace(optimizer=optimizer)
return init_state
elif model_type.startswith("resnet"):
state = checkpoint.load_state_dict(config.init_checkpoint)
loaded_param_dict = state["optimizer"]["target"]
loaded_model_state = state["model_state"]
# we should always change the classification head
loaded_param_dict["head"]["kernel"] = init_state.optimizer.target["head"][
"kernel"]
loaded_param_dict["head"]["bias"] = init_state.optimizer.target["head"][
"bias"]
optimizer = init_state.optimizer.replace(
target=flax.core.freeze(loaded_param_dict))
init_state = init_state.replace(
optimizer=optimizer, model_state=loaded_model_state)
return init_state
def transfer_weights(config, param_dict, task_id, kernel_only=True):
"""Initialize new task classificatier with average of old tasks."""
param_dict = flax.core.unfreeze(param_dict)
# feature dim * num_classes
num_classes_per_task = config.continual.num_classes_per_task
kernel_old_tasks = param_dict["head"]["kernel"][:, :task_id *
num_classes_per_task]
kernel_old_tasks = jnp.reshape(
kernel_old_tasks, (kernel_old_tasks.shape[0], -1, num_classes_per_task))
mean_kernel_old_tasks = jnp.mean(kernel_old_tasks, axis=-2)
param_dict["head"]["kernel"] = (
param_dict["head"]["kernel"]
.at[:, task_id * num_classes_per_task:(task_id + 1) *
num_classes_per_task]
.set(mean_kernel_old_tasks))
if not kernel_only:
bias_old_tasks = param_dict["head"]["bias"][:task_id * num_classes_per_task]
bias_old_tasks = jnp.reshape(bias_old_tasks, (-1, num_classes_per_task))
mean_bias_old_tasks = jnp.mean(bias_old_tasks, axis=-2)
param_dict["head"]["bias"] = (
param_dict["head"]["bias"]
.at[task_id * num_classes_per_task:(task_id + 1) *
num_classes_per_task]
.set(mean_bias_old_tasks))
return flax.core.freeze(param_dict)
def weight_norm(param_dict, eps=1e-10):
"""Apply weight normalization to the last linear layer."""
param_dict = flax.core.unfreeze(param_dict)
kernel = param_dict["head"]["kernel"]
kernel_norm = jnp.linalg.norm(kernel, ord=2, axis=0, keepdims=True)
kernel = kernel / (eps + kernel_norm)
param_dict["head"]["kernel"] = kernel
return flax.core.freeze(param_dict)
def replace_cls(param_dict, cls):
"""Replace class token."""
param_dict = flax.core.unfreeze(param_dict)
old_cls = param_dict["cls"]
param_dict["cls"] = cls
return flax.core.freeze(param_dict), old_cls
def replace_prompt(param_dict, prompt_para):
"""Replace task-specific prompt."""
param_dict = flax.core.unfreeze(param_dict)
old_prompt_para = param_dict["task_specific_prompt"]["prompt"]
param_dict["task_specific_prompt"]["prompt"] = prompt_para
return flax.core.freeze(param_dict), old_prompt_para
def replace_prompt_weight(param_dict, prompt_weight_para):
"""Replace class token."""
param_dict = flax.core.unfreeze(param_dict)
old_prompt_weight_para = param_dict["reweight"]
param_dict["reweight"] = prompt_weight_para
return flax.core.freeze(param_dict), old_prompt_weight_para
def replace_prompt_pool(param_dict, prompt_pool_para):
"""Replace class token."""
param_dict = flax.core.unfreeze(param_dict)
old_prompt_pool_para = param_dict["prompt_pool"]["prompt"]
param_dict["prompt_pool"]["prompt"] = prompt_pool_para
return flax.core.freeze(param_dict), old_prompt_pool_para
def replace_prompt_key(param_dict, prompt_key_para):
"""Replace class token."""
param_dict = flax.core.unfreeze(param_dict)
old_prompt_key_para = param_dict["prompt_pool"]["key"]
param_dict["prompt_pool"]["key"] = prompt_key_para
return flax.core.freeze(param_dict), old_prompt_key_para
def state_with_new_param(state, param_dict):
"""Return a new state with new parameters."""
optimizer = state.optimizer.replace(target=param_dict)
state = state.replace(optimizer=optimizer)
return state
def get_embedding_params(param_dict):
"""Get the parameters of an embedding layer."""
embedding = param_dict["embedding"]
embedding_params = {"embedding": embedding}
return flax.core.freeze(embedding_params)
|
|
from __future__ import division
import logging
import base64
import hmac
import random
import re
import os.path
import string
from binascii import unhexlify
from collections import namedtuple
from copy import deepcopy
from hashlib import sha256
from io import BytesIO
from math import ceil
from .flvconcat import FLVTagConcat
from .segmented import (SegmentedStreamReader,
SegmentedStreamWriter,
SegmentedStreamWorker)
from .stream import Stream
from .wrappers import StreamIOIterWrapper
from ..cache import Cache
from ..compat import parse_qsl, urljoin, urlparse, urlunparse, bytes, range
from ..exceptions import StreamError, PluginError
from ..utils import absolute_url, swfdecompress
from ..packages.flashmedia import F4V, F4VError
from ..packages.flashmedia.box import Box
from ..packages.flashmedia.tag import ScriptData, Tag, TAG_TYPE_SCRIPT
log = logging.getLogger(__name__)
# Akamai HD player verification key
# Use unhexlify() rather than bytes.fromhex() for compatibility with
# Python 2. Note that in Python 3.2 (but not 3.3+), unhexlify only accepts
# byte strings.
AKAMAIHD_PV_KEY = unhexlify(
b"BD938D5EE6D9F42016F9C56577B6FDCF415FE4B184932B785AB32BCADC9BB592")
# Some streams hosted by Akamai seem to require a hdcore parameter
# to function properly.
HDCORE_VERSION = "3.1.0"
# Fragment URL format
FRAGMENT_URL = "{url}{identifier}{quality}Seg{segment}-Frag{fragment}"
Fragment = namedtuple("Fragment", "segment fragment duration url")
class HDSStreamWriter(SegmentedStreamWriter):
def __init__(self, reader, *args, **kwargs):
options = reader.stream.session.options
kwargs["retries"] = options.get("hds-segment-attempts")
kwargs["threads"] = options.get("hds-segment-threads")
kwargs["timeout"] = options.get("hds-segment-timeout")
SegmentedStreamWriter.__init__(self, reader, *args, **kwargs)
duration, tags = None, []
if self.stream.metadata:
duration = self.stream.metadata.value.get("duration")
tags = [Tag(TAG_TYPE_SCRIPT, timestamp=0,
data=self.stream.metadata)]
self.concater = FLVTagConcat(tags=tags,
duration=duration,
flatten_timestamps=True)
def fetch(self, fragment, retries=None):
if self.closed or not retries:
return
try:
request_params = self.stream.request_params.copy()
params = request_params.pop("params", {})
params.pop("g", None)
return self.session.http.get(fragment.url,
stream=True,
timeout=self.timeout,
exception=StreamError,
params=params,
**request_params)
except StreamError as err:
log.error("Failed to open fragment {0}-{1}: {2}",
fragment.segment, fragment.fragment, err)
return self.fetch(fragment, retries - 1)
def write(self, fragment, res, chunk_size=8192):
fd = StreamIOIterWrapper(res.iter_content(chunk_size))
self.convert_fragment(fragment, fd)
def convert_fragment(self, fragment, fd):
mdat = None
try:
f4v = F4V(fd, raw_payload=True)
# Fast forward to mdat box
for box in f4v:
if box.type == "mdat":
mdat = box.payload.data
break
except F4VError as err:
log.error("Failed to parse fragment {0}-{1}: {2}",
fragment.segment, fragment.fragment, err)
return
if not mdat:
log.error("No MDAT box found in fragment {0}-{1}",
fragment.segment, fragment.fragment)
return
try:
for chunk in self.concater.iter_chunks(buf=mdat, skip_header=True):
self.reader.buffer.write(chunk)
if self.closed:
break
else:
log.debug("Download of fragment {0}-{1} complete",
fragment.segment, fragment.fragment)
except IOError as err:
if "Unknown tag type" in str(err):
log.error("Unknown tag type found, this stream is "
"probably encrypted")
self.close()
return
log.error("Error reading fragment {0}-{1}: {2}",
fragment.segment, fragment.fragment, err)
class HDSStreamWorker(SegmentedStreamWorker):
def __init__(self, *args, **kwargs):
SegmentedStreamWorker.__init__(self, *args, **kwargs)
self.bootstrap = self.stream.bootstrap
self.current_segment = -1
self.current_fragment = -1
self.first_fragment = 1
self.last_fragment = -1
self.end_fragment = None
self.bootstrap_minimal_reload_time = 2.0
self.bootstrap_reload_time = self.bootstrap_minimal_reload_time
self.invalid_fragments = set()
self.live_edge = self.session.options.get("hds-live-edge")
self.update_bootstrap()
def update_bootstrap(self):
log.debug("Updating bootstrap")
if isinstance(self.bootstrap, Box):
bootstrap = self.bootstrap
else:
bootstrap = self.fetch_bootstrap(self.bootstrap)
self.live = bootstrap.payload.live
self.profile = bootstrap.payload.profile
self.timestamp = bootstrap.payload.current_media_time
self.identifier = bootstrap.payload.movie_identifier
self.time_scale = bootstrap.payload.time_scale
self.segmentruntable = bootstrap.payload.segment_run_table_entries[0]
self.fragmentruntable = bootstrap.payload.fragment_run_table_entries[0]
self.first_fragment, last_fragment = self.fragment_count()
fragment_duration = self.fragment_duration(last_fragment)
if last_fragment != self.last_fragment:
bootstrap_changed = True
self.last_fragment = last_fragment
else:
bootstrap_changed = False
if self.current_fragment < 0:
if self.live:
current_fragment = last_fragment
# Less likely to hit edge if we don't start with last fragment,
# default buffer is 10 sec.
fragment_buffer = int(ceil(self.live_edge / fragment_duration))
current_fragment = max(self.first_fragment,
current_fragment - (fragment_buffer - 1))
log.debug("Live edge buffer {0} sec is {1} fragments",
self.live_edge, fragment_buffer)
# Make sure we don't have a duration set when it's a
# live stream since it will just confuse players anyway.
self.writer.concater.duration = None
else:
current_fragment = self.first_fragment
self.current_fragment = current_fragment
log.debug("Current timestamp: {0}", self.timestamp / self.time_scale)
log.debug("Current segment: {0}", self.current_segment)
log.debug("Current fragment: {0}", self.current_fragment)
log.debug("First fragment: {0}", self.first_fragment)
log.debug("Last fragment: {0}", self.last_fragment)
log.debug("End fragment: {0}", self.end_fragment)
self.bootstrap_reload_time = fragment_duration
if self.live and not bootstrap_changed:
log.debug("Bootstrap not changed, shortening timer")
self.bootstrap_reload_time /= 2
self.bootstrap_reload_time = max(self.bootstrap_reload_time,
self.bootstrap_minimal_reload_time)
def fetch_bootstrap(self, url):
res = self.session.http.get(url,
exception=StreamError,
**self.stream.request_params)
return Box.deserialize(BytesIO(res.content))
def fragment_url(self, segment, fragment):
url = absolute_url(self.stream.baseurl, self.stream.url)
return FRAGMENT_URL.format(url=url,
segment=segment,
fragment=fragment,
identifier="",
quality="")
def fragment_count(self):
table = self.fragmentruntable.payload.fragment_run_entry_table
first_fragment, end_fragment = None, None
for i, fragmentrun in enumerate(table):
if fragmentrun.discontinuity_indicator is not None:
if fragmentrun.discontinuity_indicator == 0:
break
elif fragmentrun.discontinuity_indicator > 0:
continue
if first_fragment is None:
first_fragment = fragmentrun.first_fragment
end_fragment = fragmentrun.first_fragment
fragment_duration = (fragmentrun.first_fragment_timestamp +
fragmentrun.fragment_duration)
if self.timestamp > fragment_duration:
offset = ((self.timestamp - fragment_duration) /
fragmentrun.fragment_duration)
end_fragment += int(offset)
if first_fragment is None:
first_fragment = 1
if end_fragment is None:
end_fragment = 1
return first_fragment, end_fragment
def fragment_duration(self, fragment):
fragment_duration = 0
table = self.fragmentruntable.payload.fragment_run_entry_table
time_scale = self.fragmentruntable.payload.time_scale
for i, fragmentrun in enumerate(table):
if fragmentrun.discontinuity_indicator is not None:
self.invalid_fragments.add(fragmentrun.first_fragment)
# Check for the last fragment of the stream
if fragmentrun.discontinuity_indicator == 0:
if i > 0:
prev = table[i - 1]
self.end_fragment = prev.first_fragment
break
elif fragmentrun.discontinuity_indicator > 0:
continue
if fragment >= fragmentrun.first_fragment:
fragment_duration = fragmentrun.fragment_duration / time_scale
return fragment_duration
def segment_from_fragment(self, fragment):
table = self.segmentruntable.payload.segment_run_entry_table
for segment, start, end in self.iter_segment_table(table):
if start - 1 <= fragment <= end:
return segment
else:
segment = 1
return segment
def iter_segment_table(self, table):
# If the first segment in the table starts at the beginning we
# can go from there, otherwise we start from the end and use the
# total fragment count to figure out where the last segment ends.
if table[0].first_segment == 1:
prev_frag = self.first_fragment - 1
for segmentrun in table:
start = prev_frag + 1
end = prev_frag + segmentrun.fragments_per_segment
yield segmentrun.first_segment, start, end
prev_frag = end
else:
prev_frag = self.last_fragment + 1
for segmentrun in reversed(table):
start = prev_frag - segmentrun.fragments_per_segment
end = prev_frag - 1
yield segmentrun.first_segment, start, end
prev_frag = start
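# Illustrative walk-through (made-up table): with first_fragment=1 and two
# segment runs of 3 fragments each (first_segment=1 and first_segment=2),
# iter_segment_table() yields (1, 1, 3) and (2, 4, 6), i.e. segment 1 covers
# fragments 1-3 and segment 2 covers fragments 4-6.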
def valid_fragment(self, fragment):
return fragment not in self.invalid_fragments
def iter_segments(self):
while not self.closed:
fragments = range(self.current_fragment, self.last_fragment + 1)
fragments = filter(self.valid_fragment, fragments)
for fragment in fragments:
self.current_fragment = fragment + 1
self.current_segment = self.segment_from_fragment(fragment)
fragment_duration = int(self.fragment_duration(fragment) * 1000)
fragment_url = self.fragment_url(self.current_segment, fragment)
fragment = Fragment(self.current_segment, fragment,
fragment_duration, fragment_url)
log.debug("Adding fragment {0}-{1} to queue",
fragment.segment, fragment.fragment)
yield fragment
# End of stream
stream_end = self.end_fragment and fragment.fragment >= self.end_fragment
if self.closed or stream_end:
return
if self.wait(self.bootstrap_reload_time):
try:
self.update_bootstrap()
except StreamError as err:
log.warning("Failed to update bootstrap: {0}", err)
class HDSStreamReader(SegmentedStreamReader):
__worker__ = HDSStreamWorker
__writer__ = HDSStreamWriter
def __init__(self, stream, *args, **kwargs):
SegmentedStreamReader.__init__(self, stream, *args, **kwargs)
class HDSStream(Stream):
"""
Implements the Adobe HTTP Dynamic Streaming protocol
*Attributes:*
- :attr:`baseurl` Base URL
- :attr:`url` Base path of the stream, joined with the base URL when
fetching fragments
- :attr:`bootstrap` Either a URL pointing to the bootstrap or a
bootstrap :class:`Box` object used for initial information about
the stream
- :attr:`metadata` Either `None` or a :class:`ScriptData` object
that contains metadata about the stream, such as height, width and
bitrate
"""
__shortname__ = "hds"
def __init__(self, session, baseurl, url, bootstrap, metadata=None,
timeout=60, **request_params):
Stream.__init__(self, session)
self.baseurl = baseurl
self.url = url
self.bootstrap = bootstrap
self.metadata = metadata
self.timeout = timeout
# Deep copy request params to make it mutable
self.request_params = deepcopy(request_params)
parsed = urlparse(self.url)
if parsed.query:
params = parse_qsl(parsed.query)
if params:
if not self.request_params.get("params"):
self.request_params["params"] = {}
self.request_params["params"].update(params)
self.url = urlunparse(
(parsed.scheme, parsed.netloc, parsed.path, None, None, None)
)
def __repr__(self):
return ("<HDSStream({0!r}, {1!r}, {2!r},"
" metadata={3!r}, timeout={4!r})>").format(self.baseurl,
self.url,
self.bootstrap,
self.metadata,
self.timeout)
def __json__(self):
if isinstance(self.bootstrap, Box):
bootstrap = base64.b64encode(self.bootstrap.serialize())
else:
bootstrap = self.bootstrap
if isinstance(self.metadata, ScriptData):
metadata = self.metadata.__dict__
else:
metadata = self.metadata
return dict(type=HDSStream.shortname(), baseurl=self.baseurl,
url=self.url, bootstrap=bootstrap, metadata=metadata,
params=self.request_params.get("params", {}),
headers=self.request_params.get("headers", {}))
def open(self):
reader = HDSStreamReader(self)
reader.open()
return reader
@classmethod
def parse_manifest(cls, session, url, timeout=60, pvswf=None, is_akamai=False,
**request_params):
"""Parses a HDS manifest and returns its substreams.
:param url: The URL to the manifest.
:param timeout: How long to wait for data to be returned from
from the stream before raising an error.
:param is_akamai: force adding of the akamai parameters
:param pvswf: URL of player SWF for Akamai HD player verification.
"""
# private argument, should only be used in recursive calls
raise_for_drm = request_params.pop("raise_for_drm", False)
if not request_params:
request_params = {}
request_params["headers"] = request_params.get("headers", {})
request_params["params"] = request_params.get("params", {})
# These params are reserved for internal use
request_params.pop("exception", None)
request_params.pop("stream", None)
request_params.pop("timeout", None)
request_params.pop("url", None)
if "akamaihd" in url or is_akamai:
request_params["params"]["hdcore"] = HDCORE_VERSION
request_params["params"]["g"] = cls.cache_buster_string(12)
res = session.http.get(url, exception=IOError, **request_params)
manifest = session.http.xml(res, "manifest XML", ignore_ns=True,
exception=IOError)
if manifest.findtext("drmAdditionalHeader"):
log.debug("Omitting HDS stream protected by DRM: {}", url)
if raise_for_drm:
raise PluginError("{} is protected by DRM".format(url))
log.warning("Some or all streams are unavailable as they are protected by DRM")
return {}
parsed = urlparse(url)
baseurl = manifest.findtext("baseURL")
baseheight = manifest.findtext("height")
bootstraps = {}
streams = {}
if not baseurl:
baseurl = urljoin(url, os.path.dirname(parsed.path))
if not baseurl.endswith("/"):
baseurl += "/"
for bootstrap in manifest.findall("bootstrapInfo"):
name = bootstrap.attrib.get("id") or "_global"
url = bootstrap.attrib.get("url")
if url:
box = absolute_url(baseurl, url)
else:
data = base64.b64decode(bytes(bootstrap.text, "utf8"))
box = Box.deserialize(BytesIO(data))
bootstraps[name] = box
pvtoken = manifest.findtext("pv-2.0")
if pvtoken:
if not pvswf:
raise IOError("This manifest requires the 'pvswf' parameter "
"to verify the SWF")
params = cls._pv_params(session, pvswf, pvtoken, **request_params)
request_params["params"].update(params)
child_drm = False
for media in manifest.findall("media"):
url = media.attrib.get("url")
bootstrapid = media.attrib.get("bootstrapInfoId", "_global")
href = media.attrib.get("href")
if url and bootstrapid:
bootstrap = bootstraps.get(bootstrapid)
if not bootstrap:
continue
bitrate = media.attrib.get("bitrate")
streamid = media.attrib.get("streamId")
height = media.attrib.get("height")
if height:
quality = height + "p"
elif bitrate:
quality = bitrate + "k"
elif streamid:
quality = streamid
elif baseheight:
quality = baseheight + "p"
else:
quality = "live"
metadata = media.findtext("metadata")
if metadata:
metadata = base64.b64decode(bytes(metadata, "utf8"))
metadata = ScriptData.deserialize(BytesIO(metadata))
else:
metadata = None
stream = HDSStream(session, baseurl, url, bootstrap,
metadata=metadata, timeout=timeout,
**request_params)
streams[quality] = stream
elif href:
url = absolute_url(baseurl, href)
try:
child_streams = cls.parse_manifest(session, url,
timeout=timeout,
is_akamai=is_akamai,
raise_for_drm=True,
**request_params)
except PluginError:
child_drm = True
child_streams = {}
for name, stream in child_streams.items():
# Override stream name if bitrate is available in parent
# manifest but not the child one.
bitrate = media.attrib.get("bitrate")
if bitrate and not re.match(r"^(\d+)k$", name):
name = bitrate + "k"
streams[name] = stream
if child_drm:
log.warning("Some or all streams are unavailable as they are protected by DRM")
return streams
@classmethod
def _pv_params(cls, session, pvswf, pv, **request_params):
"""Returns any parameters needed for Akamai HD player verification.
Algorithm originally documented by KSV, source:
http://stream-recorder.com/forum/showpost.php?p=43761&postcount=13
"""
try:
data, hdntl = pv.split(";")
except ValueError:
data = pv
hdntl = ""
cache = Cache(filename="stream.json")
key = "akamaihd-player:" + pvswf
cached = cache.get(key)
request_params = deepcopy(request_params)
headers = request_params.pop("headers", {})
if cached:
headers["If-Modified-Since"] = cached["modified"]
swf = session.http.get(pvswf, headers=headers, **request_params)
if cached and swf.status_code == 304: # Server says not modified
hash = cached["hash"]
else:
# Calculate SHA-256 hash of the uncompressed SWF file, base-64
# encoded
hash = sha256()
hash.update(swfdecompress(swf.content))
hash = base64.b64encode(hash.digest()).decode("ascii")
modified = swf.headers.get("Last-Modified", "")
# Only save in cache if a valid date is given
if len(modified) < 40:
cache.set(key, dict(hash=hash, modified=modified))
msg = "st=0~exp=9999999999~acl=*~data={0}!{1}".format(data, hash)
auth = hmac.new(AKAMAIHD_PV_KEY, msg.encode("ascii"), sha256)
pvtoken = "{0}~hmac={1}".format(msg, auth.hexdigest())
# The "hdntl" parameter can be accepted as a cookie or passed in the
# query string, but the "pvtoken" parameter can only be in the query
# string
params = [("pvtoken", pvtoken)]
params.extend(parse_qsl(hdntl, keep_blank_values=True))
return params
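# Illustrative sketch of the token built above (values are made up): given
# pv = "<data>;hdntl=...", the signed message is
#     "st=0~exp=9999999999~acl=*~data=<data>!<base64 SHA-256 of the SWF>"
# and the resulting query parameter is
#     pvtoken = "<message>~hmac=<hex HMAC-SHA256 of the message, keyed with AKAMAIHD_PV_KEY>"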
@staticmethod
def cache_buster_string(length):
return "".join([random.choice(string.ascii_uppercase) for i in range(length)])
|
|
# coding=utf-8
import sys
import os
import csv
from datetime import datetime, timedelta
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import drange
from matplotlib.patches import Rectangle
import scenario_factory
# http://www.javascripter.net/faq/hextorgb.htm
PRIMA = (148/256, 164/256, 182/256)
PRIMB = (101/256, 129/256, 164/256)
PRIM = ( 31/256, 74/256, 125/256)
PRIMC = ( 41/256, 65/256, 94/256)
PRIMD = ( 10/256, 42/256, 81/256)
EC = (1, 1, 1, 0)
GRAY = (0.5, 0.5, 0.5)
WHITE = (1, 1, 1)
def load(f):
with np.load(f) as npz:
data = np.array([npz[k] for k in sorted(npz.keys())])
return data
def plot_aggregated(sc, bd, unctrl, ctrl, ctrl_sched, res=1):
t_day_start = sc.t_block_start - timedelta(hours=sc.t_block_start.hour,
minutes=sc.t_block_start.minute)
t = drange(t_day_start, sc.t_end, timedelta(minutes=res))
skip = int((t_day_start - sc.t_start).total_seconds() / 60 / res)
i_block_start = int((sc.t_block_start - t_day_start).total_seconds() / 60 / res)
i_block_end = int((sc.t_block_end - t_day_start).total_seconds() / 60 / res)
P_el_unctrl = unctrl[:,0,skip:].sum(0)
P_el_ctrl = ctrl[:,0,skip:].sum(0)
P_el_sched = ctrl_sched[:,skip:].sum(0)
P_el_target = np.ma.array(P_el_sched)
block = np.array(sc.block)
if block.shape == (1,):
block = block.repeat(P_el_target[~P_el_target.mask].shape[0])
elif block.shape[0] == P_el_target[~P_el_target.mask].shape[0] / 15:
block = block.repeat(15)
P_el_target[~P_el_target.mask] = block
T_storage_ctrl = ctrl[:,2,skip:]
ft = np.array([t[0]] + list(np.repeat(t[1:-1], 2)) + [t[-1]])
P_el_ctrl_fill = np.repeat(P_el_ctrl[:-1], 2)
fig, ax = plt.subplots(2, sharex=True)
fig.subplots_adjust(left=0.105, right=0.998, hspace=0.3, top=0.975, bottom=0.2)
for a in ax:
plt.setp(list(a.spines.values()), color='k')
plt.setp([a.get_xticklines(), a.get_yticklines()], color='k')
ax[0].set_ylabel('P$_{\mathrm{el}}$ [kW]')
ymax = max(P_el_unctrl.max(), P_el_ctrl_fill.max(), P_el_sched.max(), 0) / 1000.0
ymin = min(P_el_unctrl.min(), P_el_ctrl_fill.min(), P_el_sched.min(), 0) / 1000.0
ax[0].set_ylim(ymin - abs(ymin * 0.1), ymax + abs(ymax * 0.1))
xspace = (t[-1] - t[-2])
# ax[0].set_xlim(t[0], t[-1] + xspace)
ax[0].set_xlim(t[0], t[len(t) // 2])
# ax[0].axvline(t[i_block_start], ls='--', color='0.5')
# ax[0].axvline(t[i_block_end], ls='--', color='0.5')
ax[0].axvspan(t[i_block_start], t[i_block_end], fc=GRAY+(0.2,), ec=EC)
# ax[0].axvline(t[0], ls='-', color=GRAY, lw=0.5)
# ax[0].axvline(t[len(t)/2], ls='-', color=GRAY, lw=0.5)
l_unctrl, = ax[0].plot_date(t, P_el_unctrl / 1000.0, fmt=':', color='k', drawstyle='steps-post', lw=0.75)
l_unctrl.set_dashes([1.0, 1.0])
# add lw=0.0 due to bug in mpl (will show as hairline in pdf though...)
l_ctrl = ax[0].fill_between(ft, P_el_ctrl_fill / 1000.0, facecolors=GRAY+(0.75,), edgecolors=EC, lw=0.0)
# Create proxy artist as l_ctrl legend handle
l_ctrl_proxy = Rectangle((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.5)
# l_sched, = ax[0].plot_date(t, P_el_sched / 1000.0, fmt='-', color=GRAY, drawstyle='steps-post', lw=0.75)
l_target, = ax[0].plot_date(t, P_el_target / 1000.0, fmt='-', color='k', drawstyle='steps-post', lw=0.75)
# colors = [
# '#348ABD', # blue
# '#7A68A6', # purple
# '#A60628', # red
# '#467821', # green
# '#CF4457', # pink
# '#188487', # turqoise
# '#E24A33', # orange
# '#1F4A7D', # primary
# '#BF9D23', # secondary
# '#BF5B23', # complementary
# '#94A4B6', # primaryA
# '#6581A4', # primaryB
# '#29415E', # primaryC
# '#0A2A51', # primaryD
# ][:len(unctrl)]
# for (c, P_el_unctrl, P_el_ctrl, P_el_sched) in zip(colors, unctrl[:,0,:], ctrl[:,0,:], ctrl_sched):
# ax[0].plot_date(t, P_el_unctrl / 1000.0, fmt='-', color=c, lw=1, label='unctrl')
# ax[0].plot_date(t, P_el_ctrl / 1000.0, fmt=':', color=c, lw=1, label='ctrl')
# ax[0].plot_date(t, P_el_sched / 1000.0, fmt='--x', color=c, lw=1, label='sched')
ymax = T_storage_ctrl.max() - 273
ymin = T_storage_ctrl.min() - 273
ax[1].set_ylim(ymin - abs(ymin * 0.01), ymax + abs(ymax * 0.01))
ax[1].set_ylabel('T$_{\mathrm{storage}}\;[^{\circ}\mathrm{C}]$', labelpad=9)
ax[1].axvspan(t[i_block_start], t[i_block_end], fc=GRAY+(0.1,), ec=EC)
# ax[1].axvline(t[0], ls='-', color=GRAY, lw=0.5)
# ax[1].axvline(t[len(t)/2], ls='-', color=GRAY, lw=0.5)
for v in T_storage_ctrl:
ax[1].plot_date(t, v - 273.0, fmt='-', color=GRAY, alpha=0.25, lw=0.5)
# HP and CHP have different temperature ranges (HP: 40-50, CHP: 50-70)
crit = (T_storage_ctrl - 273 >= 50).all(axis=1)
T_CHP = T_storage_ctrl[crit]
T_HP = T_storage_ctrl[~crit]
l_T_med_CHP, = ax[1].plot_date(t, T_CHP.mean(0) - 273.0, fmt='-', color=GRAY, alpha=0.75, lw=1.5)
l_T_med_HP, = ax[1].plot_date(t, T_HP.mean(0) - 273.0, fmt='-', color=GRAY, alpha=0.75, lw=1.5)
ax[0].xaxis.get_major_formatter().scaled[1/24.] = '%H:%M'
ax[-1].set_xlabel('Time of day')
fig.autofmt_xdate()
ax[1].legend([l_target, l_unctrl, l_ctrl_proxy, l_T_med_CHP],
['target', 'original', 'scheduled', 'storage temperatures (mean)'],
bbox_to_anchor=(0., 1.03, 1., .103), loc=8, ncol=4,
handletextpad=0.2, mode='expand', handlelength=3,
borderaxespad=0.25, fancybox=False, fontsize='x-small')
# import pdb
# pdb.set_trace()
return fig
def plot_aggregated_SLP(sc, bd, unctrl, ctrl, ctrl_sched, res=1):
assert hasattr(sc, 'slp_file')
t_day_start = sc.t_block_start - timedelta(hours=sc.t_block_start.hour,
minutes=sc.t_block_start.minute)
skip = int((t_day_start - sc.t_start).total_seconds() / 60 / res)
i_block_start = int((sc.t_block_start - t_day_start).total_seconds() / 60 / res)
i_block_end = int((sc.t_block_end - t_day_start).total_seconds() / 60 / res)
t = drange(sc.t_block_start, sc.t_block_end, timedelta(minutes=res))
P_el_unctrl = unctrl[:,0,skip + i_block_start:skip + i_block_end].sum(0)
P_el_ctrl = ctrl[:,0,skip + i_block_start:skip + i_block_end].sum(0)
# ctrl correction
P_el_ctrl = np.roll(P_el_ctrl, -1, axis=0)
P_el_sched = ctrl_sched[:,skip + i_block_start:skip + i_block_end].sum(0)
T_storage_ctrl = ctrl[:,2,skip + i_block_start:skip + i_block_end]
slp = _read_slp(sc, bd)[skip + i_block_start:skip + i_block_end]
# diff_ctrl = (P_el_ctrl - P_el_unctrl) / 1000.0
diff_ctrl = (P_el_sched - P_el_unctrl) / 1000.0
diff_ctrl_fill = np.repeat((slp + diff_ctrl)[:-1], 2)
slp_fill = np.repeat(slp[:-1], 2)
ft = np.array([t[0]] + list(np.repeat(t[1:-1], 2)) + [t[-1]])
# P_el_ctrl_fill = np.repeat(P_el_ctrl[:-1], 2)
P_el_ctrl_fill = np.repeat(P_el_sched[:-1], 2)
fig, ax = plt.subplots(2, sharex=True)
fig.subplots_adjust(left=0.11, right=0.998, hspace=0.2, top=0.95)
for a in ax:
plt.setp(list(a.spines.values()), color='k')
plt.setp([a.get_xticklines(), a.get_yticklines()], color='k')
ax[0].set_ylabel('P$_{\mathrm{el}}$ [kW]')
ymax = max(P_el_unctrl.max(), P_el_ctrl_fill.max(), P_el_sched.max(), 0) / 1000.0
ymin = min(P_el_unctrl.min(), P_el_ctrl_fill.min(), P_el_sched.min(), 0) / 1000.0
ax[0].set_ylim(ymin - abs(ymin * 0.1), ymax + abs(ymax * 0.1))
xspace = (t[-1] - t[-2])
ax[0].set_xlim(t[0], t[-1] + xspace)
l_unctrl, = ax[0].plot_date(t, P_el_unctrl / 1000.0, fmt=':', color='k', drawstyle='steps-post', lw=0.75, label='original')
l_unctrl.set_dashes([1.0, 1.0])
# add lw=0.0 due to bug in mpl (will show as hairline in pdf though...)
l_ctrl = ax[0].fill_between(ft, P_el_ctrl_fill / 1000.0, facecolors=GRAY+(0.75,), edgecolors=EC, lw=0.0)
# Create proxy artist as l_ctrl legend handle
l_ctrl_proxy = Rectangle((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.5)
# l_sched, = ax[0].plot_date(t, P_el_sched / 1000.0, fmt='-', color=PRIM, drawstyle='steps-post', lw=0.75, label='gesteuert')
# colors = [
# '#348ABD', # blue
# '#7A68A6', # purple
# '#A60628', # red
# '#467821', # green
# '#CF4457', # pink
# '#188487', # turqoise
# '#E24A33', # orange
# '#1F4A7D', # primary
# '#BF9D23', # secondary
# '#BF5B23', # complementary
# '#94A4B6', # primaryA
# '#6581A4', # primaryB
# '#29415E', # primaryC
# '#0A2A51', # primaryD
# ][:len(unctrl)]
# for (c, P_el_unctrl, P_el_ctrl, P_el_sched) in zip(colors, unctrl[:,0,:], ctrl[:,0,:], ctrl_sched):
# ax[0].plot_date(t, P_el_unctrl / 1000.0, fmt='-', color=c, lw=1, label='unctrl')
# ax[0].plot_date(t, P_el_ctrl / 1000.0, fmt=':', color=c, lw=1, label='ctrl')
# ax[0].plot_date(t, P_el_sched / 1000.0, fmt='--x', color=c, lw=1, label='sched')
ax[1].set_ylabel('P$_{el}$ [kW]')
ax[1].set_xlabel('Time of day')
ymin = min(slp.min(), (slp + diff_ctrl).min())
ax[1].set_ylim(ymin + (ymin * 0.1), 0)
l_unctrl_slp, = ax[1].plot_date(t, slp, fmt=':', color='k', drawstyle='steps-post', lw=0.75, label='original')
l_unctrl_slp.set_dashes([1.0, 1.0])
ax[1].fill_between(ft, diff_ctrl_fill, slp_fill, where=diff_ctrl_fill>=slp_fill, facecolors=GRAY+(0.3,), edgecolors=EC, lw=0.0)
l_diff_slp = ax[1].fill_between(ft, diff_ctrl_fill, slp_fill, where=diff_ctrl_fill<slp_fill, facecolors=GRAY+(0.3,), edgecolors=EC, lw=0.0)
# Create proxy artist as l_diff_slp legend handle
l_diff_slp_proxy = Rectangle((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.3)
l_ctrl_slp, = ax[1].plot_date(t, slp + diff_ctrl, fmt='-', color='k', drawstyle='steps-post', lw=0.75, label='scheduled')
# ax[0].legend([l_sched, l_unctrl, l_T_med],
# ['Verbundfahrplan', 'ungesteuert', 'Speichertemperaturen (Median)'],
# bbox_to_anchor=(0., 1.05, 1., .105), loc=8, ncol=4,
# handletextpad=0.2, mode='expand', handlelength=3,
# borderaxespad=0.25, fancybox=False, fontsize='x-small')
ax[0].text(0.5, 1.05, 'Profile of the units under control', ha='center', va='center',
fontsize='small', transform=ax[0].transAxes)
ax[1].text(0.5, 1.05, 'Profile of the medium-voltage node', ha='center', va='center',
fontsize='small', transform=ax[1].transAxes)
ax[0].legend([l_unctrl, l_ctrl_proxy], ['original', 'scheduled'], loc='upper right', fancybox=False, fontsize='x-small')
ax[1].legend([l_unctrl_slp, l_ctrl_slp, l_diff_slp_proxy], ['original', 'scheduled', 'difference'], loc='upper right', fancybox=False, fontsize='x-small')
fig.autofmt_xdate()
ax[0].xaxis.get_major_formatter().scaled[1/24.] = '%H:%M'
return fig
def norm(minimum, maximum, value):
# return value
if maximum == minimum:
return maximum
return (value - minimum) / (maximum - minimum)
def _read_slp(sc, bd):
# Read csv data
slp = []
found = False
with open(sc.slp_file, 'r', encoding='latin-1') as f:
reader = csv.reader(f, delimiter=';')
for row in reader:
if not row:
continue
if not found and row[0] == 'Datum':
found = True
elif found:
date = datetime.strptime('_'.join(row[:2]), '%d.%m.%Y_%H:%M:%S')
if date < sc.t_start:
continue
elif date >= sc.t_end:
break
# This is a demand, so negate the values
slp.append(-1.0 * float(row[2].replace(',', '.')))
slp = np.array(slp)
# Scale values
# if hasattr(sc, 'run_unctrl_datafile'):
# slp_norm = norm(slp.min(), slp.max(), slp)
# unctrl = load(p(bd, sc.run_unctrl_datafile)).sum(0) / 1000
# slp = slp_norm * (unctrl.max() - unctrl.min()) + unctrl.min()
MS_day_mean = 13600 # kWh, derived from SmartNord Scenario document
MS_15_mean = MS_day_mean / 96
slp = slp / np.abs(slp.mean()) * MS_15_mean
return slp
# return np.array(np.roll(slp, 224, axis=0))
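# Worked example of the scaling above (illustrative): MS_day_mean = 13600 kWh
# per day corresponds to MS_15_mean = 13600 / 96 ~= 141.67 kWh per 15-minute
# slot, so the (negative) SLP values are rescaled such that the magnitude of
# their mean equals that value.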
def p(basedir, fn):
return os.path.join(basedir, fn)
def resample(d, resolution):
# resample the innermost axis to 'resolution'
shape = tuple(d.shape[:-1]) + (int(d.shape[-1]/resolution), resolution)
return d.reshape(shape).sum(-1)/resolution
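# Illustrative example (made-up data): resample() averages the innermost axis
# in blocks of 'resolution' samples, e.g.
#     resample(np.array([[1., 2., 3., 4.]]), 2) -> array([[1.5, 3.5]])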
def run(sc_file):
print()
bd = os.path.dirname(sc_file)
sc = scenario_factory.Scenario()
sc.load_JSON(sc_file)
print(sc.title)
# # plot_samples(sc, bd)
# plot_samples_carpet(sc, bd)
# plt.show()
# sys.exit(0)
unctrl = load(p(bd, sc.run_unctrl_datafile))
block = load(p(bd, sc.run_ctrl_datafile))
post = load(p(bd, sc.run_post_datafile))
sched = load(p(bd, sc.sched_file))
ctrl = np.zeros(unctrl.shape)
idx = 0
for l in (block, post):
ctrl[:,:,idx:idx + l.shape[-1]] = l
idx += l.shape[-1]
if sched.shape[-1] == unctrl.shape[-1] / 15:
print('Extending schedules shape by factor 15')
sched = sched.repeat(15, axis=1)
t_start, b_start, b_end = sc.t_start, sc.t_block_start, sc.t_block_end
div = 1
if (b_end - t_start).total_seconds() / 60 == sched.shape[-1] * 15:
div = 15
elif (b_end - t_start).total_seconds() / 60 == sched.shape[-1] * 60:
div = 60
b_s = int((b_start - sc.t_start).total_seconds() / 60 / div)
b_e = int((b_end - sc.t_start).total_seconds() / 60 / div)
ctrl_sched = np.zeros((unctrl.shape[0], unctrl.shape[-1]))
ctrl_sched = np.ma.array(ctrl_sched)
ctrl_sched[:,:b_s] = np.ma.masked
ctrl_sched[:,b_s:b_e] = sched[:,b_s:b_e]
ctrl_sched[:,b_e:] = np.ma.masked
# plot_each_device(sc, unctrl, ctrl, sched)
minutes = (sc.t_end - sc.t_start).total_seconds() / 60
assert unctrl.shape[-1] == ctrl.shape[-1] == ctrl_sched.shape[-1]
shape = unctrl.shape[-1]
if hasattr(sc, 'slp_file'):
if minutes == shape:
print('data is 1-minute resolution, will be resampled by 15')
res = 15
elif minutes == shape * 15:
print('data is 15-minute resolution, all fine')
res = 1
else:
raise RuntimeError('unsupported data resolution: %.2f' % (minutes / shape))
unctrl = resample(unctrl, res)
ctrl = resample(ctrl, res)
ctrl_sched = resample(ctrl_sched, res)
fig = plot_aggregated_SLP(sc, bd, unctrl, ctrl, ctrl_sched, res=15)
else:
if minutes == shape:
print('data is 1-minute resolution, will be resampled by 60')
res = 60
elif minutes == shape * 15:
print('data is 15-minute resolution, will be resampled by 4')
res = 4
elif minutes == shape * 60:
print('data is 60-minute resolution, all fine')
res = 1
else:
raise RuntimeError('unsupported data resolution: %.2f' % (minutes / shape))
unctrl = resample(unctrl, res)
ctrl = resample(ctrl, res)
ctrl_sched = resample(ctrl_sched, res)
fig = plot_aggregated(sc, bd, unctrl, ctrl, ctrl_sched, res=60)
fig.savefig(p(bd, sc.title) + '.pdf')
fig.savefig(p(bd, sc.title) + '.png', dpi=300)
plt.show()
if __name__ == '__main__':
for n in sys.argv[1:]:
if os.path.isdir(n):
run(p(n, '0.json'))
else:
run(n)
|
|
import re
import os
from distutils.command.config import config
from subprocess import Popen, PIPE, CalledProcessError
from tempfile import mkdtemp
from unicodedata import normalize, combining
from urlparse import urlsplit
from citools.git import fetch_repository
"""
Help us handle continuous versioning. Idea is simple: We have n-number digits
version (in form 1.2(.3...).n), where number of 1...(n-1) must appear in tag.
n is then computed as number-of-commits since last version-setting tag (and we're
using git describe for it now)
"""
DEFAULT_TAG_VERSION = (0, 0)
REVLIST_TAG_PATTERN = re.compile("^\ \((.*)\)$")
def compute_version(string):
""" Return VERSION tuple, computed from git describe output """
match = re.match("(?P<bordel>[a-z0-9\-\_\/]*)(?P<arch>\d+\.\d+)(?P<rest>.*)", string)
if not match or not match.groupdict().has_key('arch'):
raise ValueError(u"String %s should be a scheme version, but it's not; failing" % str(string))
version = match.groupdict()['arch']
# if bordel ends with digit numbers, they should be part of <arch>
if match.groupdict().has_key('bordel') and match.groupdict()['bordel']:
i = 0
bordel = match.groupdict()['bordel']
while i < len(bordel) and re.match("\d", bordel[~i]):
version = bordel[~i] + version
i += 1
if match.groupdict().has_key('rest') and match.groupdict()['rest']:
staging = re.findall("(\.\d+)", match.groupdict()['rest'])
version = ''.join([version]+staging)
# we're using integer version numbers instead of string
build_match = re.match(".*(%(version)s){1}.*\-{1}(?P<build>\d+)\-{1}g{1}[0-9a-f]{7}" % {'version' : version}, string)
if not build_match or not build_match.groupdict().has_key('build'):
# if version is 0.0....
if re.match("^0(\.0)+$", version):
# return 0.0.1 instead of 0.0.0, as "ground zero version" is not what we want
build = 1
else:
build = 0
else:
build = int(build_match.groupdict()['build'])
return tuple(list(map(int, version.split(".")))+[build])
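# Illustrative sketch (not part of the original module): example calls to
# compute_version() on typical `git describe` outputs. Tag names and hashes
# are made up; the expected tuples follow the parsing rules implemented above.
def _example_compute_version():
    assert compute_version("myproject-1.2-24-g1a2b3c4") == (1, 2, 24)
    assert compute_version("myproject-1.2") == (1, 2, 0)
    # a "ground zero" version is reported as build 1, not 0
    assert compute_version("0.0") == (0, 0, 1)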
def sum_versions(version1, version2):
"""
Return sum of both versions. Raise ValueError when negative number met
i.e.:
(0, 2) = (0, 1) + (0, 1)
(1, 23, 12) = (0, 2, 12) + (1, 21)
"""
final_version = [int(i) for i in version1 if int(i) >= 0]
if len(final_version) != len(version1):
raise ValueError("Negative number in version number not allowed")
position = 0
for part in version2:
if int(part) < 0:
raise ValueError("Negative number in version number not allowed")
if len(final_version) < position+1:
final_version.append(part)
else:
final_version[position] += part
position += 1
return tuple(final_version)
def get_git_last_hash(commit="HEAD"):
p = Popen(["git", "rev-parse", commit], stdout=PIPE, stderr=PIPE)
stdout = p.communicate()[0]
if p.returncode == 0:
return stdout.strip()
else:
return ''
def get_git_revlist_tags(commit="HEAD"):
p = Popen(["git", "rev-list", "--simplify-by-decoration", "--pretty=format:%d", commit], stdout=PIPE, stderr=PIPE)
stdout = p.communicate()[0]
if p.returncode == 0:
return stdout.strip()
else:
return ''
def get_tags_from_line(tagline):
tags = []
tagline = REVLIST_TAG_PATTERN.match(tagline)
if tagline:
line = tagline.groups()[0]
if ', ' in line:
candidates = line.split(', ')
else:
candidates = [line]
for candidate in candidates:
prefixes_to_strip = ['tag: ']
for prefix in prefixes_to_strip:
if candidate.startswith(prefix):
candidate = candidate[len(prefix):]
tags.append(candidate)
return tags
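# A minimal sketch of what get_tags_from_line() returns, assuming a decorated
# rev-list line such as " (HEAD, tag: myproject-1.2, origin/master)":
# the "tag: " prefix is stripped and every candidate is kept, giving
# ['HEAD', 'myproject-1.2', 'origin/master']; non-tag candidates are filtered
# out later in get_tags_from_current_branch() by running git describe --match.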
def get_tags_from_current_branch(revlist_output, accepted_tag_pattern):
lines = revlist_output.splitlines()
tags = []
# we could rely on every other line being a commit header (line % 2 == 0), but a bad
# newline would mess up the whole process, so just be simple and forgiving
for line in lines:
if not line.startswith("commit: "):
for tag in get_tags_from_line(line):
# now here is the thing: the user provides accepted_tag_pattern in non-pythonic,
# git shell-glob syntax. Rather than force the user to provide more details, we just
# validate it by running git.
# TODO: This may be optimized in future (shell module?), patches welcomed
proc = Popen(['git', 'describe', '--match=%s' % accepted_tag_pattern, tag], stdout=PIPE, stderr=PIPE)
verified_tag = proc.communicate()[0].strip()
if proc.returncode == 0 and tag == verified_tag:
tags.append(tag)
return tags
def get_highest_version(versions):
"""
Get highest version for version slice strings
(3, 0) > (2, 2, 3) > (1, 155) > (1, 1) > (1, 0, 234, 3890)
"""
current_slice = 0
if len(versions) < 1:
return DEFAULT_TAG_VERSION
while len(versions) > 1:
slice_map = dict([(v[current_slice], v) for v in versions if len(v) >= current_slice+1])
slice_vers = slice_map.keys()
slice_vers.sort()
highest = slice_vers[-1]
versions = [v for v in versions if v[current_slice] == highest]
if len(versions) < 1:
raise NotImplementedError()
current_slice += 1
return versions[0]
def get_highest_tag(tag_list):
"""
Return highest tag from given git describe output tags
"""
version_map = {}
for i in tag_list:
try:
version_map[compute_version(i)] = i
except ValueError:
# bad i format -> shall not be considered
pass
return version_map[get_highest_version(version_map.keys())]
def get_git_describe(fix_environment=False, repository_directory=None, accepted_tag_pattern=None, prefer_highest_version=True):
"""
Return output of git describe. If no tag found, initial version is considered to be 0.0
accepted_tag_pattern is used to filter tags only to 'project numbering ones'.
If accepted_tag_pattern is given, prefer_highest_version may be used. This will prefer the highest
version among the tags matching accepted_tag_pattern, rather than the tag nearest to HEAD.
"""
if repository_directory and not fix_environment:
raise ValueError("Both fix_environment and repository_directory or none of them must be given")
if fix_environment:
if not repository_directory:
raise ValueError(u"Cannot fix environment when repository directory not given")
env_git_dir = None
if os.environ.has_key('GIT_DIR'):
env_git_dir = os.environ['GIT_DIR']
os.environ['GIT_DIR'] = os.path.join(repository_directory, '.git')
command = ["git", "describe"]
if accepted_tag_pattern is not None:
if not prefer_highest_version:
command.append('--match="%s"' % accepted_tag_pattern)
else:
# git describe fails us on layout similar to:
# o
# | \
# o o (repo-1.1)
# |
# o (repo-1.2)
# where repo-1.1-1-<hash> will be reported, while we're interested in 1.2-2-<hash>
# to work around this, we will find "highest" tag matching accepted_tag_patterns and use it
# as a tag pattern for git describe output
available_tags = get_tags_from_current_branch(
revlist_output=get_git_revlist_tags(),
accepted_tag_pattern=accepted_tag_pattern
)
# if no tag is available, just use the default pattern
if len(available_tags) < 1:
pattern = accepted_tag_pattern
else:
pattern = get_highest_tag(available_tags)
command.append('--match="%s"' % pattern)
try:
proc = Popen(' '.join(command), stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = proc.communicate()
if proc.returncode == 0:
return stdout.strip()
elif proc.returncode == 128:
return '.'.join(map(str, DEFAULT_TAG_VERSION))
else:
raise ValueError("Unknown return code %s" % proc.returncode)
finally:
if fix_environment:
if env_git_dir:
os.environ['GIT_DIR'] = env_git_dir
else:
del os.environ['GIT_DIR']
def replace_version(source_file, version):
content = []
version_regexp = re.compile(r"^(VERSION){1}(\ )+(\=){1}(\ )+\({1}([0-9])+(\,{1}(\ )*[0-9]+)+(\)){1}")
for line in source_file:
if version_regexp.match(line):
content.append('VERSION = %s\n' % str(version))
else:
content.append(line)
return content
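# For illustration (hypothetical file contents): replace_version() rewrites a line
# "VERSION = (1, 2, 3)" to "VERSION = (1, 2, 45)" when called with version=(1, 2, 45),
# and passes every other line through untouched.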
def get_git_head_hash(fix_environment=False, repository_directory=None):
""" Return output of git describe. If no tag found, initial version is considered to be 0.0.1 """
if fix_environment:
if not repository_directory:
raise ValueError(u"Cannot fix environment when repository directory not given")
env_git_dir = None
if os.environ.has_key('GIT_DIR'):
env_git_dir = os.environ['GIT_DIR']
os.environ['GIT_DIR'] = os.path.join(repository_directory, '.git')
try:
proc = Popen(["git", "rev-parse", "HEAD"], stdout=PIPE)
return_code = proc.wait()
if return_code == 0:
return proc.stdout.read().strip()
else:
raise ValueError("Non-zero return code %s from git log" % return_code)
finally:
if fix_environment:
if env_git_dir:
os.environ['GIT_DIR'] = env_git_dir
else:
del os.environ['GIT_DIR']
def get_git_head_tstamp(fix_environment=False, repository_directory=None):
""" return timestamp of last commit on current branch """
if fix_environment:
if not repository_directory:
raise ValueError("Cannot fix environment when repository directory not given")
env_git_dir = os.environ.get('GIT_DIR', None)
os.environ['GIT_DIR'] = os.path.join(repository_directory, '.git')
try:
proc = Popen(["git", "log", "-n1", "--pretty=format:%at"], stdout=PIPE)
return_code = proc.wait()
if return_code == 0:
return proc.stdout.read().strip()
else:
raise ValueError("Non-zero return code %s from git log" % return_code)
finally:
if fix_environment:
if env_git_dir:
os.environ['GIT_DIR'] = env_git_dir
else:
del os.environ['GIT_DIR']
def replace_init(version, name):
""" Update VERSION attribute in $name/__init__.py module """
file = os.path.join(name, '__init__.py')
replace_version_in_file(version, file)
def replace_inits(version, packages=None):
if packages is None:
packages = []
for p in packages:
p = p.replace('.', '/')
replace_init(version, p)
def replace_scripts(version, scripts=None):
if scripts is None:
scripts = []
for s in scripts:
s = '%s.py' % s
replace_version_in_file(version, s)
def replace_version_in_file(version, file):
""" Update VERSION attribute in $name/__init__.py module """
file = open(file, 'r')
content = replace_version(version=version, source_file=file)
file.close()
file = open(file.name, 'wb')
file.writelines(content)
file.close()
def get_current_branch(branch_output):
"""
Parse output of git branch --no-color and return proper result
"""
for line in branch_output.splitlines():
if line[2:] == "(no branch)":
raise ValueError("We're outside of branch")
elif line.startswith('*'):
return line[2:]
raise ValueError("No branch found")
def retrieve_current_branch(fix_environment=False, repository_directory=None, **kwargs):
#######
# FIXME: repository_directory and fix_environment artifact shall be refactored
# in something like secure_and_fixed_git_command or something.
# But pay attention to nested command in get_git_describe, it probably shall be using callback.
# See #6679
#######
if repository_directory and not fix_environment:
raise ValueError("Both fix_environment and repository_directory or none of them must be given")
if fix_environment:
if not repository_directory:
raise ValueError(u"Cannot fix environment when repository directory not given")
env_git_dir = None
if os.environ.has_key('GIT_DIR'):
env_git_dir = os.environ['GIT_DIR']
os.environ['GIT_DIR'] = os.path.join(repository_directory, '.git')
try:
proc = Popen('git branch --no-color', stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = proc.communicate()
if proc.returncode == 0:
return get_current_branch(stdout)
else:
raise CalledProcessError(proc.returncode, "git branch --no-color")
finally:
if fix_environment:
if env_git_dir:
os.environ['GIT_DIR'] = env_git_dir
else:
del os.environ['GIT_DIR']
def compute_meta_version(dependency_repositories, workdir=None, accepted_tag_pattern=None, cachedir=None, dependency_versions=None):
kwargs = {}
if workdir:
kwargs.update({
'repository_directory' : workdir,
'fix_environment' : True
})
if accepted_tag_pattern:
kwargs.update({
'accepted_tag_pattern' : accepted_tag_pattern
})
describe = get_git_describe(**kwargs)
meta_branch = retrieve_current_branch(**kwargs)
version = compute_version(describe)
repositories_dir = mkdtemp(dir=os.curdir, prefix="build-repository-dependencies-")
for repository_dict in dependency_repositories:
if repository_dict.has_key('branch'):
branch = repository_dict['branch']
else:
branch = meta_branch
reference_repository = None
if cachedir:
reponame = urlsplit(repository_dict['url'])[2].split("/")[-1]
if reponame.endswith(".git"):
cachename = reponame[:-4]
else:
cachename = reponame
if os.path.exists(os.path.join(cachedir, cachename)):
reference_repository = os.path.abspath(os.path.join(cachedir, cachename))
elif os.path.exists(os.path.join(cachedir, cachename+".git")):
reference_repository = os.path.abspath(os.path.join(cachedir, cachename+".git"))
workdir = fetch_repository(repository_dict['url'], branch=branch, workdir=repositories_dir, reference_repository=reference_repository)
# this is the tag pattern for the dependency repo, NOT for ourselves -> its pattern, not ours
# now hardcoded, but shall be retrieved via egg_info or custom command
project_pattern = "%s-[0-9]*" % repository_dict['package_name']
new_version = compute_version(get_git_describe(repository_directory=workdir, fix_environment=True, accepted_tag_pattern=project_pattern))
if dependency_versions is not None:
dependency_versions[repository_dict['package_name']] = new_version
version = sum_versions(version, new_version)
return version
def undiacritic(text, encoding='utf-8'):
if type(text) == type(''):
text = unicode(text, encoding)
text = normalize('NFKD', text)
text = [char for char in text if not combining(char)]
text = ''.join(text)
return text
def get_branch_suffix(metadata, branch):
rename_map = getattr(metadata, "branch_rename_map", {
'automation' : 'auto',
'testomation' : 'test',
})
if branch in rename_map:
return rename_map[branch]
else:
# only [a-z0-9-] should be in the name (and no leading or trailing -)
# replace other chars and return "slugified" version
unixname = undiacritic(branch)
unixname = unixname.lower()
unixname = re.sub("[^a-z0-9]+", "-", unixname)
unixname = re.sub("^([^a-z0-9])+", "", unixname)
unixname = re.sub("([^a-z0-9]+)$", "", unixname)
return unixname
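# For illustration (hypothetical branch names): "automation" maps to "auto" via the
# default rename map above, while "Feature/Add_New_API" is slugified to "feature-add-new-api".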
class GitSetMetaVersion(config):
description = "calculate and set version from all dependencies"
user_options = [
("cache-directory=", None, "Directory where dependent repositories are cached in"),
]
def initialize_options(self):
self.cache_directory = None
def finalize_options(self):
self.cache_directory = self.cache_directory or None
def run(self):
"""
Clone all dependencies into a temporary directory. Collect git_set_version results for all
packages (including this one).
Update the version in all the same places as git_set_version does.
"""
try:
format = "%s-[0-9]*" % self.distribution.metadata.get_name()
dependency_versions = {}
meta_version = compute_meta_version(
self.distribution.dependencies_git_repositories,
accepted_tag_pattern = format,
cachedir = self.cache_directory,
dependency_versions = dependency_versions
)
branch_suffix = get_branch_suffix(self.distribution.metadata, retrieve_current_branch())
version = meta_version
version_str = '.'.join(map(str, version))
replace_inits(version, self.distribution.packages)
replace_scripts(version, self.distribution.py_modules)
replace_version_in_file(version, 'setup.py')
self.distribution.metadata.version = version_str
self.distribution.metadata.dependency_versions = dict([(k,'.'.join(map(str, v))) for k,v in dependency_versions.items()])
self.distribution.metadata.branch_suffix = branch_suffix
print "Current version is %s" % version_str
print "Current branch suffix is %s" % branch_suffix
except Exception:
import traceback
traceback.print_exc()
raise
class GitSetVersion(config):
description = "calculate version from git describe"
user_options = [
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
""" Compute current version for tag and git describe. Expects VERSION variable to be stored in
$name/__init__.py file (relatively placed to $cwd.) and to be a tuple of three integers.
Because of line endings, should be not run on Windows."""
try:
# format is given, sorry. If you want it configurable, use paver
format = "%s-[0-9]*" % self.distribution.metadata.get_name()
current_git_version = get_git_describe(accepted_tag_pattern=format)
branch_suffix = get_branch_suffix(self.distribution.metadata, retrieve_current_branch())
version = compute_version(current_git_version)
version_str = '.'.join(map(str, version))
replace_inits(version, self.distribution.packages)
replace_scripts(version, self.distribution.py_modules)
replace_version_in_file(version, 'setup.py')
if os.path.exists('pavement.py'):
replace_version_in_file(version, 'pavement.py')
self.distribution.metadata.version = version_str
self.distribution.metadata.branch_suffix = branch_suffix
print "Current version is %s" % version_str
print "Current branch suffix is %s" % branch_suffix
except Exception:
import traceback
traceback.print_exc()
raise
def validate_repositories(dist, attr, value):
# TODO:
# http://peak.telecommunity.com/DevCenter/setuptools#adding-setup-arguments
pass
|
|
# Load MNIST data
import input_data
import tensorflow as tf
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Tensorflow relies on a highly efficient C++ backend to do its
# computation. The connection to this backend is called a session. The
# common usage for TensorFlow programs is to first create a graph and then
# launch it in a session.
# Here we instead use the convenient InteractiveSession class, which makes
# TensorFlow more flexible about how you structure your code. It allows
# you to interleave operations which build a computation graph with ones
# that run the graph. This is particularly convenient when working in
# interactive contexts like IPython. If you are not using an
# InteractiveSession, then you should build the entire computation graph
# before starting a session and launching the graph.
sess = tf.InteractiveSession()
# BUILD A SOFTMAX REGRESSION MODEL --------
# We start building the computation graph by creating nodes for the input
# images and target output classes.
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
# Here x and y_ aren't specific values. Rather, they are each a
# placeholder -- a value that we'll input when we ask TensorFlow to run a
# computation.
# The input images x will consist of a 2d tensor of floating point
# numbers. Here we assign it a shape of [None, 784], where 784 is the
# dimensionality of a single flattened MNIST image, and None indicates
# that the first dimension, corresponding to the batch size, can be of any
# size. The target output classes y_ will also consist of a 2d tensor,
# where each row is a one-hot 10-dimensional vector indicating which digit
# class the corresponding MNIST image belongs to.
# The shape argument to placeholder is optional, but it allows TensorFlow
# to automatically catch bugs stemming from inconsistent tensor shapes.
# VARIABLES ---------------------
# We now define the weights W and biases b for our model. We could imagine
# treating these like additional inputs, but TensorFlow has an even better
# way to handle them: Variable. A Variable is a value that lives in
# TensorFlow's computation graph. It can be used and even modified by the
# computation. In machine learning applications, one generally has the
# model parameters be Variables.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# We pass the initial value for each parameter in the call to tf.Variable.
# In this case, we initialize both W and b as tensors full of zeros. W is
# a 784x10 matrix (because we have 784 input features and 10 outputs) and
# b is a 10-dimensional vector (because we have 10 classes).
# Before Variables can be used within a session, they must be initialized
# using that session. This step takes the initial values (in this case
# tensors full of zeros) that have already been specified, and assigns
# them to each Variable. This can be done for all Variables at once.
sess.run(tf.initialize_all_variables())
# PREDICTED CLASS AND COST FUNCTION ------------------------------------
# We can now implement our regression model. It only takes one line! We
# multiply the vectorized input images x by the weight matrix W, add the
# bias b, and compute the softmax probabilities that are assigned to each
# class.
y = tf.nn.softmax(tf.matmul(x, W) + b)
# The cost function to be minimized during training can be specified just
# as easily. Our cost function will be the cross-entropy between the
# target and the model's prediction.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
# Note that tf.reduce_sum sums across all images in the minibatch, as well
# as all classes. We are computing the cross entropy for the entire
# minibatch.
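# Note that taking tf.log of the softmax output can be numerically unstable when a
# probability underflows to zero. Depending on the TensorFlow version, a safer but
# roughly equivalent alternative is to keep the raw logits (tf.matmul(x, W) + b) and
# let tf.nn.softmax_cross_entropy_with_logits compute the softmax and cross-entropy
# together; the explicit form is kept here for clarity.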
# TRAINING THE MODEL ---------------------------------------------------
# Now that we have defined our model and training cost function, it is
# straightforward to train using TensorFlow. Because TensorFlow knows the
# entire computation graph, it can use automatic differentiation to find
# the gradients of the cost with respect to each of the variables.
# TensorFlow has a variety of builtin optimization algorithms. For this
# example, we will use steepest gradient descent, with a step length of
# 0.01, to descend the cross entropy.
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# What TensorFlow actually did in that single line was to add new
# operations to the computation graph. These operations included ones to
# compute gradients, compute parameter update steps, and apply update
# steps to the parameters.
# The returned operation train_step, when run, will apply the gradient
# descent updates to the parameters. Training the model can therefore be
# accomplished by repeatedly running train_step.
for i in range(1000):
batch = mnist.train.next_batch(50)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
# Each training iteration we load 50 training examples. We then run the
# train_step operation, using feed_dict to replace the placeholder tensors
# x and y_ with the training examples. Note that you can replace any
# tensor in your computation graph using feed_dict -- it's not restricted
# to just placeholders.
# EVALUATE THE MODEL ----------------------------------------------------
# First we'll figure out where we predicted the correct label. tf.argmax
# is an extremely useful function which gives you the index of the highest
# entry in a tensor along some axis. For example, tf.argmax(y,1) is the
# label our model thinks is most likely for each input, while
# tf.argmax(y_,1) is the true label. We can use tf.equal to check if our
# prediction matches the truth.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# That gives us a list of booleans. To determine what fraction are
# correct, we cast to floating point numbers and then take the mean. For
# example, [True, False, True, True] would become [1,0,1,1] which would
# become 0.75.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Finally, we can evaluate our accuracy on the test data. This should be
# about 91% correct.
print("--- ACCURACY WITH A SIMPLE SOFTMAX ---")
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
#########################################################################
# IMPROVING ACCURACY BY USING A SMALL CONVOLUTIONAL NETWORK
#########################################################################
# To create this model, we're going to need to create a lot of weights and
# biases. One should generally initialize weights with a small amount of
# noise for symmetry breaking, and to prevent 0 gradients. Since we're
# using ReLU neurons, it is also good practice to initialize them with a
# slightly positive initial bias to avoid "dead neurons." Instead of doing
# this repeatedly while we build the model, let's create two handy
# functions to do it for us.
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# CONVOLUTION AND POOLING -----------------------------------------------
# TensorFlow also gives us a lot of flexibility in convolution and pooling
# operations. How do we handle the boundaries? What is our stride size? In
# this example, we're always going to choose the vanilla version. Our
# convolutions use a stride of one and are zero padded so that the output
# is the same size as the input. Our pooling is plain old max pooling over
# 2x2 blocks. To keep our code cleaner, let's also abstract those
# operations into functions.
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# FIRST CONVOLUTIONAL LAYER --------------------------------------------
# We can now implement our first layer. It will consist of convolution,
# followed by max pooling. The convolution will compute 32 features for
# each 5x5 patch. Its weight tensor will have a shape of [5, 5, 1, 32].
# The first two dimensions are the patch size, the next is the number of
# input channels, and the last is the number of output channels. We will
# also have a bias vector with a component for each output channel.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# To apply the layer, we first reshape x to a 4d tensor, with the second
# and third dimensions corresponding to image width and height, and the
# final dimension corresponding to the number of color channels.
x_image = tf.reshape(x, [-1, 28, 28, 1])
# We then convolve x_image with the weight tensor, add the bias, apply the
# ReLU function, and finally max pool.
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# SECOND CONVOLUTIONAL LAYER --------------------------------------------
# In order to build a deep network, we stack several layers of this type.
# The second layer will have 64 features for each 5x5 patch.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
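# Shape bookkeeping (a quick sanity check): the 28x28 input stays 28x28 through the
# 'SAME'-padded convolutions, drops to 14x14 after the first 2x2 max pool and to 7x7
# after the second, with 64 feature maps -- which is where the 7*7*64 used below
# comes from.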
# DENSELY CONNECTED LAYER ------------------------------------------------
# Now that the image size has been reduced to 7x7, we add a
# fully-connected layer with 1024 neurons to allow processing on the
# entire image. We reshape the tensor from the pooling layer into a batch
# of vectors, multiply by a weight matrix, add a bias, and apply a ReLU.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# To reduce overfitting, we will apply dropout before the readout layer.
# We create a placeholder for the probability that a neuron's output is
# kept during dropout. This allows us to turn dropout on during training,
# and turn it off during testing. TensorFlow's tf.nn.dropout op
# automatically handles scaling neuron outputs in addition to masking
# them, so dropout just works without any additional scaling.
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# READOUT LAYER ---------------------------------------------------------
# Finally, we add a softmax layer, just like for the one layer softmax
# regression above.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# TRAIN AND EVALUATE THE MODEL -----------------------------------------
# How well does this model do? To train and evaluate it we will use code
# that is nearly identical to that for the simple one layer SoftMax
# network above. The differences are that: we will replace the steepest
# gradient descent optimizer with the more sophisticated ADAM optimizer;
# we will include the additional parameter keep_prob in feed_dict to
# control the dropout rate; and we will add logging to every 100th
# iteration in the training process.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.initialize_all_variables())
print("--- TRAINING WITH A CONVOLUTIONAL NETWORK ---")
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g" % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
|
|
import os
import sys
import numpy as np
from astropy.io import fits as pf
from sklearn.neighbors import KernelDensity as kde
from scipy import integrate
import camb
from camb import model
from scipy.special import j0
from scipy import interpolate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D as ax3d
from skmonaco import mcquad
from skmonaco import mcmiser
import time
#Import Adam's data as a low-z test
#data = '/Users/johntimlin/Clustering/Myers2006/Myers2006_dr1_test.fits'
#obs = pf.open(data)[1].data
#gdx = ((obs.ZSPEC >= 0.4)&(obs.ZSPEC <= 3))
#gdx = ((obs.zphot <= 2.1) & (obs.zphot >= 0.4) & (obs.ZSPEC > 0))
#Compute the redshift percentiles for the Freedman-Diaconis rule for the bin width
#q75, q25 = np.percentile(obs.ZSPEC[gdx], [75 ,25])
#iqr = q75 - q25
#FD = 2*iqr /(len(obs.ZSPEC[gdx]))**(1/3.0)
#Set up the bin range using the Freedman-Diaconis bin width
#bins = np.arange(min(obs.ZSPEC[gdx]),max(obs.ZSPEC[gdx]),FD)
#num,bins = np.histogram(obs.ZSPEC[gdx],bins,normed=True)
#Import SpIES / SHELA data
data = '../Data_Sets/QSO_Candidates_allcuts_with_errors_visualinsp.fits'
obs = pf.open(data)[1].data
Z = obs.zphotNW
gdx = ((Z >= 3.4)&(Z <= 5.2) & (obs.Good_obj == 0)) & (obs.dec>=-1.2) & (obs.dec<=1.2)
#gdx = Z>0
#Set up a KDE for dNdz
tmpz = Z[gdx][:, np.newaxis] #reshape from (N,) to an (N, 1) column, as sklearn expects
print np.shape(tmpz)
sample_range = np.linspace(min(tmpz[:, 0]), max(tmpz[:, 0]), len(tmpz[:, 0]))[:, np.newaxis]
est = kde(bandwidth=0.1,kernel='epanechnikov') #Set up the Kernel
histkde = est.fit(tmpz).score_samples(sample_range) #fit the kernel to the data and find the density of the grid
#Interpolate (you get the same function back) to plug in any z in the range (as opposed to set z values)
dNdz = interpolate.interp1d(sample_range.flatten(),np.exp(histkde))
print sample_range.flatten()
print 'done'
ZE = np.linspace(min(Z),max(Z),100)
xo=integrate.quad(dNdz,min(sample_range),max(sample_range)) #quad(f(x),xlower,xupper, args)
print xo
'''
#Plot the KDE dndz
plt.plot(sample_range[:,0],np.exp(histkde))
plt.xlabel('z')
#plt.plot(sample_range[:,0],dNdz(sample_range[:,0]))
#plt.plot(bins[:-1],num,linestyle = 'steps-mid')
ZE = np.linspace(min(Z),max(Z),100)
xo=integrate.quad(dNdz,min(sample_range),max(sample_range)) #quad(f(x),xlower,xupper, args)
print xo
plt.savefig('dndz.png')
plt.show()
'''
# Compute the matter power spectrum from CAMB and Generate the P(z,k) function to output the power at any given redshift
#and wavenumber
#First define Planck 2015 cosmological parameters
H = 70 #H0.
oc = 0.229 #physical density of CDM
ob = 0.046 #physical density of baryons
#Conversion to density param: Omega_Matter = (oc+ob)/(H0/100.)**2
#Set up parameters in CAMB
pars = camb.CAMBparams()
#H0 is hubble parameter at z=0, ombh2 is the baryon density (physical), omch2 is the matter density (physical)
#mnu is sum of neutrino masses, omk is curvature parameter (set to 0 for flat), meffsterile is effective mass of sterile neutrinos
#pars.set_cosmology(H0=H,ombh2=ob, omch2=oc,omk=0)#,mnu=0,meffsterile=0)
#Hard code the cosmology params
pars.H0=H #hubble param (No h!!)
pars.omegab=ob #Baryon density parameter
pars.omegac=oc #CDM density parameter
pars.omegav=0.725 #Vacuum density parameter
pars.set_dark_energy()
#Set parameters using standard power law parameterization.If nt=None, uses inflation consistency relation.
#ns is the scalar spectral index
pars.InitPower.set_params(ns=0.960)
camb.set_halofit_version(version='original') #uses the Smith 2003 halo model
ze=np.linspace(0,20,150)
ka=np.logspace(-4,2,len(ze))#np.linspace(0,10,100)
#Get the matter power spectrum interpolation object (based on RectBivariateSpline).
#pars: input parameters, zs: redshift range, nonlinear: generate nonlinear power spectrum, hubble_units=True: output as Mpc/h^3
#instead of Mpc^3
PK = camb.get_matter_power_interpolator(pars,zs = ze,zmax = ze[-1], nonlinear=True, hubble_units=False, k_hunit=True, kmax = ka[-1])
#Generate the power using the interpolator and the z and k arrays
#Power = PK.P(z,k)
def dimpower(Pk,z,k):
delta = Pk.P(z,k) * k**3/(2*np.pi**2)
return delta
def domega(kz,theta,cambpars,H0,dndz,Power,OmegaM,OmegaL,evalint=False):
if evalint == True:
#Use this if integrating ln(10)k dlog(k)
#start = time.time()
k=kz[0]
z=kz[1]
bkg = camb.get_background(cambpars)
x = 10**k * (theta/60./180.*np.pi) * bkg.comoving_radial_distance(z)
om = (H0/3.0e5) * 10**(-k) * dimpower(Power,z,10**k) * dndz(z)**2 * j0(x) * (OmegaM*(1+z)**3+OmegaL)**0.5*np.log(10)
#end = time.time()
#print end-start
## Use this if integrating dk
#x = k * theta * bkg.comoving_radial_distance(z)
#om = (H0/3.0e5) * k**-2 * dimpower(Power,z,k) * dndz(z)**2 * j0(x) * (OmegaM*(1+z)**3+OmegaL)**0.5
if evalint == False:
#project the z array onto new axis to output a matrix when evaluating in k and z. This allows
#me to plot a wireframe 3d plot
#k=kz[0]
#z=kz[1]
z = np.array(z)
z = z[:,np.newaxis]
bkg = camb.get_background(cambpars)
x = k * theta * bkg.comoving_radial_distance(z)
om = (H0/3.0e5) * k**-2 * dimpower(Power,z,k) * dndz(z)**2 * j0(x) * (OmegaM*(1+z)**3+OmegaL)**0.5
return om
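#A short note on the evalint=True branch above (sketching the substitution used):
#the dk integral is rewritten in u = log10(k), i.e. k = 10**u and dk = ln(10)*10**u du,
#so the k**-2 factor of the dk integrand becomes ln(10)*10**(-u) -- exactly the
#10**(-k)*...*np.log(10) combination in om, where the variable named k is really u.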
'''
#parameters if integrate == False
theta = 1./60./180.*np.pi # radians = arcmin/60/180*pi
z = np.linspace(2.91,5.1,100)
k = np.logspace(-3,2,100)
omegaM = (oc+ob)/(H/100.)**2
omegaL= 1.0-omegaM
#Generate the surface under which to integrate
surf = domega(k,z,theta,pars,H,dNdz,PK,omegaM,omegaL)
#Set up meshgrid such that z iterates over the columns and k iterates over the rows
K,Z = np.meshgrid(k,z)
plt.figure(4)
plt.plot(K[0],surf[0])
plt.xscale('log')
plt.xlabel('k')
plt.ylabel(r'$\delta^2$w')
plt.figure(5)
plt.plot(Z[:,0],surf[:,0])
plt.xscale('linear')
plt.xlabel('z')
plt.ylabel(r'$\delta^2$w')
fig = plt.figure(6)
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(Z,np.log10(K),surf)
ax.set_xlabel('z')
ax.set_ylabel('log10(k)')
ax.set_zlabel(r'$\delta^2$w')
plt.show()
'''
#Integrate using mcmiser
omegaM = (oc+ob)#/(H/100.)**2
omegaL= 1.0-omegaM
print H,omegaM, omegaL, omegaM+omegaL
print 'begin integration'
s= time.time()
#mcquad(fn, xl=[0.,0.], xu=[1.,1.], npoints=100000, args=...): fn is the integrand, xl/xu are the lower and upper integration limits, npoints is the number of sample points
newtheta = np.logspace(-1.3,2.5,20)
mclimber = []
for i in range(len(newtheta)):
thetas = newtheta[i]
test = mcmiser(domega, xl=[-4.0,3.41], xu=[1.0,5.1], npoints=1e3, args=(thetas,pars,H,dNdz,PK,omegaM,omegaL,True))
mclimber.append(test[0])
print mclimber
e=time.time()
print e-s
'''
latest run:
mcmiser(domega, xl=[-3.0,3.45], xu=[2.0,5.0], npoints=1e6, args=(thetas,pars,H,dNdz,PK,omegaM,omegaL,True))
[0.0018763493756045195, 0.0015591052537067829, 0.0013261541719343291, 0.0011664782432483816, 0.0010404309744665909, 0.00091741906231659518, 0.00078667114128277277, 0.00064789973106323866, 0.0005049509301372051, 0.00036797906601997838, 0.00024422862731641093, 0.00014404571216926446, 7.2933582496721974e-05, 2.9223826003039019e-05, 7.8230852216102688e-06, 2.9890491694937377e-06, -2.307437559147607e-06, -9.1226385750823894e-07, -3.9755941765663542e-07, 1.3928717601483434e-08]
141181.353475 s
new candidates, 10**3points
[0.0019430243038571534, 0.0016349397131697643, 0.0015559643190088466, 0.0011592312843893796, 0.001045982603488736, 0.00095526409517522886, 0.00093113611560497887, 0.0005889401612489372, 0.00053144714843557936, 0.00038853567370124737, 0.00025666765171879647, 0.00016544957819145055, 9.8265639739552113e-05, 3.3731282373988794e-05, 8.4752026179249433e-06, 2.2529810568760694e-06, 9.1571876941527249e-06, -7.5021177212707544e-07, 4.2410939833994758e-06, 3.9566810872630768e-06]
shen candidates: newtheta = np.logspace(-1.3,2.5,20)
mcmiser(domega, xl=[-3.0,2.91], xu=[2.0,5.17], npoints=1e6, args=(thetas,pars,H,dNdz,PK,omegaM,omegaL,True))
[0.0018358807532616219, 0.0015034895403743954, 0.0012276746859320596, 0.0010369278499846939, 0.00090159800775010729, 0.00078828444848061288, 0.00067568885621950841, 0.00055784990591065565, 0.00043864978763109299, 0.00032197840731266829, 0.00021621673957789532, 0.0001293993054038773, 6.6678330899456382e-05, 2.7563877682033188e-05, 7.9067731028462201e-06, 2.9283435400988902e-06, -2.2004904973685094e-06, -8.6505180997999433e-07, -3.2480646807619417e-07, 7.9393559384844712e-08]
10^3 points changing to z = 5.1 on the upper limit of the integral
[0.0031072154820804372, 0.0024773982340020656, 0.0022069831939996406, 0.0018249231279954346, 0.0016802887745281424, 0.0014562986726930265, 0.0012651874250202608, 0.0010400616665105426, 0.00080494654363068101, 0.0005982830063258948, 0.00038513590577395919, 0.00026714928016567424, 0.00014338341872873156, 4.9450665812679637e-05, 1.782600514223763e-05, 5.0932795884699636e-06, 1.4925594883012705e-05, -4.9547953675508698e-06, 4.5836346273833925e-06, 3.8992113097562235e-06]
[0.0022529517590044938, 0.0021280436475443168, 0.0018731349354724374, 0.0015033947078234348, 0.0013070363461209996, 0.0011766368407685472, 0.0010140398851336263, 0.00083560744525899085, 0.00065508092975343803, 0.00047837963299693522, 0.0003187878517245088, 0.0001885268241591017, 9.5947162744095565e-05, 3.876918723162803e-05, 1.0394048795964464e-05, 3.7806009976488573e-06, -3.1705205023784285e-06, -1.3079909197198175e-06, -1.1571078800356848e-06, 2.4679274288594045e-07]
'''
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from keystoneclient.common import cms
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone.contrib import federation
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import importutils
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
LOG = log.getLogger(__name__)
CONF = config.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try and load methods a single time.
return
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
config.setup_authentication()
for plugin in CONF.auth.methods:
if '.' in plugin:
# NOTE(morganfainberg): if '.' is in the plugin name, it should be
# imported rather than used as a plugin identifier.
plugin_class = plugin
driver = importutils.import_object(plugin)
if not hasattr(driver, 'method'):
raise ValueError(_('Cannot load an auth-plugin by class-name '
'without a "method" attribute defined: %s'),
plugin_class)
else:
plugin_class = CONF.auth.get(plugin)
driver = importutils.import_object(plugin_class)
if hasattr(driver, 'method'):
if driver.method != plugin:
raise ValueError(_('Driver requested method %(req)s does '
'not match plugin name %(plugin)s.') %
{'req': driver.method,
'plugin': plugin})
else:
LOG.warning(_('Auth Plugin %s does not have a "method" '
'attribute.'), plugin)
setattr(driver, 'method', plugin)
if driver.method in AUTH_METHODS:
raise ValueError(_('Auth plugin %(plugin)s is requesting '
'previously registered method %(method)s') %
{'plugin': plugin_class, 'method': driver.method})
AUTH_METHODS[driver.method] = driver
AUTH_PLUGINS_LOADED = True
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
# TODO(blk-u): this class doesn't use identity_api directly, but makes it
# available for consumers. Consumers should probably not be getting
# identity_api from this since it's available in global registry, then
# identity_api should be removed from this list.
@dependency.requires('assignment_api', 'identity_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(context, auth=None):
auth_info = AuthInfo(context, auth=auth)
auth_info._validate_and_normalize_auth_data()
return auth_info
def __init__(self, context, auth=None):
self.context = context
self.auth = auth
self._scope_data = (None, None, None)
# self._scope_data is (domain_id, project_id, trust_ref)
# project scope: (None, project_id, None)
# domain scope: (domain_id, None, None)
# trust scope: (None, None, trust_ref)
# unscoped: (None, None, None)
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
if not project_ref.get('enabled', True):
msg = _('Project is disabled: %s') % project_ref['id']
LOG.warning(msg)
raise exception.Unauthorized(msg)
def _assert_domain_is_enabled(self, domain_ref):
if not domain_ref.get('enabled'):
msg = _('Domain is disabled: %s') % (domain_ref['id'])
LOG.warning(msg)
raise exception.Unauthorized(msg)
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
domain_ref = self.assignment_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.assignment_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.assignment_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.assignment_api.get_project(project_id)
except exception.ProjectNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _lookup_trust(self, trust_info):
trust_id = trust_info.get('id')
if not trust_id:
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
return trust
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'OS-TRUST:trust' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain, or OS-TRUST:trust',
target='scope')
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None)
elif 'OS-TRUST:trust' in self.auth['scope']:
if not CONF.trust.enabled:
raise exception.Forbidden('Trusts are disabled.')
trust_ref = self._lookup_trust(
self.auth['scope']['OS-TRUST:trust'])
# TODO(ayoung): when trusts support domains, fill in domain data
if 'project_id' in trust_ref:
project_ref = self._lookup_project(
{'id': trust_ref['project_id']})
self._scope_data = (None, project_ref['id'], trust_ref)
else:
self._scope_data = (None, None, trust_ref)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self):
"""Make sure "auth" is valid."""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Returns the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
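# For illustration (hypothetical request body): methods listed as
# ['password', 'token', 'password'] come back as ['password', 'token'],
# preserving order while dropping the duplicate.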
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, trust_ref).
If scope to a project, (None, project_id, None)
will be returned.
If scoped to a domain, (domain_id, None, None)
will be returned.
If scoped to a trust, (None, project_id, trust_ref)
will be returned, where the project_id comes from the
trust definition.
If unscoped, (None, None, None) will be returned.
"""
return self._scope_data
def set_scope(self, domain_id=None, project_id=None, trust=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
if domain_id and trust:
msg = _('Scoping to both domain and trust is not allowed')
raise ValueError(msg)
if project_id and trust:
msg = _('Scoping to both project and trust is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, trust)
@dependency.requires('assignment_api', 'identity_api', 'token_api',
'token_provider_api', 'trust_api')
class Auth(controller.V3Controller):
# Note(atiwari): From V3 auth controller code we are
# calling protection() wrappers, so we need to setup
# the member_name and collection_name attributes of
# auth controller code.
# In the absence of these attributes, default 'entity'
# string will be used to represent the target which is
# generic. Policy can be defined using 'entity' but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
config.setup_authentication()
def authenticate_for_token(self, context, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in context['query_string']
try:
auth_info = AuthInfo.create(context, auth=auth)
auth_context = {'extras': {}, 'method_names': [], 'bind': {}}
self.authenticate(context, auth_info, auth_context)
if auth_context.get('access_token_id'):
auth_info.set_scope(None, auth_context['project_id'], None)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, trust) = auth_info.get_scope()
if trust:
self.trust_api.consume_use(trust['id'])
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
domain_id, auth_context, trust, metadata_ref, include_catalog)
return render_token_data_response(token_id, token_data,
created=True)
except exception.TrustNotFound as e:
raise exception.Unauthorized(e)
def _check_and_set_default_scoping(self, auth_info, auth_context):
(domain_id, project_id, trust) = auth_info.get_scope()
if trust:
project_id = trust['project_id']
if domain_id or project_id or trust:
# scope is specified
return
# Skip scoping when unscoped federated token is being issued
if federation.IDENTITY_PROVIDER in auth_context:
return
# fill in default_project_id if it is available
try:
user_ref = self.identity_api.get_user(auth_context['user_id'])
except exception.UserNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
default_project_id = user_ref.get('default_project_id')
if not default_project_id:
# User has no default project. He shall get an unscoped token.
return
# make sure user's default project is legit before scoping to it
try:
default_project_ref = self.assignment_api.get_project(
default_project_id)
default_project_domain_ref = self.assignment_api.get_domain(
default_project_ref['domain_id'])
if (default_project_ref.get('enabled', True) and
default_project_domain_ref.get('enabled', True)):
if self.assignment_api.get_roles_for_user_and_project(
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token will"
" be unscoped rather than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _("User %(user_id)s's default project %(project_id)s is"
" disabled. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
# user has been authenticated externally
if 'REMOTE_USER' in context['environment']:
external = get_auth_method('external')
external.authenticate(context, auth_info, auth_context)
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(context,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
@controller.protected()
def check_token(self, context):
token_id = context.get('subject_token_id')
self.token_provider_api.check_v3_token(token_id)
@controller.protected()
def revoke_token(self, context):
token_id = context.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected()
def validate_token(self, context):
token_id = context.get('subject_token_id')
include_catalog = 'nocatalog' not in context['query_string']
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = json.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
|
|
import datetime
import decimal
from django.db import models
from django.db.models.sql.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.related import RelatedObject
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.encoding import force_unicode, smart_unicode, smart_str
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if ((hasattr(field, 'rel') and
isinstance(field.rel, models.ManyToManyRel)) or
(isinstance(field, models.related.RelatedObject) and
not field.field.unique)):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and false
if key.endswith('__isnull'):
if value.lower() in ('', 'false'):
value = False
else:
value = True
return value
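# For illustration (hypothetical lookups): ('author__in', 'alice,bob') becomes
# ['alice', 'bob'], ('pub_date__isnull', 'false') becomes False, and
# ('pub_date__isnull', '1') becomes True.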
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' characters. Similar to urllib.quote, except that the
quoting is slightly different so that it doesn't get automatically
unquoted by the Web browser.
"""
if not isinstance(s, basestring):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
# type checking feels dirty, but it seems like the best way here
if type(field) == tuple:
field_names.extend(field)
else:
field_names.append(field)
return field_names
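# For illustration (hypothetical fieldsets): [(None, {'fields': ('url', ('title', 'content'))})]
# flattens to ['url', 'title', 'content'] -- nested tuples are expanded in place.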
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = '%s.%s' % (opts.app_label,
opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe(u'%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return u'%s: %s' % (capfirst(opts.verbose_name),
force_unicode(obj))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr:
self.add_edge(getattr(obj, source_attr), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
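# For illustration (hypothetical instances): if an article has two related comments,
# nested() returns [article, [comment1, comment2]] -- the nested-list shape expected
# by the 'unordered_list' template filter.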
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_unicode(opts.verbose_name),
'verbose_name_plural': force_unicode(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and hasattr(model_admin, name) and
not name == '__str__' and not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable, the
name of an object attribute, or a genuine field name. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = model._meta.get_field_by_name(name)[0]
if isinstance(field, RelatedObject):
label = field.opts.verbose_name
else:
label = field.verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_unicode(model._meta.verbose_name)
attr = unicode
elif name == "__str__":
label = smart_str(model._meta.verbose_name)
attr = str
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
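# Sketch of the three label sources handled above, for a hypothetical model
# class and ModelAdmin: a genuine field, a dunder name, and an admin callable.
def _example_label_for_field_usage(article_model, article_admin):
    from_field = label_for_field('title', article_model)         # field verbose_name
    from_dunder = label_for_field('__unicode__', article_model)  # model verbose_name
    from_admin = label_for_field('word_count', article_model, article_admin)
    return from_field, from_dunder, from_admin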
def help_text_for_field(name, model):
try:
help_text = model._meta.get_field_by_name(name)[0].help_text
except models.FieldDoesNotExist:
help_text = ""
return smart_unicode(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_unicode(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, (decimal.Decimal, float, int, long)):
return formats.number_format(value)
else:
return smart_unicode(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if isinstance(field, models.related.RelatedObject):
return field.model
elif getattr(field, 'rel'): # or isinstance?
return field.rel.to
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field, model, direct, m2m = parent._meta.get_field_by_name(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces)-1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
if direct:
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field_by_name(piece)[0])
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a `limit_choices_to` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'limit_choices_to', None))
if not limit_choices_to:
return models.Q() # empty Q
elif isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
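# Hypothetical example: if `Book.author` is a ForeignKey declared with
# limit_choices_to={'is_active': True}, the path 'author' yields that
# restriction as a Q object; an unrestricted path yields an empty Q().
def _example_limit_choices_usage(book_model):
    return get_limit_choices_to_from_path(book_model, 'author')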
|
|
# Copyright 2014 Blue Box Group, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from mox3 import mox
from neutronclient.neutron.v2_0.lb.v2 import loadbalancer as lb
from neutronclient.tests.unit import test_cli20
class CLITestV20LbLoadBalancerJSON(test_cli20.CLITestV20Base):
def test_create_loadbalancer_with_mandatory_params(self):
# lbaas-loadbalancer-create with mandatory params only.
resource = 'loadbalancer'
cmd_resource = 'lbaas_loadbalancer'
cmd = lb.CreateLoadBalancer(test_cli20.MyApp(sys.stdout), None)
name = 'lbaas-loadbalancer-name'
vip_subnet_id = 'vip-subnet'
my_id = 'my-id'
args = [vip_subnet_id]
position_names = ['vip_subnet_id']
position_values = [vip_subnet_id]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
cmd_resource=cmd_resource)
def test_create_loadbalancer_with_all_params(self):
# lbaas-loadbalancer-create with all params set.
resource = 'loadbalancer'
cmd_resource = 'lbaas_loadbalancer'
cmd = lb.CreateLoadBalancer(test_cli20.MyApp(sys.stdout), None)
name = 'lbaas-loadbalancer-name'
description = 'lbaas-loadbalancer-desc'
flavor_id = 'lbaas-loadbalancer-flavor'
vip_subnet_id = 'vip-subnet'
my_id = 'my-id'
args = ['--admin-state-down', '--description', description,
'--name', name, '--flavor', flavor_id, vip_subnet_id]
position_names = ['admin_state_up', 'description', 'name',
'flavor_id', 'vip_subnet_id']
position_values = [False, description, name, flavor_id, vip_subnet_id]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
cmd_resource=cmd_resource)
def test_list_loadbalancers(self):
# lbaas-loadbalancer-list.
resources = 'loadbalancers'
cmd_resources = 'lbaas_loadbalancers'
cmd = lb.ListLoadBalancer(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True,
cmd_resources=cmd_resources)
def test_list_loadbalancers_pagination(self):
# lbaas-loadbalancer-list with pagination.
resources = 'loadbalancers'
cmd_resources = 'lbaas_loadbalancers'
cmd = lb.ListLoadBalancer(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd,
cmd_resources=cmd_resources)
def test_list_loadbalancers_sort(self):
        # lbaas-loadbalancer-list --sort-key name --sort-key id
        # --sort-dir asc --sort-dir desc
resources = 'loadbalancers'
cmd_resources = 'lbaas_loadbalancers'
cmd = lb.ListLoadBalancer(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"],
cmd_resources=cmd_resources)
def test_list_loadbalancers_limit(self):
# lbaas-loadbalancer-list -P.
resources = 'loadbalancers'
cmd_resources = 'lbaas_loadbalancers'
cmd = lb.ListLoadBalancer(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000,
cmd_resources=cmd_resources)
def test_show_loadbalancer_id(self):
        # lbaas-loadbalancer-show test_id.
resource = 'loadbalancer'
cmd_resource = 'lbaas_loadbalancer'
cmd = lb.ShowLoadBalancer(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'],
cmd_resource=cmd_resource)
def test_show_loadbalancer_id_name(self):
        # lbaas-loadbalancer-show.
resource = 'loadbalancer'
cmd_resource = 'lbaas_loadbalancer'
cmd = lb.ShowLoadBalancer(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'],
cmd_resource=cmd_resource)
def _test_update_lb(self, args, expected_values):
resource = 'loadbalancer'
cmd_resource = 'lbaas_loadbalancer'
my_id = 'myid'
args.insert(0, my_id)
cmd = lb.UpdateLoadBalancer(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, my_id,
args, expected_values,
cmd_resource=cmd_resource)
def test_update_loadbalancer(self):
# lbaas-loadbalancer-update myid --name newname.
self._test_update_lb(['--name', 'newname'], {'name': 'newname', })
# lbaas-loadbalancer-update myid --description check.
self._test_update_lb(['--description', 'check'],
{'description': 'check', })
# lbaas-loadbalancer-update myid --admin-state-up False.
self._test_update_lb(['--admin-state-up', 'False'],
{'admin_state_up': 'False', })
def test_delete_loadbalancer(self):
        # lbaas-loadbalancer-delete my-id.
resource = 'loadbalancer'
cmd_resource = 'lbaas_loadbalancer'
cmd = lb.DeleteLoadBalancer(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args,
cmd_resource=cmd_resource)
def test_retrieve_loadbalancer_stats(self):
# lbaas-loadbalancer-stats test_id.
resource = 'loadbalancer'
cmd = lb.RetrieveLoadBalancerStats(test_cli20.MyApp(sys.stdout), None)
my_id = self.test_id
fields = ['bytes_in', 'bytes_out']
args = ['--fields', 'bytes_in', '--fields', 'bytes_out', my_id]
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
query = "&".join(["fields=%s" % field for field in fields])
expected_res = {'stats': {'bytes_in': '1234', 'bytes_out': '4321'}}
resstr = self.client.serialize(expected_res)
path = getattr(self.client, "lbaas_loadbalancer_path_stats")
return_tup = (test_cli20.MyResp(200), resstr)
self.client.httpclient.request(
test_cli20.end_url(path % my_id, query), 'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(return_tup)
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("test_" + resource)
parsed_args = cmd_parser.parse_args(args)
cmd.run(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('bytes_in', _str)
self.assertIn('1234', _str)
self.assertIn('bytes_out', _str)
self.assertIn('4321', _str)
def test_get_loadbalancer_statuses(self):
# lbaas-loadbalancer-status test_id.
resource = 'loadbalancer'
cmd = lb.RetrieveLoadBalancerStatus(test_cli20.MyApp(sys.stdout), None)
my_id = self.test_id
args = [my_id]
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
expected_res = {'statuses': {'operating_status': 'ONLINE',
'provisioning_status': 'ACTIVE'}}
resstr = self.client.serialize(expected_res)
path = getattr(self.client, "lbaas_loadbalancer_path_status")
return_tup = (test_cli20.MyResp(200), resstr)
self.client.httpclient.request(
test_cli20.end_url(path % my_id), 'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(return_tup)
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("test_" + resource)
parsed_args = cmd_parser.parse_args(args)
cmd.run(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('operating_status', _str)
self.assertIn('ONLINE', _str)
self.assertIn('provisioning_status', _str)
self.assertIn('ACTIVE', _str)
|
|
#!/usr/bin/env python
from __future__ import absolute_import
import logging
import os
import optparse
import warnings
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "6.1.0.dev0"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash or zsh).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands_dict[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
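# Illustrative sketch (not part of pip): how a sourced completion script
# drives autocomplete().  The variable values below are hypothetical.
def _example_autocomplete_invocation():
    os.environ['PIP_AUTO_COMPLETE'] = '1'
    os.environ['COMP_WORDS'] = 'pip ins'  # partial command line typed so far
    os.environ['COMP_CWORD'] = '1'        # index of the word being completed
    try:
        autocomplete()  # prints matching subcommands (e.g. 'install') and exits
    except SystemExit:
        pass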
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
    # general_options: ['--timeout=5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
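# Minimal sketch of the split performed by parseopts(): general options are
# separated from the subcommand and its own arguments.
def _example_parseopts_usage():
    cmd_name, cmd_args = parseopts(['--timeout=5', 'install', '--user', 'INITools'])
    # cmd_name == 'install'
    # cmd_args == ['--timeout=5', '--user', 'INITools']
    return cmd_name, cmd_args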
def check_isolated(args):
isolated = False
if "--isolated" in args:
isolated = True
return isolated
def main(args=None):
if args is None:
args = sys.argv[1:]
# Enable our Deprecation Warnings
for deprecation_warning in deprecation.DEPRECATIONS:
warnings.simplefilter("default", deprecation_warning)
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parseopts(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links, find_tags=False):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location, find_tags)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
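# Hedged sketch of the freeze-line format produced by FrozenRequirement.__str__:
# editable requirements are prefixed with '-e' and preceded by their comments.
# The project name and URL below are hypothetical.
def _example_frozen_requirement_str():
    frozen = FrozenRequirement(
        'example-pkg',
        'git+https://example.invalid/repo#egg=example-pkg',
        editable=True,
        comments=['## hypothetical comment'],
    )
    # Result: '## hypothetical comment\n'
    #         '-e git+https://example.invalid/repo#egg=example-pkg\n'
    return str(frozen)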
if __name__ == '__main__':
sys.exit(main())
|
|
"""Various helper functions"""
import asyncio
import base64
import datetime
import functools
import io
import os
import re
from urllib.parse import quote, urlencode
from collections import namedtuple
from pathlib import Path
from . import hdrs, multidict
from .errors import InvalidURL
try:
from asyncio import ensure_future
except ImportError:
ensure_future = asyncio.async
__all__ = ('BasicAuth', 'FormData', 'parse_mimetype', 'Timeout')
class BasicAuth(namedtuple('BasicAuth', ['login', 'password', 'encoding'])):
"""Http basic authentication helper.
:param str login: Login
:param str password: Password
:param str encoding: (optional) encoding ('latin1' by default)
"""
def __new__(cls, login, password='', encoding='latin1'):
if login is None:
raise ValueError('None is not allowed as login value')
if password is None:
raise ValueError('None is not allowed as password value')
return super().__new__(cls, login, password, encoding)
def encode(self):
"""Encode credentials."""
creds = ('%s:%s' % (self.login, self.password)).encode(self.encoding)
return 'Basic %s' % base64.b64encode(creds).decode(self.encoding)
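# Quick sketch: building an Authorization header value from credentials.
def _example_basic_auth_usage():
    auth = BasicAuth('user', 'pass')
    return auth.encode()  # 'Basic dXNlcjpwYXNz'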
class FormData:
"""Helper class for multipart/form-data and
application/x-www-form-urlencoded body generation."""
def __init__(self, fields=()):
from . import multipart
self._writer = multipart.MultipartWriter('form-data')
self._fields = []
self._is_multipart = False
if isinstance(fields, dict):
fields = list(fields.items())
elif not isinstance(fields, (list, tuple)):
fields = (fields,)
self.add_fields(*fields)
@property
def is_multipart(self):
return self._is_multipart
@property
def content_type(self):
if self._is_multipart:
return self._writer.headers[hdrs.CONTENT_TYPE]
else:
return 'application/x-www-form-urlencoded'
def add_field(self, name, value, *, content_type=None, filename=None,
content_transfer_encoding=None):
if isinstance(value, io.IOBase):
self._is_multipart = True
elif isinstance(value, (bytes, bytearray, memoryview)):
if filename is None and content_transfer_encoding is None:
filename = name
type_options = multidict.MultiDict({'name': name})
if filename is not None and not isinstance(filename, str):
raise TypeError('filename must be an instance of str. '
'Got: %s' % filename)
if filename is None and isinstance(value, io.IOBase):
filename = guess_filename(value, name)
if filename is not None:
type_options['filename'] = filename
self._is_multipart = True
headers = {}
if content_type is not None:
if not isinstance(content_type, str):
raise TypeError('content_type must be an instance of str. '
'Got: %s' % content_type)
headers[hdrs.CONTENT_TYPE] = content_type
self._is_multipart = True
if content_transfer_encoding is not None:
if not isinstance(content_transfer_encoding, str):
raise TypeError('content_transfer_encoding must be an instance'
' of str. Got: %s' % content_transfer_encoding)
headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding
self._is_multipart = True
self._fields.append((type_options, headers, value))
def add_fields(self, *fields):
to_add = list(fields)
while to_add:
rec = to_add.pop(0)
if isinstance(rec, io.IOBase):
k = guess_filename(rec, 'unknown')
self.add_field(k, rec)
elif isinstance(rec,
(multidict.MultiDictProxy,
multidict.MultiDict)):
to_add.extend(rec.items())
elif isinstance(rec, (list, tuple)) and len(rec) == 2:
k, fp = rec
self.add_field(k, fp)
else:
raise TypeError('Only io.IOBase, multidict and (name, file) '
'pairs allowed, use .add_field() for passing '
'more complex parameters')
def _gen_form_urlencoded(self, encoding):
# form data (x-www-form-urlencoded)
data = []
for type_options, _, value in self._fields:
data.append((type_options['name'], value))
data = urlencode(data, doseq=True)
return data.encode(encoding)
def _gen_form_data(self, *args, **kwargs):
"""Encode a list of fields using the multipart/form-data MIME format"""
for dispparams, headers, value in self._fields:
part = self._writer.append(value, headers)
if dispparams:
part.set_content_disposition('form-data', **dispparams)
            # FIXME cgi.FieldStorage doesn't like body parts with
# Content-Length which were sent via chunked transfer encoding
part.headers.pop(hdrs.CONTENT_LENGTH, None)
yield from self._writer.serialize()
def __call__(self, encoding):
if self._is_multipart:
return self._gen_form_data(encoding)
else:
return self._gen_form_urlencoded(encoding)
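# Hedged sketch of the two body modes: plain url-encoding by default, and
# multipart once a file-like value is added.
def _example_form_data_usage():
    form = FormData({'name': 'value'})
    body = form('utf-8')  # b'name=value' (application/x-www-form-urlencoded)
    form.add_field('upload', io.BytesIO(b'data'), filename='data.bin')
    return body, form.is_multipart  # (b'name=value', True)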
def parse_mimetype(mimetype):
"""Parses a MIME type into its components.
:param str mimetype: MIME type
:returns: 4 element tuple for MIME type, subtype, suffix and parameters
:rtype: tuple
Example:
>>> parse_mimetype('text/html; charset=utf-8')
('text', 'html', '', {'charset': 'utf-8'})
"""
if not mimetype:
return '', '', '', {}
parts = mimetype.split(';')
params = []
for item in parts[1:]:
if not item:
continue
key, value = item.split('=', 1) if '=' in item else (item, '')
params.append((key.lower().strip(), value.strip(' "')))
params = dict(params)
fulltype = parts[0].strip().lower()
if fulltype == '*':
fulltype = '*/*'
mtype, stype = fulltype.split('/', 1) \
if '/' in fulltype else (fulltype, '')
stype, suffix = stype.split('+', 1) if '+' in stype else (stype, '')
return mtype, stype, suffix, params
def str_to_bytes(s, encoding='utf-8'):
if isinstance(s, str):
return s.encode(encoding)
return s
def guess_filename(obj, default=None):
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return Path(name).name
return default
class AccessLogger:
"""Helper object to log access.
Usage:
log = logging.getLogger("spam")
log_format = "%a %{User-Agent}i"
access_logger = AccessLogger(log, log_format)
access_logger.log(message, environ, response, transport, time)
Format:
%% The percent sign
%a Remote IP-address (IP-address of proxy if using reverse proxy)
%t Time when the request was started to process
%P The process ID of the child that serviced the request
%r First line of request
%s Response status code
%b Size of response in bytes, excluding HTTP headers
%O Bytes sent, including headers
%T Time taken to serve the request, in seconds
%Tf Time taken to serve the request, in seconds with floating fraction
in .06f format
%D Time taken to serve the request, in microseconds
%{FOO}i request.headers['FOO']
%{FOO}o response.headers['FOO']
%{FOO}e os.environ['FOO']
"""
LOG_FORMAT = '%a %l %u %t "%r" %s %b "%{Referrer}i" "%{User-Agent}i"'
FORMAT_RE = re.compile(r'%(\{([A-Za-z\-]+)\}([ioe])|[atPrsbOD]|Tf?)')
CLEANUP_RE = re.compile(r'(%[^s])')
_FORMAT_CACHE = {}
def __init__(self, logger, log_format=LOG_FORMAT):
"""Initialise the logger.
:param logger: logger object to be used for logging
:param log_format: apache compatible log format
"""
self.logger = logger
_compiled_format = AccessLogger._FORMAT_CACHE.get(log_format)
if not _compiled_format:
_compiled_format = self.compile_format(log_format)
AccessLogger._FORMAT_CACHE[log_format] = _compiled_format
self._log_format, self._methods = _compiled_format
def compile_format(self, log_format):
"""Translate log_format into form usable by modulo formatting
All known atoms will be replaced with %s
Also methods for formatting of those atoms will be added to
_methods in apropriate order
For example we have log_format = "%a %t"
This format will be translated to "%s %s"
Also contents of _methods will be
[self._format_a, self._format_t]
These method will be called and results will be passed
to translated string format.
Each _format_* method receive 'args' which is list of arguments
given to self.log
Exceptions are _format_e, _format_i and _format_o methods which
also receive key name (by functools.partial)
"""
log_format = log_format.replace("%l", "-")
log_format = log_format.replace("%u", "-")
methods = []
for atom in self.FORMAT_RE.findall(log_format):
if atom[1] == '':
methods.append(getattr(AccessLogger, '_format_%s' % atom[0]))
else:
m = getattr(AccessLogger, '_format_%s' % atom[2])
methods.append(functools.partial(m, atom[1]))
log_format = self.FORMAT_RE.sub(r'%s', log_format)
log_format = self.CLEANUP_RE.sub(r'%\1', log_format)
return log_format, methods
@staticmethod
def _format_e(key, args):
return (args[1] or {}).get(multidict.upstr(key), '-')
@staticmethod
def _format_i(key, args):
return args[0].headers.get(multidict.upstr(key), '-')
@staticmethod
def _format_o(key, args):
return args[2].headers.get(multidict.upstr(key), '-')
@staticmethod
def _format_a(args):
return args[3].get_extra_info('peername')[0]
@staticmethod
def _format_t(args):
return datetime.datetime.utcnow().strftime('[%d/%b/%Y:%H:%M:%S +0000]')
@staticmethod
def _format_P(args):
return "<%s>" % os.getpid()
@staticmethod
def _format_r(args):
msg = args[0]
if not msg:
return '-'
return '%s %s HTTP/%s.%s' % tuple((msg.method,
msg.path) + msg.version)
@staticmethod
def _format_s(args):
return args[2].status
@staticmethod
def _format_b(args):
return args[2].body_length
@staticmethod
def _format_O(args):
return args[2].output_length
@staticmethod
def _format_T(args):
return round(args[4])
@staticmethod
def _format_Tf(args):
return '%06f' % args[4]
@staticmethod
def _format_D(args):
return round(args[4] * 1000000)
def _format_line(self, args):
return tuple(m(args) for m in self._methods)
def log(self, message, environ, response, transport, time):
"""Log access.
:param message: Request object. May be None.
:param environ: Environment dict. May be None.
:param response: Response object.
        :param transport: Transport object.
:param float time: Time taken to serve the request.
"""
try:
self.logger.info(self._log_format % self._format_line(
[message, environ, response, transport, time]))
except Exception:
self.logger.exception("Error in logging")
_marker = object()
class reify:
"""Use as a class method decorator. It operates almost exactly like
the Python `@property` decorator, but it puts the result of the
method it decorates into the instance dict after the first call,
effectively replacing the function it decorates with an instance
variable. It is, in Python parlance, a data descriptor.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
try:
self.__doc__ = wrapped.__doc__
except: # pragma: no cover
self.__doc__ = ""
self.name = wrapped.__name__
def __get__(self, inst, owner, _marker=_marker):
if inst is None:
return self
val = inst.__dict__.get(self.name, _marker)
if val is not _marker:
return val
val = self.wrapped(inst)
inst.__dict__[self.name] = val
return val
def __set__(self, inst, value):
raise AttributeError("reified property is read-only")
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" +
"0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
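# Short sketch: requote_uri() un-escapes unreserved characters ('%7E' -> '~')
# and quotes only the characters that actually need it (the space below).
def _example_requote_uri_usage():
    # Returns 'http://example.com/a~b%20c'.
    return requote_uri('http://example.com/a%7Eb c')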
class Timeout:
"""Timeout context manager.
Useful in cases when you want to apply timeout logic around block
of code or in cases when asyncio.wait_for is not suitable. For example:
>>> with aiohttp.Timeout(0.001):
... async with aiohttp.get('https://github.com') as r:
... await r.text()
:param timeout: timeout value in seconds
:param loop: asyncio compatible event loop
"""
def __init__(self, timeout, *, loop=None):
self._timeout = timeout
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
self._task = None
self._cancelled = False
self._cancel_handler = None
def __enter__(self):
self._task = asyncio.Task.current_task(loop=self._loop)
if self._task is None:
raise RuntimeError('Timeout context manager should be used '
'inside a task')
self._cancel_handler = self._loop.call_later(
self._timeout, self._cancel_task)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is asyncio.CancelledError and self._cancelled:
self._cancel_handler = None
self._task = None
raise asyncio.TimeoutError
self._cancel_handler.cancel()
self._cancel_handler = None
self._task = None
def _cancel_task(self):
self._cancelled = self._task.cancel()
|
|
"""The tests for the Demo Media player platform."""
import asyncio
import unittest
from unittest.mock import patch
import pytest
import voluptuous as vol
import homeassistant.components.media_player as mp
from homeassistant.helpers.aiohttp_client import DATA_CLIENTSESSION
from homeassistant.setup import async_setup_component, setup_component
from tests.common import get_test_home_assistant
from tests.components.media_player import common
entity_id = "media_player.walkman"
class TestDemoMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Shut down test instance."""
self.hass.stop()
def test_source_select(self):
"""Test the input source service."""
entity_id = "media_player.lounge_room"
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
state = self.hass.states.get(entity_id)
assert "dvd" == state.attributes.get("source")
with pytest.raises(vol.Invalid):
common.select_source(self.hass, None, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert "dvd" == state.attributes.get("source")
common.select_source(self.hass, "xbox", entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert "xbox" == state.attributes.get("source")
def test_clear_playlist(self):
"""Test clear playlist."""
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
assert self.hass.states.is_state(entity_id, "playing")
common.clear_playlist(self.hass, entity_id)
self.hass.block_till_done()
assert self.hass.states.is_state(entity_id, "off")
def test_volume_services(self):
"""Test the volume service."""
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
state = self.hass.states.get(entity_id)
assert 1.0 == state.attributes.get("volume_level")
with pytest.raises(vol.Invalid):
common.set_volume_level(self.hass, None, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert 1.0 == state.attributes.get("volume_level")
common.set_volume_level(self.hass, 0.5, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert 0.5 == state.attributes.get("volume_level")
common.volume_down(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert 0.4 == state.attributes.get("volume_level")
common.volume_up(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert 0.5 == state.attributes.get("volume_level")
assert False is state.attributes.get("is_volume_muted")
with pytest.raises(vol.Invalid):
common.mute_volume(self.hass, None, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert False is state.attributes.get("is_volume_muted")
common.mute_volume(self.hass, True, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert True is state.attributes.get("is_volume_muted")
def test_turning_off_and_on(self):
"""Test turn_on and turn_off."""
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
assert self.hass.states.is_state(entity_id, "playing")
common.turn_off(self.hass, entity_id)
self.hass.block_till_done()
assert self.hass.states.is_state(entity_id, "off")
assert not mp.is_on(self.hass, entity_id)
common.turn_on(self.hass, entity_id)
self.hass.block_till_done()
assert self.hass.states.is_state(entity_id, "playing")
common.toggle(self.hass, entity_id)
self.hass.block_till_done()
assert self.hass.states.is_state(entity_id, "off")
assert not mp.is_on(self.hass, entity_id)
def test_playing_pausing(self):
"""Test media_pause."""
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
assert self.hass.states.is_state(entity_id, "playing")
common.media_pause(self.hass, entity_id)
self.hass.block_till_done()
assert self.hass.states.is_state(entity_id, "paused")
common.media_play_pause(self.hass, entity_id)
self.hass.block_till_done()
assert self.hass.states.is_state(entity_id, "playing")
common.media_play_pause(self.hass, entity_id)
self.hass.block_till_done()
assert self.hass.states.is_state(entity_id, "paused")
common.media_play(self.hass, entity_id)
self.hass.block_till_done()
assert self.hass.states.is_state(entity_id, "playing")
def test_prev_next_track(self):
"""Test media_next_track and media_previous_track ."""
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
state = self.hass.states.get(entity_id)
assert 1 == state.attributes.get("media_track")
common.media_next_track(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert 2 == state.attributes.get("media_track")
common.media_next_track(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert 3 == state.attributes.get("media_track")
common.media_previous_track(self.hass, entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert 2 == state.attributes.get("media_track")
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
ent_id = "media_player.lounge_room"
state = self.hass.states.get(ent_id)
assert 1 == state.attributes.get("media_episode")
common.media_next_track(self.hass, ent_id)
self.hass.block_till_done()
state = self.hass.states.get(ent_id)
assert 2 == state.attributes.get("media_episode")
common.media_previous_track(self.hass, ent_id)
self.hass.block_till_done()
state = self.hass.states.get(ent_id)
assert 1 == state.attributes.get("media_episode")
def test_play_media(self):
"""Test play_media ."""
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
ent_id = "media_player.living_room"
state = self.hass.states.get(ent_id)
assert 0 < (mp.SUPPORT_PLAY_MEDIA & state.attributes.get("supported_features"))
assert state.attributes.get("media_content_id") is not None
with pytest.raises(vol.Invalid):
common.play_media(self.hass, None, "some_id", ent_id)
self.hass.block_till_done()
state = self.hass.states.get(ent_id)
assert 0 < (mp.SUPPORT_PLAY_MEDIA & state.attributes.get("supported_features"))
assert not "some_id" == state.attributes.get("media_content_id")
common.play_media(self.hass, "youtube", "some_id", ent_id)
self.hass.block_till_done()
state = self.hass.states.get(ent_id)
assert 0 < (mp.SUPPORT_PLAY_MEDIA & state.attributes.get("supported_features"))
assert "some_id" == state.attributes.get("media_content_id")
@patch(
"homeassistant.components.demo.media_player.DemoYoutubePlayer.media_seek",
autospec=True,
)
def test_seek(self, mock_seek):
"""Test seek."""
assert setup_component(
self.hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
ent_id = "media_player.living_room"
state = self.hass.states.get(ent_id)
assert state.attributes["supported_features"] & mp.SUPPORT_SEEK
assert not mock_seek.called
with pytest.raises(vol.Invalid):
common.media_seek(self.hass, None, ent_id)
self.hass.block_till_done()
assert not mock_seek.called
common.media_seek(self.hass, 100, ent_id)
self.hass.block_till_done()
assert mock_seek.called
async def test_media_image_proxy(hass, hass_client):
"""Test the media server image proxy server ."""
assert await async_setup_component(
hass, mp.DOMAIN, {"media_player": {"platform": "demo"}}
)
fake_picture_data = "test.test"
class MockResponse:
def __init__(self):
self.status = 200
self.headers = {"Content-Type": "sometype"}
@asyncio.coroutine
def read(self):
return fake_picture_data.encode("ascii")
@asyncio.coroutine
def release(self):
pass
class MockWebsession:
@asyncio.coroutine
def get(self, url):
return MockResponse()
def detach(self):
pass
hass.data[DATA_CLIENTSESSION] = MockWebsession()
assert hass.states.is_state(entity_id, "playing")
state = hass.states.get(entity_id)
client = await hass_client()
req = await client.get(state.attributes.get("entity_picture"))
assert req.status == 200
assert await req.text() == fake_picture_data
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to Python generators of array data.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.platform import tf_logging as logging
def fit_generator(model,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""See docstring for `Model.fit_generator`."""
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
logging.warning(
UserWarning('Using a generator with `use_multiprocessing=True`'
' and multiple workers may duplicate your data.'
                    ' Please consider using the `keras.utils.Sequence`'
' class.'))
if steps_per_epoch is None:
if is_sequence:
steps_per_epoch = len(generator)
else:
raise ValueError('`steps_per_epoch=None` is only valid for a'
' generator based on the `keras.utils.Sequence`'
' class. Please specify `steps_per_epoch` or use'
' the `keras.utils.Sequence` class.')
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (
hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__') or
isinstance(validation_data, Sequence))
if (val_gen and not isinstance(validation_data, Sequence) and
not validation_steps):
raise ValueError('`validation_steps=None` is only valid for a'
' generator based on the `keras.utils.Sequence`'
' class. Please specify `validation_steps` or use'
' the `keras.utils.Sequence` class.')
# Prepare display labels.
out_labels = model.metrics_names
callback_metrics = out_labels + ['val_%s' % n for n in out_labels]
# prepare callbacks
model.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
if verbose:
callbacks += [cbks.ProgbarLogger(count_mode='steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(model, 'callback_model') and model.callback_model:
callback_model = model.callback_model
else:
callback_model = model
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
enqueuer = None
val_enqueuer = None
try:
if do_validation and not val_gen:
# Prepare data for validation
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'`validation_data` should be a tuple '
'`(val_x, val_y, val_sample_weight)` '
'or `(val_x, val_y)`. Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = model._standardize_user_data(
val_x, val_y, val_sample_weight)
val_data = val_x + val_y + val_sample_weights
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_data += [0.]
for cbk in callbacks:
cbk.validation_data = val_data
if workers > 0:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
else:
if is_sequence:
output_generator = iter(generator)
else:
output_generator = generator
callback_model.stop_training = False
# Construct epoch logs.
epoch_logs = {}
while epoch < epochs:
for m in model.stateful_metric_functions:
m.reset_states()
callbacks.on_epoch_begin(epoch)
steps_done = 0
batch_index = 0
while steps_done < steps_per_epoch:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = model.train_on_batch(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
batch_index += 1
steps_done += 1
# Epoch finished.
if steps_done >= steps_per_epoch and do_validation:
if val_gen:
val_outs = evaluate_generator(
model,
validation_data,
validation_steps,
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size)
else:
# No need for try/except because
# data has already been validated.
val_outs = model.evaluate(
val_x,
val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
if callback_model.stop_training:
break
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
try:
if enqueuer is not None:
enqueuer.stop()
finally:
if val_enqueuer is not None:
val_enqueuer.stop()
callbacks.on_train_end()
return model.history
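# Hedged usage sketch (not part of the original module): a tiny Sequence that
# could be fed to fit_generator above, assuming an already-compiled `model`.
# Shapes and batch counts are hypothetical.
class _ExampleBatches(Sequence):
  """Yields two small batches of random data per epoch."""
  def __len__(self):
    return 2
  def __getitem__(self, index):
    x = np.random.random((4, 8)).astype('float32')
    y = np.random.random((4, 1)).astype('float32')
    return x, y
# With a compiled `model`, `fit_generator(model, _ExampleBatches(), epochs=1)`
# would draw both batches each epoch and call model.train_on_batch on each.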
def evaluate_generator(model,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""See docstring for `Model.evaluate_generator`."""
stateful_metric_indices = []
if hasattr(model, 'metrics'):
for m in model.stateful_metric_functions:
m.reset_states()
stateful_metric_indices = [
i for i, name in enumerate(model.metrics_names)
if str(name) in model.stateful_metric_names]
else:
stateful_metric_indices = []
steps_done = 0
wait_time = 0.01
all_outs = []
batch_sizes = []
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
logging.warning(
UserWarning('Using a generator with `use_multiprocessing=True`'
' and multiple workers may duplicate your data.'
                    ' Please consider using the `keras.utils.Sequence`'
' class.'))
if steps is None:
if is_sequence:
steps = len(generator)
else:
raise ValueError('`steps=None` is only valid for a generator'
' based on the `keras.utils.Sequence` class.'
' Please specify `steps` or use the'
' `keras.utils.Sequence` class.')
enqueuer = None
try:
if workers > 0:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
else:
if is_sequence:
output_generator = iter(generator)
else:
output_generator = generator
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('Output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
outs = model.test_on_batch(x, y, sample_weight=sample_weight)
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
if batch_size == 0:
raise ValueError('Received an empty batch. '
'Batches should at least contain one item.')
all_outs.append(outs)
steps_done += 1
batch_sizes.append(batch_size)
if verbose == 1:
progbar.update(steps_done)
finally:
if enqueuer is not None:
enqueuer.stop()
if not isinstance(outs, list):
return np.average(np.asarray(all_outs), weights=batch_sizes)
else:
averages = []
for i in range(len(outs)):
if i not in stateful_metric_indices:
averages.append(
np.average([out[i] for out in all_outs], weights=batch_sizes))
else:
averages.append(float(all_outs[-1][i]))
return averages
def predict_generator(model,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""See docstring for `Model.predict_generator`."""
steps_done = 0
wait_time = 0.01
all_outs = []
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
logging.warning(
UserWarning('Using a generator with `use_multiprocessing=True`'
' and multiple workers may duplicate your data.'
                    ' Please consider using the `keras.utils.Sequence`'
' class.'))
if steps is None:
if is_sequence:
steps = len(generator)
else:
raise ValueError('`steps=None` is only valid for a generator'
' based on the `keras.utils.Sequence` class.'
' Please specify `steps` or use the'
' `keras.utils.Sequence` class.')
enqueuer = None
try:
if workers > 0:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
else:
if is_sequence:
output_generator = iter(generator)
else:
output_generator = generator
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = next(output_generator)
if isinstance(generator_output, tuple):
# Compatibility with the generators
# used for training.
if len(generator_output) == 2:
x, _ = generator_output
elif len(generator_output) == 3:
x, _, _ = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
else:
# Assumes a generator that only
# yields inputs (not targets and sample weights).
x = generator_output
outs = model.predict_on_batch(x)
if not isinstance(outs, list):
outs = [outs]
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_outs) == 1:
if steps_done == 1:
return all_outs[0][0]
else:
return np.concatenate(all_outs[0])
if steps_done == 1:
return [out[0] for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
|
|
import numpy as np
from compliance_checker.base import check_has, Result
from compliance_checker.defined_base import DefinedNCBaseCheck
from netCDF4 import Dataset
from __builtin__ import RuntimeError
#from docutils.math.math2html import LimitsProcessor
##
## UR-TODO - simple copy from ROMS, needs adjusting to SHOC
##
##
class DefinedSWANBaseCheck(DefinedNCBaseCheck):
###############################################################################
#
# HIGHLY RECOMMENDED
#
###############################################################################
supported_ds = [Dataset]
@classmethod
def beliefs(cls):
'''
Not applicable for Defined
'''
return {}
@classmethod
def make_result(cls, level, score, out_of, name, messages, the_method):
        return Result(level, (score, out_of), name, messages, None, "swan", the_method)
def setup(self, ds):
pass
def limits(self,dsp):
from wicken.netcdf_dogma import NetCDFDogma
times = list()
        if isinstance(dsp.dogma, NetCDFDogma):
            from netCDF4 import num2date
            from compliance_checker import inferXVar, inferYVar, DTExportFormat
ds = dsp.dataset
xvar = inferXVar(ds)
yvar = inferYVar(ds)
if yvar is not None:
# metric
lons = yvar[:]
lats = xvar[:]
else:
                raise RuntimeError('Cannot find x/y variables in %s' % ds.filepath())
bounds = [float(np.amin(lons)), float(np.amax(lons)), float(np.amin(lats)), float(np.amax(lats))]
xshape = xvar.shape
yshape = yvar.shape
tt = None
# test if we have a valid time
if 't' in ds.variables and len(ds.variables['t']) > 0:
tt = ds.variables['t']
elif 'time' in ds.variables and len(ds.variables['time']) > 0:
tt = ds.variables['time']
if tt is not None:
times.append(str(len(tt)))
times.append(DTExportFormat.format(num2date(tt[0],tt.units)))
times.append(DTExportFormat.format(num2date(tt[len(tt)-1],tt.units)))
else:
raise RuntimeError("Only supporting NETCDF files so far")
if len(xshape) > 1:
import math
ni = xshape[len(xshape) -1]
nj = xshape[len(xshape) -2]
# from the horizontal -> cartesian
widthX = lons[0,ni-1] - lons[0,0]
heightX = lats[0,ni-1] - lats[0,0]
rotation = DefinedNCBaseCheck.calc_rotation(self,widthX,heightX)
# now extract the actual width and height
widthY = lons[nj-1,0] - lons[0,0]
heightY = lats[nj-1,0] - lats[0,0]
height=math.sqrt((widthY*widthY)+(heightY*heightY))
width=math.sqrt((widthX*widthX)+(heightX*heightX))
origin = [lons[0,0],lats[0,0]]
else:
ni = xshape[0]
nj = yshape[0]
width = lons[len(lons)-1] - lons[0]
height = lats[len(lats)-1] - lats[0]
rotation = 0.
origin = [lons[0],lats[0]]
ninj = [ ni, nj ]
vals = dict()
vals['bounds'] = bounds
vals['ni_nj'] = ninj
vals['rotation'] = rotation
vals['height'] = height
vals['width'] = width
vals['origin'] = origin
if tt is not None:
vals['time'] = times
return vals
@check_has(DefinedNCBaseCheck.HIGH)
def check_high(self, ds):
return ['title', 'summary', 'keywords']
###############################################################################
#
# RECOMMENDED
#
###############################################################################
@check_has(DefinedNCBaseCheck.MEDIUM)
def check_recommended(self, ds):
return [
'history',
'comment',
'date_created',
'creator_name',
'creator_url',
'creator_email',
'institution',
'license'
]
###############################################################################
#
# SUGGESTED
#
###############################################################################
@check_has(DefinedNCBaseCheck.LOW)
def check_suggested(self, ds):
return [
'date_modified',
'date_issued']
def do_check_2D(self, ds, ftype = "cf"):
'''
        Verifies the dataset has the required variables for the 2D grid.
        No coordinate type is prescribed; the NetCDF dataset determines which coordinate variables are present.
'''
from compliance_checker import inferXVar,inferYVar
xvar = inferXVar(ds)
yvar = inferYVar(ds)
messages = []
success_messages = ""
score = 0
level = DefinedNCBaseCheck.HIGH
if xvar is not None:
score = score +1
success_messages += " xvar: "
success_messages += xvar.name
else:
messages.append("Did not find matching longitude variable")
if yvar is not None:
score = score +1
success_messages += " yvar: "
success_messages += yvar.name
else:
messages.append("Did not find matching latitude variable")
return self.make_result(level, score, 2, 'Required 2D Variables '+success_messages, messages,'check_2D')
def do_check_3D(self, ds, ftype = "std"):
'''
Verifies the dataset has the required variables for the 3D grid
'''
return self.make_result(DefinedNCBaseCheck.LOW, 0, 0, 'Required bathy Variable', [],'check_3D')
def do_check_bathy(self, ds, ftype = "std"):
'''
Verifies the dataset has the required variables for bathy
'''
from compliance_checker import inferZVar
zvar = inferZVar(ds)
messages = []
success_messages = ""
score = 0
level = DefinedNCBaseCheck.HIGH
if zvar is not None:
score = score +1
success_messages += " zvar: "
success_messages += zvar.name
else:
messages.append("Did not find matching bathy variable")
return self.make_result(level, score, 1, 'Required bathy Variable '+success_messages, messages,'check_bathy')
def do_check_mask(self, ds):
'''
        Verifies the dataset has the required variables and dimensions for the mask.
'''
        # we could go overboard and test for units and dimensions on the variables as well ....
# not really necessary here
required_variables = []
required_dimensions = []
level = DefinedNCBaseCheck.HIGH
out_of = len(required_variables) + len(required_dimensions)
score = 0
messages = []
for variable in required_variables:
test = variable in ds.variables
score += int(test)
if not test:
messages.append("%s is a required variable" % variable)
for dim in required_dimensions:
test = dim in ds.dimensions
score += int(test)
if not test:
messages.append("%s is a required variable" % dim)
return self.make_result(level, score, out_of, 'Required Variables and Dimensions', messages,'check_mask')
def check(self,dsp):
scores = []
ds = dsp.dataset
ftype = "cf"
scores.append(self.do_check_2D(ds,ftype))
#if str("3D").lower() in self.options:
# scores.append(self.do_check_3D(ds,ftype))
if str("bathy").lower() in self.options:
scores.append(self.do_check_bathy(ds,ftype))
return scores
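# Illustrative sketch (not used by the checker): deriving grid extent and
# rotation from 2D coordinate arrays with plain numpy, in the spirit of
# `limits` above. The atan2-based rotation is an assumption standing in for
# the project-specific DefinedNCBaseCheck.calc_rotation.
def _grid_geometry_example():
    import math
    import numpy as np
    lons, lats = np.meshgrid(np.linspace(0.0, 1.0, 5), np.linspace(10.0, 12.0, 4))
    nj, ni = lons.shape
    # edge vector along the i (column) direction of the first row
    widthX = lons[0, ni - 1] - lons[0, 0]
    heightX = lats[0, ni - 1] - lats[0, 0]
    # edge vector along the j (row) direction of the first column
    widthY = lons[nj - 1, 0] - lons[0, 0]
    heightY = lats[nj - 1, 0] - lats[0, 0]
    width = math.sqrt(widthX * widthX + heightX * heightX)
    height = math.sqrt(widthY * widthY + heightY * heightY)
    rotation = math.degrees(math.atan2(heightX, widthX))  # assumed convention
    origin = [lons[0, 0], lats[0, 0]]
    return {'width': width, 'height': height, 'rotation': rotation, 'origin': origin}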
|
|
#!/usr/bin/env python3
'''
plugin/view.py
View manager class.
Manages and organizes views. The main purpose of this class is to help
determine which views/files belong to the same project. Views in the same
project may all share a single ycmd server backend.
'''
import logging
import threading
from ..lib.subl.view import (
View,
get_view_id,
)
from ..lib.util.lock import lock_guard
logger = logging.getLogger('sublime-ycmd.' + __name__)
try:
import sublime
except ImportError:
from ..lib.subl.dummy import sublime
class SublimeYcmdViewManager(object):
'''
Singleton helper class. Manages wrappers around sublime view instances.
The wrapper class `View` is used around `sublime.View` to cache certain
calculations, and to store view-specific variables/state.
Although this abstraction isn't strictly necessary, it can save expensive
operations like file path calculation and ycmd event notification.
All APIs are thread-safe.
'''
def __init__(self):
# maps view IDs to `View` instances
self._views = {}
self._lock = threading.RLock()
self.reset()
@lock_guard()
def reset(self):
if self._views:
view_ids = list(self._views.keys())
for view_id in view_ids:
self._unregister_view(view_id)
logger.info('all views have been unregistered')
# active views:
self._views = {}
def get_wrapped_view(self, view):
'''
Returns an instance of `View` corresponding to `view`. If one does
not exist, it will be created, if possible.
If the view is provided as an ID (int), then the lookup is performed
as normal, but a `KeyError` will be raised if it does not exist.
If the view is an instance of `sublime.View`, then the lookup is again
performed as usual, but will be created if it does not exist.
Finally, if the view is an instance of `View`, it is returned as-is.
'''
if not isinstance(view, (int, sublime.View, View)):
            raise TypeError('view must be an int, sublime.View, or View: %r' % (view))
if isinstance(view, View):
return view
view_id = get_view_id(view)
if view_id is None:
logger.error('failed to get view ID for view: %r', view)
raise TypeError('view id must be an int: %r' % (view))
with self._lock:
if view_id not in self._views:
# create a wrapped view, if possible
if not isinstance(view, sublime.View):
# not possible... view given with just its id
logger.warning(
'view has not been registered, id: %r', view_id,
)
raise KeyError(view,)
# else, we have a usable view for the wrapper
logger.debug(
'view has not been registered, registering it: %r', view,
)
self._register_view(view, view_id)
assert view_id in self._views, \
'[internal] view id has not been registered: %r' % (view_id)
wrapped_view = self._views[view_id] # type: View
return wrapped_view
@lock_guard()
def has_notified_ready_to_parse(self, view, server):
'''
Returns true if the given `view` has been parsed by the `server`. This
must be done at least once to ensure that the ycmd server has a list
of identifiers to offer in completion results.
This works by storing a view-specific variable indicating the server,
if any, that the view has been uploaded to. If this variable is not
set, or if the variable refers to another server, this method will
return false. In that case, the notification should probably be sent.
'''
view = self.get_wrapped_view(view)
if not view:
logger.error('unknown view type: %r', view)
raise TypeError('view must be a View: %r' % (view))
init_notified_server_set(view)
return has_notified_server(view, server)
@lock_guard()
def set_notified_ready_to_parse(self, view, server, has_notified=True):
'''
Updates the variable that indicates that the given `view` has been
parsed by the `server`.
        This works by setting a view-specific variable indicating the server
        that the view has been uploaded to. The same variable can then be
checked in `has_notified_ready_to_parse`.
'''
view = self.get_wrapped_view(view)
if not view:
logger.error('unknown view type: %r', view)
raise TypeError('view must be a View: %r' % (view))
init_notified_server_set(view)
if has_notified:
add_notified_server(view, server)
else:
remove_notified_server(view, server)
def _register_view(self, view, view_id=None):
if not isinstance(view, sublime.View):
raise TypeError('view must be a sublime.View: %r' % (view))
if view_id is None:
view_id = get_view_id(view)
if not isinstance(view_id, int):
raise TypeError('view id must be an int: %r' % (view))
logger.debug('registering view with id: %r, %r', view_id, view)
view = View(view)
with self._lock:
self._views[view_id] = view
return view_id
def _unregister_view(self, view):
view_id = get_view_id(view)
if view_id is None:
logger.error('failed to get view ID for view: %r', view)
raise TypeError('view id must be an int: %r' % (view))
with self._lock:
if view_id not in self._views:
logger.debug(
'view was never registered, ignoring id: %s', view_id,
)
return False
del self._views[view_id]
return True
@lock_guard()
def get_views(self):
'''
Returns a shallow-copy of the map of managed `View` instances.
'''
return self._views.copy()
def __contains__(self, view):
view_id = get_view_id(view)
if view_id is None:
logger.error('failed to get view ID for view: %r', view)
raise TypeError('view id must be an int: %r' % (view))
with self._lock:
return view_id in self._views
@lock_guard()
def __getitem__(self, view):
return self.get_wrapped_view(view)
@lock_guard()
def __len__(self):
return len(self._views)
def __bool__(self):
''' Returns `True`, so an instance is always truthy. '''
return True
NOTIFIED_SERVERS_KEY = 'notified_servers'
def init_notified_server_set(view, key=NOTIFIED_SERVERS_KEY):
'''
Initializes the set of notified servers for a given `view` if it has not
already been initialized.
This does nothing if it has been initialized already.
'''
if not isinstance(view, View):
logger.warning('view does not appear valid: %r', view)
if key not in view:
logger.debug('view has not been sent to any server, creating metadata')
view[key] = set()
def get_server_key(server):
'''
Returns a unique key for `server` to use as an id for it.
'''
server_key = str(server)
return server_key
def has_notified_server(view, server, key=NOTIFIED_SERVERS_KEY):
'''
Checks if a given `server` is in the notified server set for a `view`.
'''
if not isinstance(view, View):
logger.warning('view does not appear valid: %r', view)
if key not in view:
logger.error(
'notified server set is not initialized for view: %r', view,
)
notified_servers = view[key]
assert isinstance(notified_servers, set), \
'[internal] notified server set is not a set: %r' % (notified_servers)
server_key = get_server_key(server)
return server_key in notified_servers
def add_notified_server(view, server, key=NOTIFIED_SERVERS_KEY):
'''
Adds `server` to the notified server set for `view`.
'''
if not isinstance(view, View):
logger.warning('view does not appear valid: %r', view)
if key not in view:
logger.error(
'notified server set is not initialized for view: %r', view,
)
notified_servers = view[key]
assert isinstance(notified_servers, set), \
'[internal] notified server set is not a set: %r' % (notified_servers)
server_key = get_server_key(server)
notified_servers.add(server_key)
def remove_notified_server(view, server, key=NOTIFIED_SERVERS_KEY):
'''
    Removes `server` from the notified server set for `view`.
If the server is not in the notified server set, this does nothing.
'''
if not isinstance(view, View):
logger.warning('view does not appear valid: %r', view)
if key not in view:
logger.error(
'notified server set is not initialized for view: %r', view,
)
notified_servers = view[key]
assert isinstance(notified_servers, set), \
'[internal] notified server set is not a set: %r' % (notified_servers)
server_key = get_server_key(server)
notified_servers.discard(server_key)
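# Minimal sketch of how the helpers above cooperate. A plain dict stands in
# for a `View` here purely for illustration (these helpers only rely on `in`,
# `__getitem__` and `__setitem__`); real callers pass wrapped views, so the
# validity warning logged for the dict is expected.
def _notified_server_example():
    fake_view = {}                            # stand-in for a View instance
    init_notified_server_set(fake_view)       # creates the empty server set
    assert not has_notified_server(fake_view, 'server-1')
    add_notified_server(fake_view, 'server-1')
    assert has_notified_server(fake_view, 'server-1')
    remove_notified_server(fake_view, 'server-1')
    return fake_view[NOTIFIED_SERVERS_KEY]    # empty set again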
|
|
import datetime
from xml.etree import ElementTree as etree
import pytest
from ..core.exceptions import (
FRITZ_ERRORS,
ActionError,
ServiceError,
FritzActionError,
FritzArgumentError,
FritzActionFailedError,
FritzArgumentValueError,
FritzOutOfMemoryError,
FritzSecurityError,
FritzArrayIndexError,
FritzLookUpError,
FritzArgumentStringToShortError,
FritzArgumentStringToLongError,
FritzArgumentCharacterError,
FritzInternalError,
)
from ..core.soaper import (
boolean_convert,
encode_boolean,
get_argument_value,
get_converted_value,
get_html_safe_value,
raise_fritzconnection_error,
)
content_template = """
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<s:Fault>
<faultcode>s:Client</faultcode>
<faultstring>UPnPError</faultstring>
<detail>
<UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
<errorCode>{error_code}</errorCode>
<errorDescription>Invalid Action</errorDescription>
</UPnPError>
</detail>
</s:Fault>
</s:Body>
</s:Envelope>
"""
class Response:
"""Namespace object."""
@pytest.mark.parametrize(
"error_code, exception", [
('401', FritzActionError),
('402', FritzArgumentError),
('501', FritzActionFailedError),
('600', FritzArgumentValueError),
('603', FritzOutOfMemoryError),
('606', FritzSecurityError),
('713', FritzArrayIndexError),
('714', FritzLookUpError),
('801', FritzArgumentStringToShortError),
('802', FritzArgumentStringToLongError),
('803', FritzArgumentCharacterError),
('820', FritzInternalError),
('713', IndexError),
('714', KeyError),
('401', ActionError),
]
)
def test_raise_fritzconnection_error(error_code, exception):
"""check for exception raising depending on the error_code"""
content = content_template.format(error_code=error_code)
response = Response()
response.content = content.encode()
pytest.raises(exception, raise_fritzconnection_error, response)
@pytest.mark.parametrize(
"value, expected_result", [
('0', False),
('1', True),
]
)
def test_boolean_convert(value, expected_result):
result = boolean_convert(value)
assert result == expected_result
@pytest.mark.parametrize(
"value", ['2', 'x', '3.1']
)
def test_boolean_convert_fails(value):
with pytest.raises(ValueError):
boolean_convert(value)
long_error = """
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<s:Fault>
<faultcode> s:Client </faultcode>
<faultstring>
UPnPError </faultstring>
<detail>
<UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
<errorCode> 401 </errorCode>
<errorDescription> Invalid Action </errorDescription>
</UPnPError>
</detail>
</s:Fault>
</s:Body>
</s:Envelope>
"""
def test_long_error_message():
response = Response()
response.content = long_error.encode()
with pytest.raises(ActionError) as exc:
raise_fritzconnection_error(response)
assert exc.value.args[0] == "\n".join(
["UPnPError: ",
"errorCode: 401",
"errorDescription: Invalid Action",
]
)
@pytest.mark.parametrize(
"value, expected_type", [
("text", str),
(0, int),
(1, int),
(None, int),
(False, int),
(True, int),
]
)
def test_encode_boolean(value, expected_type):
result = encode_boolean(value)
assert isinstance(result, expected_type)
@pytest.mark.parametrize(
"value, expected_results", [
(True, 1),
(False, 0),
(None, 0),
(3.141, 3.141),
("hello test", "hello test"),
("2021-07-17T12:00:00", "2021-07-17T12:00:00"), # redundant, but ISO ;)
("ham, spam & eggs", "ham, spam & eggs"),
("5 > 3", "5 > 3"),
("3 < 5", "3 < 5"),
('say "hello"', "say "hello""),
("let's test again", ["let' test again", "let's test again"])
]
)
def test_get_html_safe_value(value, expected_results):
if not isinstance(expected_results, list):
expected_results = [expected_results]
result = get_html_safe_value(value)
assert result in expected_results
@pytest.mark.parametrize(
"value, not_expected_type", [
(False, bool), # should be int after encoding, not bool
(True, bool),
]
)
def test_encode_boolean2(value, not_expected_type):
result = encode_boolean(value)
assert not isinstance(result, not_expected_type)
soap_root = etree.fromstring("""<?xml version="1.0"?>
<data>
<container>
<year>2010</year>
<msg>message text</msg>
<number>3.141</number>
<ip></ip>
</container>
</data>""")
@pytest.mark.parametrize(
"argument_name, expected_value", [
('year', '2010'),
('msg', 'message text'),
('number', '3.141'),
('ip', ''),
]
)
def test_get_argument_value(argument_name, expected_value):
value = get_argument_value(soap_root, argument_name)
assert value == expected_value
@pytest.mark.parametrize(
"data_type, value, expected_value", [
('datetime', '2020-02-02T10:10:10', datetime.datetime(2020, 2, 2, 10, 10, 10)),
('boolean', '1', True),
('boolean', '0', False),
('uuid', 'uuid:123', '123'),
('uuid', '123', '123'),
('i4', '42', 42),
('ui1', '42', 42),
('ui2', '42', 42),
('ui4', '42', 42),
]
)
def test_get_converted_value(data_type, value, expected_value):
result = get_converted_value(data_type, value)
assert result == expected_value
@pytest.mark.parametrize(
"data_type, value", [
('datetime', '2010.02.02-10:10:10'), # not ISO 8601
('boolean', ''), # neither '1' nor '0'
]
)
def test_get_converted_value_fails(data_type, value):
with pytest.raises(ValueError):
get_converted_value(data_type, value)
|
|
"""Non-Plow specific widgets."""
from itertools import izip_longest
from plow.gui.manifest import QtGui, QtCore
from plow.gui.common.help import getHelp, getHelpTextWidget
from plow.gui import constants
class TableWidget(QtGui.QTableView):
def __init__(self, *args, **kwargs):
super(TableWidget, self).__init__(*args, **kwargs)
self.setEditTriggers(self.NoEditTriggers)
self.setSelectionBehavior(self.SelectRows)
self.setSelectionMode(self.ExtendedSelection)
self.setSortingEnabled(True)
self.setAlternatingRowColors(False)
self.setAutoFillBackground(False)
self.viewport().setFocusPolicy(QtCore.Qt.NoFocus)
self.horizontalHeader().setStretchLastSection(True)
vheader = self.verticalHeader()
vheader.hide()
vheader.setDefaultSectionSize(constants.DEFAULT_ROW_HEIGHT)
class TreeWidget(QtGui.QTreeView):
def __init__(self, *args, **kwargs):
super(TreeWidget, self).__init__(*args, **kwargs)
self.setSortingEnabled(True)
self.setEditTriggers(self.NoEditTriggers)
self.setSelectionBehavior(self.SelectRows)
self.setSelectionMode(self.ExtendedSelection)
self.setUniformRowHeights(True)
self.setAlternatingRowColors(False)
self.setAutoFillBackground(True)
self.viewport().setFocusPolicy(QtCore.Qt.NoFocus)
# self.setVerticalScrollMode(self.ScrollPerPixel)
class SpinSliderWidget(QtGui.QWidget):
def __init__(self, minimum, maximum, value, parent=None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.slider.setMaximum(maximum)
self.slider.setMinimum(minimum)
self.slider.setValue(value)
self.spin = QtGui.QSpinBox(self)
self.spin.setRange(minimum, maximum)
self.spin.setValue(value)
self.spin.valueChanged.connect(self.slider.setValue)
self.slider.valueChanged.connect(self.spin.setValue)
layout.addWidget(self.slider)
layout.addWidget(self.spin)
def value(self):
return self.slider.value()
class BooleanCheckBox(QtGui.QCheckBox):
def __init__(self, checked=True, parent=None):
QtGui.QCheckBox.__init__(self, parent)
self.setChecked(checked)
def setChecked(self, value):
if value:
self.setCheckState(QtCore.Qt.Checked)
else:
self.setCheckState(QtCore.Qt.Unchecked)
def isChecked(self):
return self.checkState() == QtCore.Qt.Checked
class FilterableListBox(QtGui.QWidget):
"""
A list box widget with a text filter.
"""
DATA_ROLE = QtCore.Qt.UserRole
selectionChanged = QtCore.Signal(list)
valueDoubleClicked = QtCore.Signal(object)
valueClicked = QtCore.Signal(object)
def __init__(self, filt=None, items=None, data=None, parent=None):
QtGui.QWidget.__init__(self, parent)
self.__data = {}
self.__txt_label = QtGui.QLabel(self)
self.__txt_filter = QtGui.QLineEdit(self)
self.__txt_filter.textChanged.connect(self.__filterChanged)
self.__model = QtGui.QStringListModel(self)
self.__proxyModel = proxy = QtGui.QSortFilterProxyModel(self)
proxy.setSourceModel(self.__model)
self.__list = view = QtGui.QListView(self)
view.setSelectionMode(self.__list.ExtendedSelection)
view.setModel(proxy)
proxy.sort(0)
proxy.setDynamicSortFilter(True)
layout = QtGui.QVBoxLayout(self)
layout.setSpacing(4)
layout.setContentsMargins(0, 0, 0, 0)
hlayout = QtGui.QHBoxLayout()
hlayout.setContentsMargins(0, 0, 0, 0)
hlayout.addWidget(self.__txt_label)
hlayout.addWidget(self.__txt_filter)
layout.addLayout(hlayout)
layout.addWidget(self.__list)
# connections
self.__list.doubleClicked.connect(self._itemDoubleClicked)
self.__list.clicked.connect(self._itemClicked)
self.__list.selectionModel().selectionChanged.connect(self._selectionChanged)
if items:
self.setStringList(items)
if filt:
self.setFilter(filt)
def clear(self):
self.setStringList([])
self.setFilter('')
def clearSelection(self, clearFilter=True):
self.__list.clearSelection()
if clearFilter:
self.setFilter('')
def setLabel(self, val):
self.__txt_label.setText(val)
def setFilter(self, val, selectFirst=False):
if not val:
val = ''
self.__txt_filter.setText(val)
if not selectFirst:
return
proxy = self.__proxyModel
matches = proxy.match(proxy.index(0,0), QtCore.Qt.DisplayRole, val, 1, QtCore.Qt.MatchContains)
if matches:
selModel = self.__list.selectionModel()
selModel.select(matches[0], selModel.ClearAndSelect)
def setStringList(self, aList, data=None):
model = self.__model
model.setStringList(aList)
self.__data = {}
role = self.DATA_ROLE
for row, val in enumerate(aList):
try:
dataVal = data[row]
            except Exception:
dataVal = val
self.__data[row] = dataVal
def setSingleSelections(self, enabled):
if enabled:
mode = self.__list.SingleSelection
else:
mode = self.__list.ExtendedSelection
self.__list.setSelectionMode(mode)
def getSelectedValues(self, role=QtCore.Qt.DisplayRole):
indexes = self.__list.selectedIndexes()
if self.__list.selectionMode() == self.__list.SingleSelection:
indexes = indexes[:1]
proxy = self.__proxyModel
sourceModel = proxy.sourceModel()
data = self.__data
if role == self.DATA_ROLE:
values = [data.get(proxy.mapToSource(i).row()) for i in indexes]
else:
values = [proxy.data(i) for i in indexes]
return values
def __filterChanged(self, value):
value = value.strip()
if not value:
self.__proxyModel.setFilterFixedString("")
else:
searchStr = '*'.join(value.split())
self.__proxyModel.setFilterWildcard(searchStr)
def _itemDoubleClicked(self, item):
data = self.__proxyModel.data(item)
self.valueDoubleClicked.emit(data)
def _itemClicked(self, item):
data = self.__proxyModel.data(item)
self.valueClicked.emit(data)
def _selectionChanged(self):
sel = self.getSelectedValues()
self.selectionChanged.emit(sel)
class CheckableComboBox(QtGui.QWidget):
"""
A combo box with selectable items.
"""
optionSelected = QtCore.Signal(str)
def __init__(self, title, options, selected=None, icons=None, parent=None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout(self)
self.__btn = btn = QtGui.QPushButton(title)
btn.setFocusPolicy(QtCore.Qt.NoFocus)
btn.setMaximumHeight(22)
btn.setFlat(True)
btn.setContentsMargins(0, 0, 0, 0)
self.__menu = menu = QtGui.QMenu(self)
btn.setMenu(menu)
self.setOptions(options, selected, icons)
layout.addWidget(btn)
btn.toggled.connect(btn.showMenu)
menu.triggered.connect(lambda action: self.optionSelected.emit(action.text()))
def options(self):
return [a.text() for a in self.__menu.actions()]
def setOptions(self, options, selected=None, icons=None):
if selected and not isinstance(selected, (set, dict)):
selected = set(selected)
menu = self.__menu
menu.clear()
for opt, icon in izip_longest(options, icons or []):
a = QtGui.QAction(menu)
a.setText(opt)
a.setCheckable(True)
if selected and opt in selected:
a.setChecked(True)
if icon:
                a.setIcon(icon)
menu.addAction(a)
def selectedOptions(self):
return [a.text() for a in self.__menu.actions() if a.isChecked()]
class CheckableListBox(QtGui.QWidget):
"""
A list box with selectable items.
"""
def __init__(self, title, options, checked, allSelected=True, parent=None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout(self)
self.listItems = QtGui.QListWidget(self)
self.listItems.setMaximumHeight(100)
for opt in options:
item = QtGui.QListWidgetItem(opt)
item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
if checked and opt in checked:
item.setCheckState(QtCore.Qt.Checked)
else:
item.setCheckState(QtCore.Qt.Unchecked)
self.listItems.addItem(item)
self.checkBoxAll = QtGui.QCheckBox("All %s" % title, self)
if allSelected:
self.listItems.setDisabled(True)
self.checkBoxAll.setCheckState(QtCore.Qt.Checked)
self.checkBoxAll.stateChanged.connect(self.__allSelectedToggled)
layout.addWidget(self.checkBoxAll)
layout.addWidget(self.listItems)
def isAllSelected(self):
return self.checkBoxAll.checkState() == QtCore.Qt.Checked
def getCheckedOptions(self):
result = []
if self.isAllSelected():
return result
for i in xrange(0, self.listItems.count()):
item = self.listItems.item(i)
if item.checkState() == QtCore.Qt.Checked:
result.append(str(item.text()))
return result
def __allSelectedToggled(self, state):
if state == QtCore.Qt.Checked:
self.listItems.setDisabled(True)
else:
self.listItems.setDisabled(False)
class RadioBoxArray(QtGui.QWidget):
"""
An array of linked radio boxes.
"""
def __init__(self, title, options, cols=3, parent=None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout(self)
group_box = QtGui.QGroupBox(title)
group_box_layout = QtGui.QGridLayout(group_box)
row = 0
for item, opt in enumerate(options):
            row = item // cols
radio = QtGui.QRadioButton(opt, self)
group_box_layout.addWidget(radio, row, item % cols)
layout.addWidget(group_box)
class ManagedListWidget(QtGui.QWidget):
"""
A list widget that lets you add/remove things.
"""
def __init__(self, items, default="name", parent=None):
QtGui.QWidget.__init__(self, parent)
QtGui.QVBoxLayout(self)
self.__default = default
self.setMaximumHeight(200)
self.list_widget = QtGui.QListWidget(self)
self.list_widget.itemDoubleClicked.connect(self.list_widget.editItem)
for item in (items or []):
list_item = self.__newItem(item)
self.list_widget.addItem(list_item)
self.list_widget.sortItems()
self.btn_add = QtGui.QPushButton(QtGui.QIcon(":/images/plus.png"), "", self)
self.btn_add.setFlat(True)
self.btn_add.clicked.connect(self.addItem)
self.btn_sub = QtGui.QPushButton(QtGui.QIcon(":/images/minus.png"), "", self)
self.btn_sub.setFlat(True)
self.btn_sub.clicked.connect(self.removeItems)
layout_btn = QtGui.QHBoxLayout()
layout_btn.setContentsMargins(0, 0, 0, 0)
layout_btn.setSpacing(1)
layout_btn.addStretch()
layout_btn.addWidget(self.btn_add)
layout_btn.addWidget(self.btn_sub)
self.layout().addWidget(self.list_widget)
self.layout().addLayout(layout_btn)
def getValues(self):
result = []
for i in range(0, self.list_widget.count()):
result.append(str(self.list_widget.item(i).text()))
return result
def addItem(self):
item = self.__newItem(self.__default)
self.list_widget.addItem(item)
self.list_widget.editItem(item)
def removeItems(self):
for item in self.list_widget.selectedItems():
self.list_widget.takeItem(self.list_widget.row(item))
def __newItem(self, name):
list_item = QtGui.QListWidgetItem(name)
list_item.setFlags(
QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsEnabled)
return list_item
class FormWidgetLabel(QtGui.QWidget):
def __init__(self, text, help, parent=None):
QtGui.QWidget.__init__(self, parent)
QtGui.QHBoxLayout(self)
self.__help = help
self.__btn = QtGui.QToolButton(self)
self.__btn.setIcon(QtGui.QIcon(":/images/help.png"))
self.__btn.setFocusPolicy(QtCore.Qt.NoFocus)
self.__btn.clicked.connect(self.__show_popup)
self.__btn.setStyleSheet("QToolButton { border: 0px }")
self.__label = QtGui.QLabel(text, self)
self.layout().setContentsMargins(0, 0, 0, 0)
self.layout().setSpacing(0)
self.layout().addWidget(self.__btn)
self.layout().addSpacing(5)
self.layout().addWidget(self.__label)
self.layout().addStretch()
def __show_popup(self):
frame = QtGui.QFrame(self, QtCore.Qt.Popup | QtCore.Qt.Window)
frame.resize(350, 200)
frame.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
        frame.setLineWidth(2)
frame.move(QtGui.QCursor.pos())
layout = QtGui.QVBoxLayout(frame)
layout.addWidget(getHelpTextWidget(self.__help))
frame.show()
class SimplePercentageBarDelegate(QtGui.QStyledItemDelegate):
"""
A simple status bar, much like a heath meter, which
is intended to show the ratio of two values.
"""
# Left, top, right, bottom
Margins = [5, 4, 5, 4]
__PEN = QtGui.QColor(33, 33, 33)
__C1 = constants.RED
__C2 = constants.GREEN
def __init__(self, parent=None):
QtGui.QStyledItemDelegate.__init__(self, parent)
def paint(self, painter, option, index):
if not index.isValid():
QtGui.QStyledItemDelegate.paint(self, painter, option, index)
return
## Broken in PySide.
opt = QtGui.QStyleOptionViewItemV4(option)
        self.initStyleOption(opt, index)
# Broken in pyside 1.1.2
#if opt.state & QtGui.QStyle.State_Selected:
# painter.fillRect(opt.rect, opt.palette.highlight())
rect = opt.rect
rect.adjust(self.Margins[0], self.Margins[1], -self.Margins[2], -self.Margins[3])
data = index.data()
painter.save()
painter.setRenderHints (
painter.HighQualityAntialiasing |
painter.SmoothPixmapTransform |
painter.Antialiasing)
painter.setPen(self.__PEN)
if data[1] == 0:
painter.setBrush(self.__C1)
painter.drawRoundedRect(rect, 3, 3)
else:
ratio = data[0] / float(data[1])
painter.setBrush(self.__C1)
painter.drawRoundedRect(rect, 3, 3)
rect.setWidth(ratio * rect.width())
painter.setBrush(self.__C2)
painter.drawRoundedRect(rect, 3, 3)
painter.restore()
class ResourceDelegate(QtGui.QItemDelegate):
"""
A custom QItemDelegate to be set onto a specific
column of a view, containing numeric data that can
be represented as a resource.
The default role to check for this data on an index
is Qt.UserRole, but can be set to any other role to
source the numeric data.
Example:
If we have a column in our view that contains a
percentage value of how much memory is left in the
system (from 0.0 - 1.0), then we can do:
delegate = ResourceDelegate(warn=.5, critical=.1)
This will show a warning indication when the ratio is
below 50%, and a critical indication when it falls below
10%
If we are storing our data in another role...
otherRole = QtCore.Qt.UserRole + 50
delegate = ResourceDelegate(dataRole=otherRole)
"""
COLOR_CRITICAL = constants.RED
COLOR_WARN = constants.YELLOW
COLOR_OK = constants.GREEN
COLOR_BG = constants.GRAY
def __init__(self, warn=0.15, critical=0.05, dataRole=QtCore.Qt.UserRole, parent=None):
super(ResourceDelegate, self).__init__(parent)
self._warn = warn
self._crit = critical
self._role = dataRole
def paint(self, painter, opts, index):
currentData = index.data(self._role)
try:
ratio = float(currentData)
        except (TypeError, ValueError):
super(ResourceDelegate, self).paint(painter, opts, index)
return
text = "%0.2f%%" % (ratio * 100)
opt = QtGui.QStyleOptionViewItemV4(opts)
opt.displayAlignment = QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter
grad = QtGui.QLinearGradient(opt.rect.topLeft(), opt.rect.topRight())
# darkEnd = QtCore.Qt.transparent
darkEnd = self.COLOR_BG
end = darkEnd
if ratio == 1:
darkEnd = self.COLOR_OK
end = darkEnd
elif ratio <= self._crit:
darkEnd = self.COLOR_CRITICAL
end = self.COLOR_CRITICAL
elif ratio <= self._warn:
darkEnd = self.COLOR_WARN
end = self.COLOR_WARN
grad.setColorAt(0.0, self.COLOR_OK)
grad.setColorAt(min(ratio, 1.0), self.COLOR_OK)
grad.setColorAt(min(ratio + .01, 1.0), end)
grad.setColorAt(1.0, darkEnd)
self.drawBackground(painter, opt, index)
painter.fillRect(opt.rect, QtGui.QBrush(grad))
self.drawDisplay(painter, opt, opt.rect, text)
state_bg = index.data(QtCore.Qt.BackgroundRole)
if state_bg:
painter.setBrush(QtCore.Qt.NoBrush)
pen = QtGui.QPen(state_bg)
pen.setWidth(2)
painter.setPen(pen)
painter.drawRect(opt.rect)
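# Illustrative usage sketch (hypothetical, not part of the module): attach a
# ResourceDelegate to a column whose indexes carry a 0.0-1.0 ratio in
# QtCore.Qt.UserRole. The column number, thresholds and label are made up.
def _resource_delegate_example(table_widget):
    model = QtGui.QStandardItemModel(1, 2, table_widget)
    item = QtGui.QStandardItem("free memory")
    item.setData(0.42, QtCore.Qt.UserRole)    # 42% of the resource remaining
    model.setItem(0, 1, item)
    table_widget.setModel(model)
    delegate = ResourceDelegate(warn=0.5, critical=0.1, parent=table_widget)
    table_widget.setItemDelegateForColumn(1, delegate)
    return delegate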
|
|
"""
forecasting.py
Assumption-based and data-driven forecasting calculations. Assumption-based forecasting is useful for quickly playing with or
understanding a concept; it can be used to evaluate various strategies without historical or real data. Data-driven
forecasting is useful for forecasting an actual financial situation based on historical data, either personal or economic.
The data-driven approach generates statistical models from real data and forecasts with random samples from the models.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : tim@tmthydvnprt.com
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
import numpy as np
import pandas as pd
import scipy.stats as st
from statsmodels.tsa.arima_model import ARIMA
import pf.util
from pf.constants import ARIMA_ORDERS
################################################################################################################################
# Forecasting Helpers
################################################################################################################################
def increase_pay(
paycheck,
gross_increase,
retire_contribution_percent,
employer_match_percent,
employer_retire_percent
):
"""
    Estimate the effect of a pay increase on paycheck values.
Paycheck is a DataFrame containing at least the following columns:
gross
net
        pretax retire
        pretax deduct
posttax loan
employer_match
employer_retire
other
tax
taxable gross
taxable net
"""
# Calculate last tax percent
percent_tax = paycheck['tax'] / paycheck['taxable gross']
# Increased pay and retirement
paycheck['gross'] = (1.0 + gross_increase) * paycheck['gross']
paycheck['pretax retire'] = -retire_contribution_percent * paycheck['gross']
# Recalculate taxable gross and then tax
paycheck['taxable gross'] = paycheck[['gross', 'pretax deduct', 'pretax retire']].sum(axis=1)
paycheck['tax'] = percent_tax * paycheck['taxable gross']
# Recalculate net
paycheck['net'] = paycheck[['gross', 'pretax deduct', 'pretax retire', 'posttax loan', 'tax','other']].sum(axis=1)
# Recalculate employer match
paycheck['employer_match'] = employer_match_percent * paycheck['gross']
paycheck['employer_retire'] = employer_retire_percent * paycheck['gross']
return paycheck
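# Minimal usage sketch with made-up numbers: a single-row paycheck DataFrame
# carrying the columns listed in the docstring, run through `increase_pay`
# for a hypothetical 3% raise and 10% retirement contribution.
def _increase_pay_example():
    paycheck = pd.DataFrame([{
        'gross': 4000.0, 'net': 2800.0,
        'pretax retire': -400.0, 'pretax deduct': -200.0,
        'posttax loan': -100.0, 'employer_match': 120.0,
        'employer_retire': 80.0, 'other': 0.0,
        'tax': -500.0, 'taxable gross': 3400.0, 'taxable net': 2900.0,
    }])
    return increase_pay(
        paycheck,
        gross_increase=0.03,
        retire_contribution_percent=0.10,
        employer_match_percent=0.03,
        employer_retire_percent=0.02,
    )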
################################################################################################################################
# Assumption Based Forecasting
################################################################################################################################
def assumption_fi_forecast(
income=50000.00,
initial_balance=0.0,
income_increase=0.03,
savings_rate=0.50,
withdrawal_rate=0.04,
return_rate=0.05,
age=23,
life_expectancy=90,
min_spending=0,
max_spending=9e999,
start=None,
expense_increase=True
):
"""
    Financial Independence (investment withdrawal > living expenses) forecasting based purely on assumptions, not real data.
"""
# Calculate years to simulate
years = (life_expectancy - age) + 1
# Empty DataFrame to store results
columns = ['Age', 'Balance', 'Income', 'Savings', 'Expenses', 'Return on Investment', 'Safe Withdrawal', '% FI', 'FI']
cashflow_table = pd.DataFrame(
data=np.zeros((years, len(columns))),
index=range(years),
columns=columns
)
# cashflow_table['FI'] = False
cashflow_table.index.name = 'year'
# Store initial balance
cashflow_table.iloc[0]['Balance'] = initial_balance
# Generate Cashflow table
fi = False
for i in cashflow_table.index:
# Calculate savings and expenses
yearly_savings = savings_rate * income
if i == 0 or expense_increase:
yearly_expenses = (1 - savings_rate) * income if not fi else cashflow_table.loc[i-1]['Safe Withdrawal']
yearly_expenses = max(yearly_expenses, min_spending)
yearly_expenses = min(yearly_expenses, max_spending)
# store data
cashflow_table.loc[i, 'Age'] = age + i
cashflow_table.loc[i, 'Income'] = income
cashflow_table.loc[i, 'Savings'] = yearly_savings
cashflow_table.loc[i, 'Expenses'] = yearly_expenses
# If not the first year
if i >= 1:
# Determine Return
cashflow_table.loc[i, 'Return on Investment'] = return_rate * cashflow_table.loc[i-1]['Balance']
# Growth balance
cashflow_table.loc[i, 'Balance'] = (1 + return_rate) * cashflow_table.loc[i-1]['Balance']
# Calculate safe withdrawal
cashflow_table.loc[i, 'Safe Withdrawal'] = withdrawal_rate * cashflow_table.loc[i-1]['Balance']
cashflow_table.loc[i, '% FI'] = 100.0 * cashflow_table.loc[i, 'Safe Withdrawal'] / cashflow_table.loc[i, 'Expenses']
# Once withdrawal is greater than expenses, retire
if cashflow_table.loc[i, 'Safe Withdrawal'] >= cashflow_table.loc[i-1]['Expenses']:
fi = True
            # Remove withdrawal from balance for expenses
cashflow_table.loc[i, 'Balance'] -= cashflow_table.loc[i]['Safe Withdrawal']
if fi:
# stop income
income = np.nan
elif i > 0:
# Add yearly savings
cashflow_table.loc[i, 'Balance'] += yearly_savings
# increase income a little for next year
income = (1 + income_increase) * income
# Store boolean
cashflow_table.loc[i, 'FI'] = fi
# Turn Index into date if data available
if start:
cashflow_table['Date'] = pd.date_range(start=start, periods=len(cashflow_table.index), freq='A')
cashflow_table = cashflow_table.reset_index().set_index('Date')
return cashflow_table
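# Usage sketch with assumed inputs: an assumption-based forecast for a 30
# year old earning 60k, saving half, starting from a 10k balance. All numbers
# are illustrative only.
def _assumption_forecast_example():
    table = assumption_fi_forecast(
        income=60000.0,
        initial_balance=10000.0,
        income_increase=0.03,
        savings_rate=0.50,
        withdrawal_rate=0.04,
        return_rate=0.05,
        age=30,
        life_expectancy=90,
        start='2016-01-01',
    )
    # First year in which the safe withdrawal covers expenses
    return table[table['FI'] > 0].head(1)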
################################################################################################################################
# Modeled Forecasting
################################################################################################################################
def arima_model(accounts):
"""Fit ARIMA models for each account"""
# Model each account
account_models = {}
for account_type, account in accounts:
account_data = accounts[(account_type, account)]
account_data.name = account
# ARIMA model order is unknown, so find the highest order that can be fit
order = 0
modeled = False
while not modeled and order < len(ARIMA_ORDERS):
try:
model = ARIMA(account_data, order=ARIMA_ORDERS[order])
results = model.fit()
modeled = True
account_models[(account_type, account)] = results
except (ValueError, np.linalg.LinAlgError):
order += 1
return account_models
def arima_forecast(account_models, start, **kwds):
"""Forecast accounts with ARIMA method"""
# Determine times
forecast_start = start
forecast_end = start + pd.DateOffset(**kwds)
# Forecast each account
accounts_forecast = pd.DataFrame(columns=account_models.keys())
for account_type, account in accounts_forecast:
accounts_forecast[(account_type, account)] = account_models[(account_type, account)].predict(
start=str(forecast_start.date()),
end=str(forecast_end.date()),
typ='levels'
)
return accounts_forecast
def dist_fit_model(accounts):
"""Build models for each account based on log change"""
# Model each account
account_models = {}
for account_type, account in accounts:
# Compute monthly change, ignoring Infs and NaNs
account_pct_change = np.log(accounts[(account_type, account)]) \
.pct_change() \
.replace([-1.0, -np.inf, np.inf], np.nan) \
.dropna()
# Generate model
model, params = pf.util.best_fit_distribution(account_pct_change)
account_models[(account_type, account)] = (model, params)
return account_models
def monte_carlo_forecast(accounts, account_models, start, number_of_runs=1000, **kwds):
"""Forecast accounts with Monte Carlo method from fit distributions"""
# Determine times
forecast_start = start
forecast_end = start + pd.DateOffset(**kwds)
forecast_dates = pd.date_range(forecast_start, forecast_end, freq='MS')
# Empty Panel to store Monte Carlo runs
account_forecast_runs = pd.Panel(
data=np.zeros((number_of_runs, len(forecast_dates), len(account_models.keys()))),
items=range(number_of_runs),
major_axis=forecast_dates,
minor_axis=account_models.keys()
)
# Forecast each account
accounts_forecast = pd.DataFrame(columns=account_models.keys())
for account_type, account in accounts_forecast:
# Get initial account value
init_value = accounts[(account_type, account)].iloc[-1]
        if not np.isfinite(init_value):
init_value = 1.0
# Get model
model_name, params = account_models[(account_type, account)]
model = getattr(st, model_name)
if model:
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Forecast into future with Monte Carlo
for run in xrange(number_of_runs):
# Generate random variables
forecast_rvs = model.rvs(loc=loc, scale=scale, size=len(forecast_dates), *arg)
# Create Series of percent changes from random variables
forecast_pct_change = pd.Series(forecast_rvs, index=forecast_dates)
                # Clip unrealistic changes larger than +/-50% in one month
forecast_pct_change = forecast_pct_change.clip(-0.5, 0.5)
# Forecast account as monthly percent change from last known account value
forecast_pct = np.exp(forecast_pct_change.copy())
forecast_pct[0] = forecast_pct[0] * init_value
forecast = forecast_pct.cumprod()
# Add to run storage
account_forecast_runs[run][(account_type, account)] = forecast
return account_forecast_runs
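# Sketch of a possible downstream step (assumed, not part of this module):
# collapse the runs axis of the Panel returned above into median and
# percentile bands for a single (account_type, account) key.
def _summarize_runs_example(account_forecast_runs, account_key):
    runs = pd.DataFrame(dict(
        (run, account_forecast_runs[run][account_key])
        for run in account_forecast_runs.items
    ))
    return pd.DataFrame({
        'p05': runs.quantile(0.05, axis=1),
        'median': runs.median(axis=1),
        'p95': runs.quantile(0.95, axis=1),
    })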
|
|
"""Examine callable regions following genome mapping of short reads.
Identifies callable analysis regions surrounded by larger regions lacking
aligned bases. This allows parallelization of smaller chromosome chunks
through post-processing and variant calling, with each sub-section
mapping handled separately.
Regions are split to try to maintain relative uniformity across the
genome and avoid extremes of large blocks or large numbers of
small blocks.
"""
import contextlib
import copy
from distutils.version import LooseVersion
import operator
import os
import subprocess
import sys
import numpy
import pybedtools
import pysam
import toolz as tz
import yaml
from bcbio import bam, broad, utils
from bcbio.bam import ref
from bcbio.log import logger
from bcbio.distributed import multi, prun
from bcbio.distributed.split import parallel_split_combine
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
from bcbio.variation import bedutils
from bcbio.variation import multi as vmulti
def parallel_callable_loci(in_bam, ref_file, data):
config = copy.deepcopy(data["config"])
num_cores = config["algorithm"].get("num_cores", 1)
data = {"work_bam": in_bam, "config": config,
"reference": data["reference"]}
parallel = {"type": "local", "cores": num_cores, "module": "bcbio.distributed"}
items = [[data]]
with prun.start(parallel, items, config, multiplier=int(num_cores)) as runner:
split_fn = shared.process_bam_by_chromosome("-callable.bed", "work_bam", remove_alts=True)
out = parallel_split_combine(items, split_fn, runner,
"calc_callable_loci", "combine_bed",
"callable_bed", ["config"])[0]
return out[0]["callable_bed"]
@multi.zeromq_aware_logging
def calc_callable_loci(data, region=None, out_file=None):
"""Determine callable bases for an input BAM in the given region.
"""
if out_file is None:
out_file = "%s-callable.bed" % os.path.splitext(data["work_bam"])[0]
max_depth = dd.get_coverage_depth_max(data)
depth = {"max": max_depth * 7 if max_depth > 0 else sys.maxint - 1,
"min": dd.get_coverage_depth_min(data)}
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
ref_file = tz.get_in(["reference", "fasta", "base"], data)
region_file, calc_callable = _regions_for_coverage(data, region, ref_file, tx_out_file)
if calc_callable:
_group_by_ctype(_get_coverage_file(data["work_bam"], ref_file, region, region_file, depth,
tx_out_file, data),
depth, region_file, tx_out_file, data)
# special case, do not calculate if we are in a chromosome not covered by BED file
else:
os.rename(region_file, tx_out_file)
return [{"callable_bed": out_file, "config": data["config"], "work_bam": data["work_bam"]}]
def _group_by_ctype(bed_file, depth, region_file, out_file, data):
"""Group adjacent callable/uncallble regions into defined intervals.
Uses tips from bedtools discussion:
https://groups.google.com/d/msg/bedtools-discuss/qYDE6XF-GRA/2icQtUeOX_UJ
https://gist.github.com/arq5x/b67196a46db5b63bee06
"""
def assign_coverage(feat):
feat.name = _get_ctype(float(feat.name), depth)
return feat
full_out_file = "%s-full%s" % utils.splitext_plus(out_file)
with open(full_out_file, "w") as out_handle:
kwargs = {"g": [1, 4], "c": [1, 2, 3, 4], "o": ["first", "first", "max", "first"]}
# back compatible precision https://github.com/chapmanb/bcbio-nextgen/issues/664
if LooseVersion(programs.get_version_manifest("bedtools", data=data, required=True)) >= LooseVersion("2.22.0"):
kwargs["prec"] = 21
for line in open(pybedtools.BedTool(bed_file).each(assign_coverage).saveas()
.groupby(**kwargs).fn):
out_handle.write("\t".join(line.split("\t")[2:]))
pybedtools.BedTool(full_out_file).intersect(region_file, nonamecheck=True).saveas(out_file)
def _get_coverage_file(in_bam, ref_file, region, region_file, depth, base_file, data):
"""Retrieve summary of coverage in a region.
Requires positive non-zero mapping quality at a position, matching GATK's
CallableLoci defaults.
"""
out_file = "%s-genomecov.bed" % utils.splitext_plus(base_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
bam.index(in_bam, data["config"])
fai_file = ref.fasta_idx(ref_file, data["config"])
sambamba = config_utils.get_program("sambamba", data["config"])
bedtools = config_utils.get_program("bedtools", data["config"])
max_depth = depth["max"] + 1
cmd = ("{sambamba} view -F 'mapping_quality > 0' -L {region_file} -f bam -l 1 {in_bam} | "
"{bedtools} genomecov -split -ibam stdin -bga -g {fai_file} -max {max_depth} "
"> {tx_out_file}")
do.run(cmd.format(**locals()), "bedtools genomecov: %s" % (str(region)), data)
# Empty output file, no coverage for the whole contig
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feat in get_ref_bedtool(ref_file, data["config"], region):
out_handle.write("%s\t%s\t%s\t%s\n" % (feat.chrom, feat.start, feat.end, 0))
return out_file
def _get_ctype(count, depth):
if count == 0:
return "NO_COVERAGE"
elif count < depth["min"]:
return "LOW_COVERAGE"
elif count > depth["max"]:
return "EXCESSIVE_COVERAGE"
else:
return "CALLABLE"
def _regions_for_coverage(data, region, ref_file, out_file):
"""Retrieve BED file of regions we need to calculate coverage in.
"""
variant_regions = bedutils.merge_overlaps(dd.get_variant_regions(data), data)
ready_region = shared.subset_variant_regions(variant_regions, region, out_file)
custom_file = "%s-coverageregions.bed" % utils.splitext_plus(out_file)[0]
if not ready_region:
get_ref_bedtool(ref_file, data["config"]).saveas(custom_file)
return custom_file, True
elif os.path.isfile(ready_region):
return ready_region, True
elif isinstance(ready_region, (list, tuple)):
c, s, e = ready_region
pybedtools.BedTool("%s\t%s\t%s\n" % (c, s, e), from_string=True).saveas(custom_file)
return custom_file, True
else:
with file_transaction(data, custom_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feat in get_ref_bedtool(ref_file, data["config"], region):
out_handle.write("%s\t%s\t%s\t%s\n" % (feat.chrom, feat.start, feat.end, "NO_COVERAGE"))
return custom_file, variant_regions is None
def sample_callable_bed(bam_file, ref_file, data):
"""Retrieve callable regions for a sample subset by defined analysis regions.
"""
config = data["config"]
out_file = "%s-callable_sample.bed" % os.path.splitext(bam_file)[0]
with shared.bedtools_tmpdir({"config": config}):
callable_bed = parallel_callable_loci(bam_file, ref_file, data)
input_regions_bed = config["algorithm"].get("variant_regions", None)
if not utils.file_uptodate(out_file, callable_bed):
with file_transaction(config, out_file) as tx_out_file:
callable_regions = pybedtools.BedTool(callable_bed)
filter_regions = callable_regions.filter(lambda x: x.name == "CALLABLE")
if input_regions_bed:
if not utils.file_uptodate(out_file, input_regions_bed):
input_regions = pybedtools.BedTool(input_regions_bed)
filter_regions.intersect(input_regions, nonamecheck=True).saveas(tx_out_file)
else:
filter_regions.saveas(tx_out_file)
return out_file
def calculate_offtarget(bam_file, ref_file, data):
"""Generate file of offtarget read counts for inputs with variant regions.
"""
vrs_file = dd.get_variant_regions(data)
if vrs_file:
out_file = "%s-offtarget-stats.yaml" % os.path.splitext(bam_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
offtarget_regions = "%s-regions.bed" % utils.splitext_plus(out_file)[0]
ref_bed = get_ref_bedtool(ref_file, data["config"])
ref_bed.subtract(pybedtools.BedTool(vrs_file), nonamecheck=True).saveas(offtarget_regions)
cmd = ("samtools view -u {bam_file} -L {offtarget_regions} | "
"bedtools intersect -abam - -b {offtarget_regions} -f 1.0 -bed | wc -l")
offtarget_count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
cmd = "samtools idxstats {bam_file} | awk '{{s+=$3}} END {{print s}}'"
mapped_count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
with open(tx_out_file, "w") as out_handle:
yaml.safe_dump({"mapped": mapped_count, "offtarget": offtarget_count}, out_handle,
allow_unicode=False, default_flow_style=False)
return out_file
def get_ref_bedtool(ref_file, config, chrom=None):
"""Retrieve a pybedtool BedTool object with reference sizes from input reference.
"""
broad_runner = broad.runner_from_config(config, "picard")
ref_dict = broad_runner.run_fn("picard_index_ref", ref_file)
ref_lines = []
with contextlib.closing(pysam.Samfile(ref_dict, "r")) as ref_sam:
for sq in ref_sam.header["SQ"]:
if not chrom or sq["SN"] == chrom:
ref_lines.append("%s\t%s\t%s" % (sq["SN"], 0, sq["LN"]))
return pybedtools.BedTool("\n".join(ref_lines), from_string=True)
def _get_nblock_regions(in_file, min_n_size, ref_regions):
"""Retrieve coordinates of regions in reference genome with no mapping.
These are potential breakpoints for parallelizing analysis.
"""
out_lines = []
called_contigs = set([])
with open(in_file) as in_handle:
for line in in_handle:
contig, start, end, ctype = line.rstrip().split()
called_contigs.add(contig)
if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"] and
int(end) - int(start) > min_n_size):
out_lines.append("%s\t%s\t%s\n" % (contig, start, end))
for refr in ref_regions:
if refr.chrom not in called_contigs:
out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop))
return pybedtools.BedTool("\n".join(out_lines), from_string=True)
def _combine_regions(all_regions, ref_regions):
"""Combine multiple BEDtools regions of regions into sorted final BEDtool.
"""
chrom_order = {}
for i, x in enumerate(ref_regions):
chrom_order[x.chrom] = i
def wchrom_key(x):
chrom, start, end = x
return (chrom_order[chrom], start, end)
all_intervals = []
for region_group in all_regions:
for region in region_group:
all_intervals.append((region.chrom, int(region.start), int(region.stop)))
all_intervals.sort(key=wchrom_key)
bed_lines = ["%s\t%s\t%s" % (c, s, e) for (c, s, e) in all_intervals]
return pybedtools.BedTool("\n".join(bed_lines), from_string=True)
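# Small illustration of the chromosome-order sort used above, with plain
# tuples standing in for pybedtools intervals (names are made up).
def _wchrom_sort_example():
    chrom_order = {"chr1": 0, "chr2": 1, "chrX": 2}
    intervals = [("chrX", 5, 10), ("chr1", 100, 200), ("chr1", 5, 50)]
    intervals.sort(key=lambda iv: (chrom_order[iv[0]], iv[1], iv[2]))
    return intervals  # [("chr1", 5, 50), ("chr1", 100, 200), ("chrX", 5, 10)]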
def _add_config_regions(nblock_regions, ref_regions, config):
"""Add additional nblock regions based on configured regions to call.
Identifies user defined regions which we should not be analyzing.
"""
input_regions_bed = config["algorithm"].get("variant_regions", None)
if input_regions_bed:
input_regions = pybedtools.BedTool(input_regions_bed)
# work around problem with single region not subtracted correctly.
if len(input_regions) == 1:
str_regions = str(input_regions[0]).strip()
input_regions = pybedtools.BedTool("%s\n%s" % (str_regions, str_regions),
from_string=True)
input_nblock = ref_regions.subtract(input_regions, nonamecheck=True)
if input_nblock == ref_regions:
raise ValueError("Input variant_region file (%s) "
"excludes all genomic regions. Do the chromosome names "
"in the BED file match your genome (chr1 vs 1)?" % input_regions_bed)
all_intervals = _combine_regions([input_nblock, nblock_regions], ref_regions)
return all_intervals.merge()
else:
return nblock_regions
class NBlockRegionPicker:
"""Choose nblock regions reasonably spaced across chromosomes.
This avoids excessively large blocks and also large numbers of tiny blocks
by splitting to a defined number of blocks.
    Assumes it is iterating over an ordered input file and needs to be
    re-initialized for each new file processed, since it keeps track of
    previous blocks to maintain the splitting.
"""
def __init__(self, ref_regions, config, min_n_size):
self._end_buffer = 250 if min_n_size > 50 else 0
self._chr_last_blocks = {}
target_blocks = int(config["algorithm"].get("nomap_split_targets", 200))
self._target_size = self._get_target_size(target_blocks, ref_regions)
self._ref_sizes = {x.chrom: x.stop for x in ref_regions}
def _get_target_size(self, target_blocks, ref_regions):
size = 0
for x in ref_regions:
size += (x.end - x.start)
return size // target_blocks
def include_block(self, x):
"""Check for inclusion of block based on distance from previous.
"""
last_pos = self._chr_last_blocks.get(x.chrom, 0)
# Region excludes an entire chromosome, typically decoy/haplotypes
if last_pos <= self._end_buffer and x.stop >= self._ref_sizes.get(x.chrom, 0) - self._end_buffer:
return True
# Do not split on smaller decoy and haplotype chromosomes
elif self._ref_sizes.get(x.chrom, 0) <= self._target_size:
return False
elif (x.start - last_pos) > self._target_size:
self._chr_last_blocks[x.chrom] = x.stop
return True
else:
return False
def expand_block(self, feat):
"""Expand any blocks which are near the start or end of a contig.
"""
chrom_end = self._ref_sizes.get(feat.chrom)
if chrom_end:
if feat.start < self._end_buffer:
feat.start = 0
if feat.stop >= chrom_end - self._end_buffer:
feat.stop = chrom_end
return feat
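# Illustrative sketch (not part of the pipeline): driving NBlockRegionPicker
# with lightweight stand-in regions instead of pybedtools intervals. The
# picker only needs .chrom, .start, .end and .stop attributes, so a
# namedtuple is enough to show how candidate nblocks are spaced out.
def _nblock_picker_example():
    import collections
    Region = collections.namedtuple("Region", ["chrom", "start", "end", "stop"])
    ref = [Region("chr1", 0, 1000000, 1000000)]
    config = {"algorithm": {"nomap_split_targets": 10}}  # ~100kb target blocks
    picker = NBlockRegionPicker(ref, config, min_n_size=100)
    candidates = [Region("chr1", 50000, 50200, 50200),
                  Region("chr1", 200000, 200300, 200300)]
    # Only the block far enough from the previous one is kept as a breakpoint
    return [r for r in candidates if picker.include_block(r)]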
def block_regions(in_bam, ref_file, data):
"""Find blocks of regions for analysis from mapped input BAM file.
    Identifies islands of callable regions, surrounded by regions
with no read support, that can be analyzed independently.
"""
config = data["config"]
min_n_size = int(config["algorithm"].get("nomap_split_size", 100))
with shared.bedtools_tmpdir({"config": config}):
callable_bed = parallel_callable_loci(in_bam, ref_file, data)
nblock_bed = "%s-nblocks%s" % os.path.splitext(callable_bed)
callblock_bed = "%s-callableblocks%s" % os.path.splitext(callable_bed)
if not utils.file_uptodate(nblock_bed, callable_bed):
ref_regions = get_ref_bedtool(ref_file, config)
nblock_regions = _get_nblock_regions(callable_bed, min_n_size, ref_regions)
nblock_regions = _add_config_regions(nblock_regions, ref_regions, config)
nblock_regions.saveas(nblock_bed)
if len(ref_regions.subtract(nblock_regions, nonamecheck=True)) > 0:
ref_regions.subtract(nblock_bed, nonamecheck=True).merge(d=min_n_size).saveas(callblock_bed)
else:
raise ValueError("No callable regions found from BAM file. Alignment regions might "
"not overlap with regions found in your `variant_regions` BED: %s" % in_bam)
return callblock_bed, nblock_bed, callable_bed
def _write_bed_regions(data, final_regions, out_file, out_file_ref):
ref_file = tz.get_in(["reference", "fasta", "base"], data)
ref_regions = get_ref_bedtool(ref_file, data["config"])
noanalysis_regions = ref_regions.subtract(final_regions, nonamecheck=True)
final_regions.saveas(out_file)
noanalysis_regions.saveas(out_file_ref)
def _analysis_block_stats(regions, samples):
"""Provide statistics on sizes and number of analysis blocks.
"""
prev = None
between_sizes = []
region_sizes = []
for region in regions:
if prev and prev.chrom == region.chrom:
between_sizes.append(region.start - prev.end)
region_sizes.append(region.end - region.start)
prev = region
def descriptive_stats(xs):
if len(xs) < 2:
return xs
parts = ["min: %s" % min(xs),
"5%%: %s" % numpy.percentile(xs, 5),
"25%%: %s" % numpy.percentile(xs, 25),
"median: %s" % numpy.percentile(xs, 50),
"75%%: %s" % numpy.percentile(xs, 75),
"95%%: %s" % numpy.percentile(xs, 95),
"99%%: %s" % numpy.percentile(xs, 99),
"max: %s" % max(xs)]
return "\n".join([" " + x for x in parts])
logger.info("Identified %s parallel analysis blocks\n" % len(region_sizes) +
"Block sizes:\n%s\n" % descriptive_stats(region_sizes) +
"Between block sizes:\n%s\n" % descriptive_stats(between_sizes))
if len(region_sizes) == 0:
raise ValueError("No callable regions found in: %s" %
(", ".join([dd.get_sample_name(x) for x in samples])))
def _needs_region_update(out_file, samples):
"""Check if we need to update BED file of regions, supporting back compatibility.
"""
nblock_files = [x["regions"]["nblock"] for x in samples if "regions" in x]
    # For older runs, do not create a new set of analysis regions, since the
    # new algorithm would re-do all BAM and variant steps with the new
    # regions
for nblock_file in nblock_files:
test_old = nblock_file.replace("-nblocks", "-analysisblocks")
if os.path.exists(test_old):
return False
# Check if any of the local files have changed so we need to refresh
for noblock_file in nblock_files:
if not utils.file_uptodate(out_file, noblock_file):
return True
return False
def combine_sample_regions(*samples):
"""Create batch-level sets of callable regions for multi-sample calling.
Intersects all non-callable (nblock) regions from all samples in a batch,
producing a global set of callable regions.
"""
samples = [x[0] for x in samples]
# back compatibility -- global file for entire sample set
global_analysis_file = os.path.join(samples[0]["dirs"]["work"], "analysis_blocks.bed")
if utils.file_exists(global_analysis_file) and not _needs_region_update(global_analysis_file, samples):
global_no_analysis_file = os.path.join(os.path.dirname(global_analysis_file), "noanalysis_blocks.bed")
else:
global_analysis_file = None
out = []
analysis_files = []
batches = []
with shared.bedtools_tmpdir(samples[0]):
for batch, items in vmulti.group_by_batch(samples, require_bam=False).items():
batches.append(items)
if global_analysis_file:
analysis_file, no_analysis_file = global_analysis_file, global_no_analysis_file
else:
analysis_file, no_analysis_file = _combine_sample_regions_batch(batch, items)
for data in items:
vr_file = dd.get_variant_regions(data)
if analysis_file:
analysis_files.append(analysis_file)
data["config"]["algorithm"]["callable_regions"] = analysis_file
data["config"]["algorithm"]["non_callable_regions"] = no_analysis_file
data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(analysis_file).count()
elif vr_file:
data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(vr_file).count()
highdepth_bed = tz.get_in(["regions", "highdepth"], data)
if highdepth_bed:
data["config"]["algorithm"]["highdepth_regions"] = highdepth_bed
# attach a representative sample for calculating callable region
if not data.get("work_bam"):
for x in items:
if x.get("work_bam"):
data["work_bam_callable"] = x["work_bam"]
out.append([data])
assert len(out) == len(samples)
if len(analysis_files) > 0:
final_regions = pybedtools.BedTool(analysis_files[0])
_analysis_block_stats(final_regions, batches[0])
return out
def _combine_sample_regions_batch(batch, items):
"""Combine sample regions within a group of batched samples.
"""
config = items[0]["config"]
work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "regions"))
analysis_file = os.path.join(work_dir, "%s-analysis_blocks.bed" % batch)
no_analysis_file = os.path.join(work_dir, "%s-noanalysis_blocks.bed" % batch)
if not utils.file_exists(analysis_file) or _needs_region_update(analysis_file, items):
# Combine all nblocks into a final set of intersecting regions
# without callable bases. HT @brentp for intersection approach
# https://groups.google.com/forum/?fromgroups#!topic/bedtools-discuss/qA9wK4zN8do
bed_regions = [pybedtools.BedTool(x["regions"]["nblock"])
for x in items if "regions" in x]
if len(bed_regions) == 0:
analysis_file, no_analysis_file = None, None
else:
with file_transaction(items[0], analysis_file, no_analysis_file) as (tx_afile, tx_noafile):
nblock_regions = reduce(operator.add, bed_regions).saveas(
"%s-nblock%s" % utils.splitext_plus(tx_afile))
ref_file = tz.get_in(["reference", "fasta", "base"], items[0])
ref_regions = get_ref_bedtool(ref_file, config)
min_n_size = int(config["algorithm"].get("nomap_split_size", 100))
block_filter = NBlockRegionPicker(ref_regions, config, min_n_size)
final_nblock_regions = nblock_regions.filter(
block_filter.include_block).saveas().each(block_filter.expand_block).saveas(
"%s-nblockfinal%s" % utils.splitext_plus(tx_afile))
final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).merge(d=min_n_size)
_write_bed_regions(items[0], final_regions, tx_afile, tx_noafile)
if analysis_file and utils.file_exists(analysis_file):
return analysis_file, no_analysis_file
else:
return None, None
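# Hedged sketch of the intersection approach used above: reduce(operator.add, ...)
# intersects the per-sample nblock BedTools (keeping only regions with no callable
# bases in any sample), which is then subtracted from the reference regions and
# merged into analysis blocks. The file names passed in are hypothetical placeholders.
def _example_combine_nblocks_sketch(nblock_beds, ref_bed, out_bed, min_n_size=100):
    """Write a combined callable-region BED derived from per-sample nblock BEDs."""
    import operator
    from functools import reduce  # needed on Python 3; harmless on Python 2
    import pybedtools
    nblock_regions = reduce(operator.add, [pybedtools.BedTool(f) for f in nblock_beds])
    ref_regions = pybedtools.BedTool(ref_bed)
    callable_regions = ref_regions.subtract(nblock_regions, nonamecheck=True).merge(d=min_n_size)
    callable_regions.saveas(out_bed)
    return out_bed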
|
|
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2009-2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
import time
import supybot.conf as conf
import supybot.world as world
import supybot.ircdb as ircdb
import supybot.irclib as irclib
import supybot.utils.minisix as minisix
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.registry as registry
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('ChannelLogger')
if minisix.PY2:
from io import open
class FakeLog(object):
def flush(self):
return
def close(self):
return
def write(self, s):
return
class ChannelLogger(callbacks.Plugin):
"""This plugin allows the bot to log channel conversations to disk."""
noIgnore = True
def __init__(self, irc):
self.__parent = super(ChannelLogger, self)
self.__parent.__init__(irc)
self.lastMsgs = {}
self.lastStates = {}
self.logs = {}
self.flusher = self.flush
world.flushers.append(self.flusher)
def die(self):
for log in self._logs():
log.close()
world.flushers = [x for x in world.flushers if x is not self.flusher]
def __call__(self, irc, msg):
try:
# I don't know why I put this in, but it doesn't work, because it
# doesn't call doNick or doQuit.
# if msg.args and irc.isChannel(msg.args[0]):
self.__parent.__call__(irc, msg)
if irc in self.lastMsgs:
if irc not in self.lastStates:
self.lastStates[irc] = irc.state.copy()
self.lastStates[irc].addMsg(irc, self.lastMsgs[irc])
finally:
# We must make sure this always gets updated.
self.lastMsgs[irc] = msg
def reset(self):
for log in self._logs():
log.close()
self.logs.clear()
self.lastMsgs.clear()
self.lastStates.clear()
def _logs(self):
for logs in self.logs.values():
for log in logs.values():
yield log
def flush(self):
self.checkLogNames()
for log in self._logs():
try:
log.flush()
except ValueError as e:
if e.args[0] != 'I/O operation on a closed file':
self.log.exception('Odd exception:')
def logNameTimestamp(self, channel):
format = self.registryValue('filenameTimestamp', channel)
return time.strftime(format)
def getLogName(self, channel):
if self.registryValue('rotateLogs', channel):
return '%s.%s.log' % (channel, self.logNameTimestamp(channel))
else:
return '%s.log' % channel
def getLogDir(self, irc, channel):
logDir = conf.supybot.directories.log.dirize(self.name())
if self.registryValue('directories'):
if self.registryValue('directories.network'):
logDir = os.path.join(logDir, irc.network)
if self.registryValue('directories.channel'):
logDir = os.path.join(logDir, channel)
if self.registryValue('directories.timestamp'):
format = self.registryValue('directories.timestamp.format')
                timeDir = time.strftime(format)
logDir = os.path.join(logDir, timeDir)
if not os.path.exists(logDir):
os.makedirs(logDir)
return logDir
def checkLogNames(self):
for (irc, logs) in self.logs.items():
for (channel, log) in list(logs.items()):
if self.registryValue('rotateLogs', channel):
name = self.getLogName(channel)
if name != os.path.basename(log.name):
log.close()
del logs[channel]
def getLog(self, irc, channel):
self.checkLogNames()
try:
logs = self.logs[irc]
except KeyError:
logs = ircutils.IrcDict()
self.logs[irc] = logs
if channel in logs:
return logs[channel]
else:
try:
name = self.getLogName(channel)
logDir = self.getLogDir(irc, channel)
log = open(os.path.join(logDir, name), encoding='utf-8', mode='a')
logs[channel] = log
return log
except IOError:
self.log.exception('Error opening log:')
return FakeLog()
def timestamp(self, log):
format = conf.supybot.log.timestampFormat()
if format:
string = time.strftime(format) + ' '
if minisix.PY2:
string = string.decode('utf8', 'ignore')
log.write(string)
def normalizeChannel(self, irc, channel):
return ircutils.toLower(channel)
def doLog(self, irc, channel, s, *args):
if not self.registryValue('enable', channel):
return
s = format(s, *args)
channel = self.normalizeChannel(irc, channel)
log = self.getLog(irc, channel)
if self.registryValue('timestamp', channel):
self.timestamp(log)
if self.registryValue('stripFormatting', channel):
s = ircutils.stripFormatting(s)
if minisix.PY2:
s = s.decode('utf8', 'ignore')
log.write(s)
if self.registryValue('flushImmediately'):
log.flush()
def doPrivmsg(self, irc, msg):
(recipients, text) = msg.args
for channel in recipients.split(','):
if irc.isChannel(channel):
noLogPrefix = self.registryValue('noLogPrefix', channel)
cap = ircdb.makeChannelCapability(channel, 'logChannelMessages')
try:
logChannelMessages = ircdb.checkCapability(msg.prefix, cap,
ignoreOwner=True)
except KeyError:
logChannelMessages = True
nick = msg.nick or irc.nick
if msg.tagged('ChannelLogger__relayed'):
(nick, text) = text.split(' ', 1)
nick = nick[1:-1]
msg.args = (recipients, text)
if (noLogPrefix and text.startswith(noLogPrefix)) or \
not logChannelMessages:
text = '-= THIS MESSAGE NOT LOGGED =-'
if ircmsgs.isAction(msg):
self.doLog(irc, channel,
'* %s %s\n', nick, ircmsgs.unAction(msg))
else:
self.doLog(irc, channel, '<%s> %s\n', nick, text)
def doNotice(self, irc, msg):
(recipients, text) = msg.args
for channel in recipients.split(','):
if irc.isChannel(channel):
self.doLog(irc, channel, '-%s- %s\n', msg.nick, text)
def doNick(self, irc, msg):
oldNick = msg.nick
newNick = msg.args[0]
for (channel, c) in irc.state.channels.items():
if newNick in c.users:
self.doLog(irc, channel,
'*** %s is now known as %s\n', oldNick, newNick)
def doInvite(self, irc, msg):
(target, channel) = msg.args
self.doLog(irc, channel,
'*** %s <%s> invited %s to %s\n',
msg.nick, msg.prefix, target, channel)
def doJoin(self, irc, msg):
for channel in msg.args[0].split(','):
            if self.registryValue('showJoinParts', channel):
self.doLog(irc, channel,
'*** %s <%s> has joined %s\n',
msg.nick, msg.prefix, channel)
def doKick(self, irc, msg):
if len(msg.args) == 3:
(channel, target, kickmsg) = msg.args
else:
(channel, target) = msg.args
kickmsg = ''
if kickmsg:
self.doLog(irc, channel,
'*** %s was kicked by %s (%s)\n',
target, msg.nick, kickmsg)
else:
self.doLog(irc, channel,
'*** %s was kicked by %s\n', target, msg.nick)
def doPart(self, irc, msg):
if len(msg.args) > 1:
reason = " (%s)" % msg.args[1]
else:
reason = ""
for channel in msg.args[0].split(','):
            if self.registryValue('showJoinParts', channel):
self.doLog(irc, channel,
'*** %s <%s> has left %s%s\n',
msg.nick, msg.prefix, channel, reason)
def doMode(self, irc, msg):
channel = msg.args[0]
if irc.isChannel(channel) and msg.args[1:]:
self.doLog(irc, channel,
'*** %s sets mode: %s %s\n',
msg.nick or msg.prefix, msg.args[1],
' '.join(msg.args[2:]))
def doTopic(self, irc, msg):
if len(msg.args) == 1:
return # It's an empty TOPIC just to get the current topic.
channel = msg.args[0]
self.doLog(irc, channel,
'*** %s changes topic to "%s"\n', msg.nick, msg.args[1])
def doQuit(self, irc, msg):
if len(msg.args) == 1:
reason = " (%s)" % msg.args[0]
else:
reason = ""
if not isinstance(irc, irclib.Irc):
irc = irc.getRealIrc()
if irc not in self.lastStates:
return
for (channel, chan) in self.lastStates[irc].channels.items():
            if self.registryValue('showJoinParts', channel):
if msg.nick in chan.users:
self.doLog(irc, channel,
'*** %s <%s> has quit IRC%s\n',
msg.nick, msg.prefix, reason)
def outFilter(self, irc, msg):
# Gotta catch my own messages *somehow* :)
# Let's try this little trick...
if msg.command in ('PRIVMSG', 'NOTICE'):
# Other messages should be sent back to us.
m = ircmsgs.IrcMsg(msg=msg, prefix=irc.prefix)
if msg.tagged('relayedMsg'):
m.tag('ChannelLogger__relayed')
self(irc, m)
return msg
Class = ChannelLogger
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
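# Hedged illustration of the rotated log-name scheme used by getLogName() and
# logNameTimestamp() above; the channel name and timestamp format here are examples.
def _example_rotated_log_name_sketch(channel='#example', fmt='%Y-%m-%d', rotate=True):
    """Return a log file name, optionally embedding a strftime timestamp."""
    import time
    if rotate:
        return '%s.%s.log' % (channel, time.strftime(fmt))
    return '%s.log' % channel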
|
|
"""
Copyright (c) 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
AMQP stream-monitor plumbing: matchers, trackers, processors and the
AMQPStreamMonitor plugin used to observe and validate AMQP traffic during test runs.
"""
from gevent import monkey
monkey.patch_dns()
monkey.patch_time()
monkey.patch_builtins()
monkey.patch_select()
import re
import sys
import optparse
import uuid
import gevent
import gevent.queue
from pexpect import EOF
from datetime import datetime, timedelta
from .monitor_abc import StreamMonitorBaseClass
from .stream_matchers_base import StreamMatchBase
from .stream_matchers_results import StreamRunResults, MatcherValidationMissmatch, MatcherValidationMissingField
from .amqp_od import RackHDAMQPOnDemand
from .ssh_helper import SSHHelper
from kombu import Connection, Producer, Queue, Exchange, Consumer
class _KeyedConsumerHandler(object):
_keyed_consumers = {}
@classmethod
def get_keyed_consumer(cls, logs, connection, exchange, routing_key, queue_name, event_cb):
mname = "ex={} rk={} qn={}".format(exchange, routing_key, queue_name)
if mname not in cls._keyed_consumers:
new_one = _KeyedConsumerHandler(logs, connection, mname, exchange, routing_key, queue_name)
cls._keyed_consumers[mname] = new_one
cls._keyed_consumers[mname].add_new_event_handler(event_cb)
return cls._keyed_consumers[mname]
@classmethod
def test_helper_finalize_cleanup(cls):
cls._keyed_consumers = {}
def __init__(self, logs, connection, name, exchange, routing_key, queue_name):
self.__logs = logs
self.__ignore_some_stuff = False
self.name = name
self.__event_callbacks = []
if queue_name is None:
queue_name = ''
exclusive = True
else:
exclusive = False
chan = connection.channel()
ex = Exchange(exchange, 'topic', channel=chan)
queue = Queue(exchange=ex, routing_key=routing_key, exclusive=exclusive)
consumer = Consumer(chan, queues=[queue], callbacks=[self.__message_cb])
consumer.consume()
self.exchange = ex
def add_new_event_handler(self, event_cb):
self.__event_callbacks.append(event_cb)
def __message_cb(self, body, msg):
skip = False
if self.__ignore_some_stuff:
if "heartbeat" in msg.delivery_info['routing_key']:
skip = True
if msg.delivery_info['routing_key'].startswith('http'):
skip = True
if msg.delivery_info['routing_key'].startswith('polleralert'):
skip = True
if skip:
self.__logs.idl.debug('AMQP-SKIP=%s', msg.delivery_info['routing_key'])
msg.ack()
return
self.__logs.idl.debug(
'Inbound AMQP msg. %s (delivery_info=%s, content_type=%s, properties=%s, body=%s)',
msg, msg.delivery_info, msg.content_type, msg.properties, body)
for event_cb in self.__event_callbacks:
try:
event_cb(msg, body)
self.__logs.debug(' -- ran %s on msg', event_cb)
except Exception as proc_ex:
self.__logs.warning('exception while running %s on %s: %s', event_cb, msg, proc_ex)
msg.ack()
class _AMQPServerWrapper(object):
def __init__(self, amqp_url, logs):
self.__logs = logs
self.__amqp_url = amqp_url
self.__monitors = {}
self.__connection = Connection(self.__amqp_url)
self.__connection.connect()
self.__running = True
self.__consumer_gl = gevent.spawn(self.__consumer_greenlet_main)
self.__consumer_gl.greenlet_name = 'amqp-consumer-gl' # allowing flogging to print a nice name
gevent.sleep(0.0)
def __consumer_greenlet_main(self):
gevent.sleep(0)
while self.__running:
try:
self.__connection.drain_events(timeout=0.5)
except Exception as ex: # NOQA: assigned but not used (left in for super-duper-low-level-debug)
# print("was woken because {}".format(ex))
pass
gevent.sleep(0.1) # make -sure- to yield cpu...
# print("---loop")
def stop_greenlet(self):
self.__running = False
@property
def connected(self):
return self.__connection.connected
def create_add_tracker(self, exchange, routing_key, event_cb, queue_name=None):
self.__logs.irl.debug("AMQPServerWrapper: create_add_tracker ex=%s, rk=%s, event_cb=%s",
exchange, routing_key, event_cb)
mon = _KeyedConsumerHandler.get_keyed_consumer(
self.__logs, self.__connection, exchange, routing_key, queue_name, event_cb)
return mon.exchange
def inject(self, exchange, routing_key, payload):
self.__logs.irl.debug("Injecting a test AMQP message: ex=%s, rk=%s, payload=%s", exchange, routing_key, payload)
if not isinstance(exchange, Exchange):
exchange = Exchange(exchange, 'topic')
prod = Producer(self.__connection, exchange=exchange, routing_key=routing_key)
prod.publish(payload)
def test_helper_sync_send_msg(self, exchange, ex_rk, send_rk, payload):
ex = Exchange(exchange, 'topic')
queue = Queue(exchange=ex, routing_key=ex_rk + '.*', exclusive=True, channel=self.__connection)
queue.declare()
prod = Producer(self.__connection, exchange=ex, routing_key=send_rk)
prod.publish(payload)
return queue
def test_helper_sync_recv_msg(self, queue):
for tick in range(10):
msg = queue.get()
if msg is not None:
break
return msg
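# Hedged sketch (not used by the wrapper above): the minimal kombu publish path
# that _AMQPServerWrapper.inject() relies on. The URL, exchange name and routing
# key below are hypothetical placeholders.
def _example_kombu_publish_sketch(amqp_url='amqp://guest:guest@localhost:5672//'):
    """Publish one test payload to a topic exchange and return the connection."""
    from kombu import Connection, Exchange, Producer  # already imported at module level
    conn = Connection(amqp_url)
    conn.connect()
    ex = Exchange('example.exchange', 'topic')
    producer = Producer(conn, exchange=ex, routing_key='example.routing.key')
    producer.publish({'hello': 'world'})
    return conn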
class _AMQPMatcher(StreamMatchBase):
"""
Implementation of a StreamMatchBase matcher.
"""
def __init__(self, logs, route_key, description, min=1, max=sys.maxint, validation_block=None, match_CB=None):
self.__route_key = route_key
self.__validation_block = validation_block
self.__match_CB = match_CB
if route_key is not None:
escaped_key = re.escape(route_key)
no_star = escaped_key.replace('*', '[^.]')
no_pound = no_star.replace('\#', '.*?')
self.__rk_regex = re.compile('^{}$'.format(no_pound))
self.__no_pound = no_pound
else:
self.__rk_regex = re.compile('.*')
super(_AMQPMatcher, self).__init__(logs, description, min=min, max=max)
def _match(self, other_event):
if self.__route_key is None:
return bool(other_event)
assert isinstance(other_event, _AMQPTrackerRecord), \
'other_event was a {} needs to be a {}'.format(type(other_event), _AMQPTrackerRecord)
m = self.__rk_regex.match(other_event.msg.delivery_info['routing_key'])
if m is None:
return False
if self.__match_CB is None:
return True
return self.__match_CB(other_event)
def _validate(self, other_event):
self._logs.idl.debug('validating event %s', other_event)
assert isinstance(other_event, _AMQPTrackerRecord), \
'other_event was a {} needs to be a {}'.format(type(other_event), _AMQPTrackerRecord)
if self.__validation_block is None:
return []
error_list = []
if 'routing_key' in self.__validation_block:
crk = self.__validation_block['routing_key']
ork = other_event.msg.delivery_info['routing_key']
if crk != ork:
self._logs.irl.debug(' Invalidated because rk %s does not match expected %s', ork, crk)
err = MatcherValidationMissmatch('msg.delivery_info', 'routing_key', crk, ork)
error_list.append(err)
if 'body' in self.__validation_block:
exp_body = self.__validation_block['body']
other_body = other_event.body
# todo: recursion
# todo: extra fields in other
for field_name, exp_value in exp_body.items():
if field_name not in other_body:
self._logs.irl.debug(" Invalidated because field %s not in event's fields %s", field_name,
other_body.keys())
err = MatcherValidationMissingField('body', field_name, exp_value)
error_list.append(err)
else:
# ok, it's there....
if exp_value == '<<present>>':
# that's good enough!
pass
elif exp_value != other_body[field_name]:
self._logs.irl.debug(" Invalidated because field %s value %s does not match expected %s",
field_name, other_body[field_name], exp_value)
err = MatcherValidationMissmatch('body', field_name, exp_value, other_body[field_name])
error_list.append(err)
pass
else:
pass
self._logs.irl.debug('Validation complete: error_list=%s', error_list)
return error_list
def dump(self, ofile=sys.stdout, indent=0):
super(_AMQPMatcher, self).dump(ofile=ofile, indent=indent)
ins = ' ' * indent
print >>ofile, "{0} route_key='{1}'".format(ins, self.__route_key)
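# Hedged, simplified sketch of the routing-key-to-regex idea behind _AMQPMatcher
# above. It is NOT the class's exact translation: it only handles '*' words and a
# trailing '.#', which is enough to illustrate the technique.
def _example_routing_key_regex_sketch(pattern):
    """Compile an AMQP topic pattern ('*' = one word, trailing '.#' = any suffix)."""
    if pattern.endswith('.#'):
        head, tail = pattern[:-2], r'(\.[^.]+)*'
    else:
        head, tail = pattern, ''
    words = [r'[^.]+' if tok == '*' else re.escape(tok) for tok in head.split('.')]
    return re.compile('^' + r'\.'.join(words) + tail + '$')
# e.g. _example_routing_key_regex_sketch('on.events.*') matches 'on.events.node'
# but not 'on.events.node.extra'.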
class _AMQPProcessor(StreamMonitorBaseClass):
def __init__(self, logs, tracker, start_at=None, transient=True):
self._logs = logs
super(_AMQPProcessor, self).__init__()
self.handle_begin()
self.transient = transient
self.__tracker = tracker
self.__inbound_queue = gevent.queue.Queue()
self.__run_till = None
self.__tail_timeout = None
self.__in_finish_mode = False
self.__ignore_misses = False
# THIS is a hack to allow raw access to underlying tracker-records until we get a common
# validation phase. See get_raw_tracker_events() below for details
self.__matches_in_order = []
self.__started_at = tracker.add_processor(self, start_at=start_at)
self.__match_greenlet = gevent.spawn(self.__match_greenlet_run)
self.__match_greenlet.greenlet_name = 'processor-match-loop-gl'
def __match_greenlet_run(self):
self._logs.irl.debug('Starting to watch for events %s', self)
results = StreamRunResults()
tail_limit = None
loop_exit_why = None
noticed_change_to_finish = False
lcnt = 0
loop_slice = 0.1
five_s_mod = int(5 / loop_slice)
        # Note: we would like to make it possible to NOT have to call
        # start_finish before processing, but there are some icky glitches
        # there that haven't been hunted down yet. So, for now, just hang here
        # until all the rules are set up.
while not self.__in_finish_mode:
gevent.sleep(0.1)
while (loop_exit_why is None) and (self.__run_till is None or self.__run_till > datetime.now()):
if lcnt % five_s_mod == 0:
if self.__run_till is None:
left = 'N/A'
else:
left = self.__run_till - datetime.now()
self._logs.irl.debug('Periodic loop: count=%d, run_till=%s, left=%s', lcnt, self.__run_till, left)
lcnt += 1
# we always want to setup tail_limit when we first cross over to finishing
if not noticed_change_to_finish and self.__in_finish_mode:
noticed_change_to_finish = True
self._logs.irl.debug(' Noticed that we shifted to finish-mode')
if tail_limit is None:
tail_limit = datetime.now() + self.__tail_timeout
self._logs.irl.debug(' and set tail-limit from none to %s', tail_limit)
try:
# timeout on peek call is needed to allow us to "notice" if our run-till
# or tail-time has been exceeded.
tracked = self.__inbound_queue.peek(timeout=loop_slice)
self._logs.idl.debug('%s peeked and got %s', self, tracked)
except gevent.queue.Empty:
tracked = None
if tracked is None:
# no message on queue.
if tail_limit is not None and datetime.now() > tail_limit:
self._logs.irl.debug(' hit tail limit during idle. Checking if end-check will work')
res = self._match_groups.check_ending()
self._logs.irl.debug(' check-res was %s, results-state=%s', res, 'results.dump(None)')
if res.is_empty:
self._logs.irl.debug(' and we can stop because processor in success state')
loop_exit_why = "tail-wait expired while processor in success state"
else:
# clear the tail-limit till another event hits us
self._logs.irl.debug(' and clearing tail-limit since we are not in success state: %s', res)
tail_limit = None
continue
res = self._match_groups.check_event(tracked, allow_complete_miss=self.__ignore_misses)
consume = False
if not res.is_empty:
consume = True
results.add_result(res)
self.__matches_in_order.append(tracked)
elif self.__ignore_misses:
# note: ignore_miss can only be set as we enter start-finish mode.
consume = True
if consume:
# remove consumed item from queue.
self.__inbound_queue.get()
if self.__tail_timeout is not None:
# we consumed a message, so bump out tail-limit
old_tail_limit = tail_limit
tail_limit = datetime.now() + self.__tail_timeout
self._logs.irl.debug(' consumed event. Bumping tail-limit from %s to %s', old_tail_limit, tail_limit)
if loop_exit_why is None:
loop_exit_why = "overall timeout occured"
self._logs.irl.debug('Periodic loop exit because %s count=%d, run_till=%s, now=%s',
loop_exit_why, lcnt, self.__run_till, datetime.now())
self._logs.irl.debug('---exiting loop because %s---: %s -> %s', loop_exit_why, self, results)
res = self._match_groups.check_ending()
results.add_result(res)
self._logs.irl.debug(' final results from %s is %s', self, results)
return results
def start_finish(self, timeout, tail_timeout=1.0, ignore_misses=True):
timeout = timedelta(seconds=timeout)
tail_timeout = timedelta(seconds=tail_timeout)
self._logs.irl.debug('start_finish on %s called. timeout=%s, tail-timeout=%s', self, timeout, tail_timeout)
self.__tail_timeout = tail_timeout
self.__run_till = datetime.now() + timeout + tail_timeout
self.__ignore_misses = ignore_misses
self.__in_finish_mode = True
return self.__match_greenlet
def process_tracked_record(self, tracked_record):
self._logs.irl.debug('Processing-tracked-record = %s', tracked_record)
self.__inbound_queue.put(tracked_record)
def match_any_event(self, description=None, min=1, max=1):
if description is None:
description = "match-any(rk={},min={},max={}".format(None, min, max)
m = _AMQPMatcher(self._logs, route_key=None, description=description, min=min, max=max)
self._add_matcher(m)
def match_on_routekey(self, description, routing_key=None, min=1, max=1, validation_block=None, match_CB=None):
if routing_key is None:
routing_key = '#'
description = "{}(rk={},min={},max={})".format(description, routing_key, min, max)
m = _AMQPMatcher(self._logs, route_key=routing_key, description=description, min=min, max=max,
validation_block=validation_block, match_CB=match_CB)
self._add_matcher(m)
def get_raw_tracker_events(self):
"""
total hack method to get raw access to the tracker-events. We WANT a mechanism
        to do a verify step at end-of-run, but for now this will have to do.
"""
return self.__matches_in_order
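# Hedged sketch of the peek-then-consume pattern used in __match_greenlet_run
# above: peek with a timeout so the loop can notice deadlines, and only get()
# (consume) an item once it has been handled. Unlike the real loop, this sketch
# also consumes rejected items so it terminates; the values are illustrative.
def _example_peek_then_consume_sketch(accept):
    """Drain a gevent queue, keeping only the items the accept() callback approves."""
    q = gevent.queue.Queue()
    for item in ('keep-me', 'skip-me', 'keep-me-too'):
        q.put(item)
    kept = []
    while True:
        try:
            item = q.peek(timeout=0.1)   # look without removing
        except gevent.queue.Empty:
            break                        # nothing arrived before the timeout
        if accept(item):
            kept.append(q.get())         # consume accepted items
        else:
            q.get()                      # consume and drop rejected items
    return kept
# e.g. _example_peek_then_consume_sketch(lambda s: s.startswith('keep'))
# returns ['keep-me', 'keep-me-too'].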
class _AMQPTrackerRecord(object):
def __init__(self, in_test, prior_test, msg, body):
self.in_test = str(in_test)
self.prior_test = str(prior_test)
self.msg = msg
self.body = body
self.timestamp = datetime.now()
def __str__(self):
rs = 'TrackRecord(at={}, msg.delivery_info={}, body={})'.format(self.timestamp, self.msg.delivery_info, self.body)
return rs
class _AMQPQueueTracker(object):
def __init__(self, tracker_name, logs, amqp_server, exchange_name, routing_key=None):
self.tracker_name = tracker_name
self.exchange_name = exchange_name
self.routing_key = routing_key
self._logs = logs
# self.handle_begin()
self.__server = amqp_server
self.__routing_key = routing_key
self.__recorded_data = []
self.__processors = []
ex = self.__server.create_add_tracker(exchange_name, routing_key, self.__got_amqp_message_cb)
self.__exchange = ex
self.__in_test = None
self.__prior_test = None
def handle_set_flogging(self, logs):
self._logs = logs
def set_test(self, test):
if self.__in_test is not None:
self.__prior_test = self.__in_test
if test is None:
saved_processors = []
for processor in self.__processors:
if not processor.transient:
                    saved_processors.append(processor)
else:
self._logs.irl.debug('Removed processor %s', processor)
self.__processors = saved_processors
self.__in_test = test
def __got_amqp_message_cb(self, msg, body):
self._logs.irl.debug('%s received msg=%s, body=%s', self, msg, body)
track = _AMQPTrackerRecord(self.__in_test, self.__prior_test, msg, body)
self.__recorded_data.append(track)
for processor in self.__processors:
processor.process_tracked_record(track)
def add_processor(self, processor, start_at):
valid_start_ats = [None, 'now']
assert start_at in valid_start_ats, \
"start_at of '{}' not one of current valid start_ats {}".format(start_at, valid_start_ats)
self.__processors.append(processor)
if start_at is None:
for tracker_record in self.__recorded_data:
processor.process_tracked_record(tracker_record)
def start_finish(self, timeout):
greenlets = []
for processor in self.__processors:
self._logs.irl.debug("%s going to start_finish on %s", self, processor)
gl = processor.start_finish(timeout)
greenlets.append(gl)
self._logs.irl.debug(" list of greenlets to finish %s", greenlets)
return greenlets
def test_helper_wait_for_one_message(self, timeout=5):
timeout = timedelta(seconds=timeout)
sleep_till = datetime.now() + timeout
self._logs.irl.debug('waiting for single message, timeout=%s', timeout)
while len(self.__recorded_data) == 0 and datetime.now() < sleep_till:
gevent.sleep(0)
if len(self.__recorded_data) > 0:
return self.__recorded_data[0]
return None
def __str__(self):
ns = 'tracker(name={}, ex={}, rk={}'.format(self.tracker_name, self.exchange_name, self.routing_key)
return ns
def __repr__(self):
return str(self)
class AMQPStreamMonitor(StreamMonitorBaseClass):
"""
Implementation of a StreamMonitorBaseClass that handles working with AMQP.
Needs to be able to:
* Create an AMQP-on-demand server if asked
* Spin up an AMQP receiver greenlet to on-demand
"""
def handle_set_flogging(self, logs):
super(AMQPStreamMonitor, self).handle_set_flogging(logs)
self.__trackers = {}
        self.__call_for_all_trackers('handle_set_flogging', logs)
def handle_begin(self):
"""
Handles plugin 'begin' event. This means spinning up
a greenlet to monitor the AMQP server.
"""
super(AMQPStreamMonitor, self).handle_begin()
sm_amqp_url = getattr(self.__options, 'sm_amqp_url', None)
sm_amqp_use_user = getattr(self.__options, 'sm_amqp_use_user', None)
sm_amqp_setup_user = getattr(self.__options, 'sm_amqp_setup_user', None)
self.__cleanup_user = None
self.__amqp_on_demand = False
if sm_amqp_url is None:
sm_amqp_url = None
elif sm_amqp_url == 'on-demand':
self.__amqp_on_demand = RackHDAMQPOnDemand()
sm_amqp_url = self.__amqp_on_demand.get_url()
elif sm_amqp_url.startswith('generate'):
sm_amqp_url, self.__cleanup_user = self.__setup_generated_amqp(sm_amqp_url, sm_amqp_use_user, sm_amqp_setup_user)
if sm_amqp_url is None:
self.__amqp_server = None
else:
self.__amqp_server = _AMQPServerWrapper(sm_amqp_url, self._logs)
self.amqp_url = sm_amqp_url
def __call_for_all_trackers(self, method_name, *args, **kwargs):
self._logs.irl.debug('relaying %s(%s) to all trackers %s', method_name, args, self.__trackers)
for tracker in self.__trackers.values():
method = getattr(tracker, method_name, None)
if method is not None:
self._logs.irl.debug_4(' method %s:%s found on monitor %s. calling', method_name, method, tracker)
method(*args, **kwargs)
def create_tracker(self, tracker_name, exchange_name, routing_key=None):
assert tracker_name not in self.__trackers, \
'you attempted to create a tracker by the name of {}(ex={},rk={}) but it already exists {}'.format(
tracker_name, exchange_name, routing_key, self.__trackers[tracker_name])
tracker = _AMQPQueueTracker(tracker_name, self._logs, self.__amqp_server, exchange_name, routing_key=routing_key)
self.__trackers[tracker_name] = tracker
self._logs.irl.debug('created tracker {}'.format(tracker))
return tracker
def get_tracker_queue_processor(self, tracker, start_at=None):
assert tracker.tracker_name in self.__trackers, \
"you tried to use tracker {}, but it isn't in the list of registered trackers {}".format(
tracker.name, self.__trackers.keys())
proc = _AMQPProcessor(self._logs, tracker, start_at=start_at)
return proc
def handle_start_test(self, test):
self.__call_for_all_trackers('set_test', test)
super(AMQPStreamMonitor, self).handle_start_test(test)
def handle_after_test(self, test):
self.__call_for_all_trackers('set_test', None)
super(AMQPStreamMonitor, self).handle_after_test(test)
def handle_finalize(self):
"""
Handle end-of-run cleanup
"""
if self.__cleanup_user is not None:
clean = SSHHelper('dut', 'amqp-user-delete-ssh-stdouterr: ')
cmd_text, ecode, output = clean.sendline_and_stat('rabbitmqctl delete_user {}'.format(
self.__cleanup_user))
if ecode != 0 and 'no_such_user' not in output:
self._logs.irl.warning(
"remove of amqp-test-user %s command '%s' failed with something other than 'no_such_user': %s",
                    self.__cleanup_user, cmd_text, output)
if self.__amqp_server is not None:
self.__amqp_server.stop_greenlet()
_KeyedConsumerHandler.test_helper_finalize_cleanup()
def inject(self, exchange, routing_key, payload):
self.__amqp_server.inject(exchange, routing_key, payload)
def finish(self, timeout=5):
greenlets = []
self._logs.irl.debug("Entering finish for amqp-stream monitor with %d trackers", len(self.__trackers))
for tracker in self.__trackers.values():
ttgls = tracker.start_finish(timeout=timeout)
self._logs.irl.debug(" located %s greenlets (%s) in tracker %s", len(ttgls), tracker, ttgls)
greenlets.extend(ttgls)
self._logs.irl.debug("START wait for %d greenlets (%s)", len(greenlets), greenlets)
gevent.wait(greenlets)
reses = []
self._logs.irl.debug("END wait for %d greenlets (%s)", len(greenlets), greenlets)
for gr in greenlets:
assert gr.ready(), \
'all greenlets said they completed, but this one is not {}'.format(gr)
if not gr.successful():
raise gr.exception
assert gr.successful(), \
'a greenlet {} failed with {}.'.format(gr, gr.exception)
results = gr.value
reses.append(results)
self._logs.irl.debug(" added results %s for greenlet %s", results, gr)
self._logs.irl.debug("complete set of results for finish: %s", reses)
return reses
def __setup_generated_amqp(self, generate_string, use_user, setup_user):
"""
Handle the case where we are told to generate an AMQP user
and even set it up on the DUT. There are three paths here:
* totally auto-setup (use_user and setup_user both None)
* use_user is non-none, so we use that name instead of creating one (we also don't delete the user when done)
* setup_user is non-none, so we create one by that name (and don't delete when done)
"""
assert use_user is None or setup_user is None, \
"can't both setup user AND use-user in same invocation"
port = int(generate_string.split(':')[1])
if use_user is not None:
auser = use_user
apw = use_user
host = SSHHelper.get_parser_options_sm_dut_ssh_host()
return 'amqp://{}:{}@{}:{}'.format(auser, apw, host, port), None
elif setup_user is not None:
auser = setup_user
apw = setup_user
ret_user = None
else:
uid = str(uuid.uuid4())
auser = 'tdd_amqp_user_{}'.format(uid)
apw = uid
ret_user = auser
try:
fixed = SSHHelper('dut', 'amqp-user-setup-ssh-stdouterr: ')
cmd_text, ecode, output = fixed.sendline_and_stat('rabbitmqctl delete_user {}'.format(auser))
# the user probably WON'T be there, so don't worry much.
assert ecode == 0 or 'no_such_user' in output, \
"{} failed with something other than 'no_such_user':".format(cmd_text, output)
# now add this user.
fixed.sendline_and_stat('rabbitmqctl add_user {} {}'.format(auser, apw), must_be_0=True)
# add administrator tag
fixed.sendline_and_stat('rabbitmqctl set_user_tags {} administrator'.format(auser), must_be_0=True)
# now add permissions
fixed.sendline_and_stat(r'''rabbitmqctl set_permissions {} ".*" ".*" ".*"'''.format(auser), must_be_0=True)
fixed.logout()
return 'amqp://{}:{}@{}:{}'.format(auser, apw, fixed.dut_ssh_host, port), ret_user
except EOF as ex:
self._logs.irl.warning('unable to connect to instance to setup AMQP user. AMQP monitors disabled: %s', ex)
            self._logs.irl.warning('^^^^ this is -usually- caused by incorrect configuration, such as '
                                   'the wrong host or ssh port for the given installation')
except Exception as ex:
self._logs.irl.debug('unable to set up amqp user. AMQP monitors disabled: %s', ex)
self._logs.irl.debug('^^^^ if this is a deploy test, this is probably ok. If it is a real test, this is a problem.')
return None, None
@property
def has_amqp_server(self):
"""
method to indicate if an AMQP server was defined or not.
This allows callers to Skip() tests if not.
"""
return self.__amqp_server is not None
def test_helper_is_amqp_running(self):
return self.__amqp_server.connected
def test_helper_sync_send_msg(self, exchange, ex_rk, send_rk, payload):
return self.__amqp_server.test_helper_sync_send_msg(
exchange, ex_rk, send_rk, payload)
def test_helper_sync_recv_msg(self, queue):
return self.__amqp_server.test_helper_sync_recv_msg(queue)
@classmethod
    def enabled_for_nose(cls):
return True
def set_options(self, options):
self.__options = options
@classmethod
    def add_nose_parser_opts(cls, parser):
amqp_group = optparse.OptionGroup(parser, 'AMQP options')
parser.add_option_group(amqp_group)
amqp_group.add_option(
'--sm-amqp-url', dest='sm_amqp_url', default=None,
help="set the AMQP url to use. If not set, a docker based server will be setup and used")
amqp_group.add_option(
'--sm-amqp-setup-user', dest='sm_amqp_setup_user', default=None,
help="assure this user exists in the instance. Disables the auto-create user")
amqp_group.add_option(
'--sm-amqp-use-user', dest='sm_amqp_use_user', default=None,
help="use this user instead of auto-creating one. Must already exist in instance")
|
|
import os
import genapi
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
h_template = r"""
#ifdef _MULTIARRAYMODULE
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
PyObject *c_api = NULL;
if (numpy == NULL) {
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
#if PY_VERSION_HEX >= 0x02070000
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
#else
if (!PyCObject_Check(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
#endif
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/* Perform runtime check of C API version */
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version %%x but this version of numpy is %%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version %%x but this version of numpy is %%x", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#if PY_VERSION_HEX >= 0x03000000
#define NUMPY_IMPORT_ARRAY_RETVAL NULL
#else
#define NUMPY_IMPORT_ARRAY_RETVAL
#endif
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
c_api_header = """
===========
Numpy C-API
===========
"""
def generate_api(output_dir, force=False):
basename = 'multiarray_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = numpy_api.multiarray_api
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
global_vars = sources[0]
global_vars_types = sources[1]
scalar_bool_values = sources[2]
types_api = sources[3]
multiarray_funcs = sources[4]
    # Remove global_vars_types: not an API dict
multiarray_api = sources[:1] + sources[2:]
module_list = []
extension_list = []
init_list = []
# Check multiarray api indexes
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
multiarray_funcs)
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
# Create dict name -> *Api instance
api_name = 'PyArray_API'
multiarray_api_dict = {}
for f in numpyapi_list:
name = f.name
index = multiarray_funcs[name]
multiarray_api_dict[f.name] = FunctionApi(f.name, index, f.return_type,
f.args, api_name)
for name, index in global_vars.items():
type = global_vars_types[name]
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
for name, index in scalar_bool_values.items():
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
for name, index in types_api.items():
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
assert len(multiarray_api_dict) == len(multiarray_api_index)
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
api_item = multiarray_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
fid = open(header_file, 'w')
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
fid.write(s)
fid.close()
# Write to c-code
fid = open(c_file, 'w')
s = c_template % ',\n'.join(init_list)
fid.write(s)
fid.close()
# write to documentation
fid = open(doc_file, 'w')
fid.write(c_api_header)
for func in numpyapi_list:
fid.write(func.to_ReST())
fid.write('\n\n')
fid.close()
return targets
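# Hedged illustration of the template-fill step performed by do_generate_api()
# above: the %s placeholders in h_template/c_template are substituted with the
# joined definition lists (literal percent signs in the templates are written as
# %%). The toy template and entries below are made-up placeholders.
def _example_template_fill_sketch():
    """Show how a %s-style template expands with a joined definition list."""
    toy_template = "/* generated */\nvoid *ToyArray_API[] = {\n%s\n};\n"
    toy_init_list = ["        (void *) &ToyType", "        (void *) ToyFunction"]
    return toy_template % ',\n'.join(toy_init_list)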
|
|
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Commands for storage groups on CPCs in DPM mode.
"""
from __future__ import absolute_import
from __future__ import print_function
import click
import zhmcclient
from .zhmccli import cli
from ._cmd_cpc import find_cpc
from ._cmd_port import find_port
from ._helper import print_properties, print_resources, abort_if_false, \
options_to_properties, original_options, COMMAND_OPTIONS_METAVAR, \
click_exception, add_options, LIST_OPTIONS, EMAIL_OPTIONS, \
ASYNC_TIMEOUT_OPTIONS
ALL_TYPES = ['fcp', 'fc']
ALL_PARTITION_STATUSES = [
"communications-not-active",
"status-check",
"stopped",
"terminated",
"starting",
"active",
"stopping",
"degraded",
"reservation-error",
"paused",
]
# Defaults for storage group creation unless created from storage template
DEFAULT_TYPE = 'fcp'
DEFAULT_CONNECTIVITY = 2
DEFAULT_SHARED = True
DEFAULT_MAX_PARTITIONS = 2
DEFAULT_DIRECT_CONNECTION_COUNT = 0
def find_storagegroup(cmd_ctx, client, stogrp_name):
"""
Find a storage group by name and return its resource object.
"""
console = client.consoles.console
try:
stogrp = console.storage_groups.find(name=stogrp_name)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
return stogrp
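# Hedged usage sketch: how a caller might obtain a storage group resource via
# find_storagegroup(). The host, userid and password below are hypothetical
# placeholders, and cmd_ctx stands for whatever command context the CLI supplies.
def _example_find_storagegroup_sketch(cmd_ctx, stogrp_name):
    """Look up one storage group by name against a hypothetical HMC session."""
    session = zhmcclient.Session('hmc.example.com', 'user', 'password')  # placeholder credentials
    client = zhmcclient.Client(session)
    return find_storagegroup(cmd_ctx, client, stogrp_name)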
@cli.group('storagegroup', options_metavar=COMMAND_OPTIONS_METAVAR)
def storagegroup_group():
"""
Command group for managing storage groups (DPM mode only).
Storage groups are definitions in the HMC that simplify the management of
storage attached to partitions.
The commands in this group work only on z14 and later CPCs that are in DPM
mode.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
@storagegroup_group.command('list', options_metavar=COMMAND_OPTIONS_METAVAR)
@add_options(LIST_OPTIONS)
@click.pass_obj
def storagegroup_list(cmd_ctx, **options):
"""
List the storage groups defined in the HMC.
Storage groups for which the authenticated user does not have
object-access permission will not be included.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(lambda: cmd_storagegroup_list(cmd_ctx, options))
@storagegroup_group.command('show', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('STORAGEGROUP', type=str, metavar='STORAGEGROUP')
@click.pass_obj
def storagegroup_show(cmd_ctx, storagegroup):
"""
Show the details of a storage group.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(lambda: cmd_storagegroup_show(cmd_ctx, storagegroup))
@storagegroup_group.command('create', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.option('--name', type=str, required=True,
help='The name of the new storage group.')
@click.option('--cpc', type=str, required=True,
help='The name of the CPC associated with the new storage group.')
@click.option('--type', type=click.Choice(ALL_TYPES),
required=False, default=DEFAULT_TYPE,
help='The type of the new storage group. '
'Mutually exclusive with --template; one of them is required.')
@click.option('--template', type=str, required=False,
help='The name of the storage template on which the new storage '
'group is to be based. '
'Mutually exclusive with --type; one of them is required.')
@click.option('--description', type=str, required=False,
help='The description of the new storage group. '
'Default: Empty, or from template')
@click.option('--shared', type=bool, required=False,
help='Indicates whether the storage group can be attached to '
'more than one partition. '
'Default: {d}, or from template'.
format(d=DEFAULT_SHARED))
@click.option('--connectivity', type=int, required=False,
help='The number of adapters to utilize for the new storage '
'group. '
'Default: {d}, or from template'.
format(d=DEFAULT_CONNECTIVITY))
@click.option('--max-partitions', type=int, required=False,
help='The maximum number of partitions to which the new storage '
'group can be attached. '
'Default: {d}, or from template'.
format(d=DEFAULT_MAX_PARTITIONS))
@click.option('--direct-connection-count', type=int, required=False,
help='The number of additional virtual storage resource '
'connections for the host that can be directly assigned to a '
'guest virtual machine. A value of 0 indicates this feature is '
'disabled. '
'Default: {d}, or from template'.
format(d=DEFAULT_DIRECT_CONNECTION_COUNT))
@add_options(EMAIL_OPTIONS)
@click.pass_obj
def storagegroup_create(cmd_ctx, **options):
"""
Create a storage group.
When created using --type, the new storage group will have no storage
volumes. Storage volumes can be created and added to the storage group
with the 'storagevolume' command.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(lambda: cmd_storagegroup_create(cmd_ctx, options))
@storagegroup_group.command('update', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('STORAGEGROUP', type=str, metavar='STORAGEGROUP')
@click.option('--name', type=str, required=False,
help='The new name of the storage group.')
@click.option('--description', type=str, required=False,
help='The new description of the storage group.')
@click.option('--shared', type=bool, required=False,
help='Indicates whether the storage group can be attached to '
'more than one partition.')
@click.option('--connectivity', type=int, required=False,
help='The number of adapters to utilize for the new storage '
'group.')
@click.option('--max-partitions', type=int, required=False,
help='The maximum number of partitions to which the new storage '
'group can be attached.')
@click.option('--direct-connection-count', type=int, required=False,
help='The number of additional virtual storage resource '
'connections for the host that can be directly assigned to a '
'guest virtual machine. A value of 0 indicates this feature is '
'disabled.')
@add_options(EMAIL_OPTIONS)
@click.pass_obj
def storagegroup_update(cmd_ctx, storagegroup, **options):
"""
Update the properties of a storage group.
Only the properties will be changed for which a corresponding option is
specified, so the default for all options is not to change properties.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(
lambda: cmd_storagegroup_update(cmd_ctx, storagegroup, options))
@storagegroup_group.command('delete', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('STORAGEGROUP', type=str, metavar='STORAGEGROUP')
@click.option('-y', '--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
help='Skip prompt to confirm deletion of the storage group.',
prompt='Are you sure you want to delete this storage group ?')
@add_options(EMAIL_OPTIONS)
@click.pass_obj
def storagegroup_delete(cmd_ctx, storagegroup, **options):
"""
Delete a storage group.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(
lambda: cmd_storagegroup_delete(cmd_ctx, storagegroup, options))
@storagegroup_group.command('list-partitions',
options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('STORAGEGROUP', type=str, metavar='STORAGEGROUP')
@click.option('--name', type=str, required=False,
help='Regular expression filter to limit the returned partitions '
'to those with a matching name.')
@click.option('--status', type=str, required=False,
help='Filter to limit the returned partitions to those with a '
'matching status. Valid status values are: {sv}.'.
format(sv=', '.join(ALL_PARTITION_STATUSES)))
@click.pass_obj
def storagegroup_list_partitions(cmd_ctx, storagegroup, **options):
"""
List the partitions to which a storage group is attached.
Partitions for which the authenticated user does not have object-access
permission will not be included.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(
lambda: cmd_storagegroup_list_partitions(cmd_ctx, storagegroup,
options))
@storagegroup_group.command('list-ports',
options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('STORAGEGROUP', type=str, metavar='STORAGEGROUP')
@click.pass_obj
def storagegroup_list_ports(cmd_ctx, storagegroup):
"""
List the candidate adapter ports of a storage group.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(
lambda: cmd_storagegroup_list_ports(cmd_ctx, storagegroup))
@storagegroup_group.command('add-ports',
options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('STORAGEGROUP', type=str, metavar='STORAGEGROUP')
@click.option('--adapter', type=str, metavar='NAME',
required=False, multiple=True,
help='The name of the storage adapter with the new port to be '
'added. '
'The --adapter and --port options can be specified multiple '
'times and correspond to each other via their order.')
@click.option('--port', type=str, metavar='NAME',
required=False, multiple=True,
help='The name of the storage adapter port to be added. '
'The --adapter and --port options can be specified multiple '
'times and correspond to each other via their order.')
@click.pass_obj
def storagegroup_add_ports(cmd_ctx, storagegroup, **options):
"""
Add storage adapter ports to the candidate adapter port list of a storage
group.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(
lambda: cmd_storagegroup_add_ports(cmd_ctx, storagegroup, options))
@storagegroup_group.command('remove-ports',
options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('STORAGEGROUP', type=str, metavar='STORAGEGROUP')
@click.option('--adapter', type=str, metavar='NAME',
required=False, multiple=True,
              help='The name of the storage adapter with the port to be '
              'removed. '
'The --adapter and --port options can be specified multiple '
'times and correspond to each other via their order.')
@click.option('--port', type=str, metavar='NAME',
required=False, multiple=True,
              help='The name of the storage adapter port to be removed. '
'The --adapter and --port options can be specified multiple '
'times and correspond to each other via their order.')
@click.pass_obj
def storagegroup_remove_ports(cmd_ctx, storagegroup, **options):
"""
Remove ports from the candidate adapter port list of a storage group.
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(
lambda: cmd_storagegroup_remove_ports(cmd_ctx, storagegroup, options))
@storagegroup_group.command('discover-fcp',
options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('STORAGEGROUP', type=str, metavar='STORAGEGROUP')
@click.option('--force-restart', type=bool, required=False, default=False,
              help='Indicates that, if there is an in-progress discovery operation '
'for the specified storage group, it should be terminated and '
'started again.')
@add_options(ASYNC_TIMEOUT_OPTIONS)
@click.pass_obj
def storagegroup_discover_fcp(cmd_ctx, storagegroup, **options):
"""
Perform Logical Unit Number (LUN) discovery for an FCP storage group.
This command only applies to storage groups of type "fcp".
In addition to the command-specific options shown in this help text, the
general options (see 'zhmc --help') can also be specified right after the
'zhmc' command name.
"""
cmd_ctx.execute_cmd(
lambda: cmd_storagegroup_discover_fcp(cmd_ctx, storagegroup, options))
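# Hedged sketch of the click wiring pattern used throughout this module: a group
# decorator creates the command group and each command registers itself on it.
# The names here are purely illustrative and not part of the zhmc CLI.
def _example_click_group_sketch():
    """Build and return a tiny stand-alone click group (illustrative only)."""
    @click.group('demo')
    def demo_group():
        """Top-level demo command group."""
    @demo_group.command('show')
    @click.argument('NAME', type=str)
    def demo_show(name):
        """Echo the requested item name."""
        click.echo("showing {n}".format(n=name))
    return demo_group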
def cmd_storagegroup_list(cmd_ctx, options):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
console = client.consoles.console
try:
stogrps = console.storage_groups.list()
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
show_list = [
'name',
]
if not options['names_only']:
show_list.extend([
'device-number',
'type',
'shared',
'fulfillment-state',
'cpc', # CPC name, as additional property
])
if options['uri']:
show_list.extend([
'object-uri',
])
cpc_additions = {}
for sg in stogrps:
try:
cpc_uri = sg.prop('cpc-uri')
cpc = client.cpcs.find(**{'object-uri': cpc_uri})
cpc_additions[sg.uri] = cpc.name
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
additions = {
'cpc': cpc_additions,
}
try:
print_resources(cmd_ctx, stogrps, cmd_ctx.output_format, show_list,
additions, all=options['all'])
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
def cmd_storagegroup_show(cmd_ctx, stogrp_name):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
stogrp = find_storagegroup(cmd_ctx, client, stogrp_name)
try:
stogrp.pull_full_properties()
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
print_properties(cmd_ctx, stogrp.properties, cmd_ctx.output_format)
def cmd_storagegroup_create(cmd_ctx, options):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
console = client.consoles.console
name_map = {
# The following options are handled in this function:
'cpc': None,
'email-to-address': None,
'email-cc-address': None,
}
org_options = original_options(options)
properties = options_to_properties(org_options, name_map)
cpc_name = org_options['cpc'] # It is required
cpc = find_cpc(cmd_ctx, client, cpc_name)
properties['cpc-uri'] = cpc.uri
email_to_addresses = org_options['email-to-address']
if email_to_addresses:
properties['email-to-addresses'] = email_to_addresses
email_cc_addresses = org_options['email-cc-address']
if email_cc_addresses:
properties['email-cc-addresses'] = email_cc_addresses
try:
new_stogrp = console.storage_groups.create(properties)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
cmd_ctx.spinner.stop()
click.echo("New storage group '{sg}' has been created.".
format(sg=new_stogrp.properties['name']))
def cmd_storagegroup_update(cmd_ctx, stogrp_name, options):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
stogrp = find_storagegroup(cmd_ctx, client, stogrp_name)
name_map = {
# The following options are handled in this function:
'email-to-address': None,
'email-cc-address': None,
}
org_options = original_options(options)
properties = options_to_properties(org_options, name_map)
email_to_addresses = org_options['email-to-address']
if email_to_addresses:
properties['email-to-addresses'] = email_to_addresses
email_cc_addresses = org_options['email-cc-address']
if email_cc_addresses:
properties['email-cc-addresses'] = email_cc_addresses
if not properties:
cmd_ctx.spinner.stop()
click.echo("No properties specified for updating storage group '{sg}'.".
format(sg=stogrp_name))
return
try:
stogrp.update_properties(properties)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
cmd_ctx.spinner.stop()
if 'name' in properties and properties['name'] != stogrp_name:
click.echo("Storage group '{sg}' has been renamed to '{sgn}' and was "
"updated.".
format(sg=stogrp_name, sgn=properties['name']))
else:
click.echo("Storage group '{sg}' has been updated.".
format(sg=stogrp_name))
def cmd_storagegroup_delete(cmd_ctx, stogrp_name, options):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
stogrp = find_storagegroup(cmd_ctx, client, stogrp_name)
org_options = original_options(options)
email_insert = org_options['email-insert']
email_to_addresses = org_options['email-to-address'] or None
email_cc_addresses = org_options['email-cc-address'] or None
try:
stogrp.delete(email_to_addresses=email_to_addresses,
email_cc_addresses=email_cc_addresses,
email_insert=email_insert)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
cmd_ctx.spinner.stop()
click.echo("Storage group '{sg}' has been deleted.".format(sg=stogrp_name))
def cmd_storagegroup_list_partitions(cmd_ctx, stogrp_name, options):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
stogrp = find_storagegroup(cmd_ctx, client, stogrp_name)
filter_name = options['name']
filter_status = options['status']
try:
partitions = stogrp.list_attached_partitions(
name=filter_name, status=filter_status)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
show_list = [
'cpc', # CPC name, as additional property
'name',
'type',
'status',
]
cpc_additions = {}
for part in partitions:
cpc = part.manager.parent
cpc_additions[part.uri] = cpc.name
additions = {
'cpc': cpc_additions,
}
try:
print_resources(cmd_ctx, partitions, cmd_ctx.output_format, show_list,
additions)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
def cmd_storagegroup_list_ports(cmd_ctx, stogrp_name):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
stogrp = find_storagegroup(cmd_ctx, client, stogrp_name)
try:
ports = stogrp.list_candidate_adapter_ports()
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
show_list = [
'cpc', # CPC name, as additional property
'adapter', # Adapter name, as additional property
'name',
'index',
'fabric-id',
]
cpc_additions = {}
adapter_additions = {}
for port in ports:
adapter = port.manager.parent
adapter_additions[port.uri] = adapter.name
cpc = adapter.manager.parent
cpc_additions[port.uri] = cpc.name
additions = {
'cpc': cpc_additions,
'adapter': adapter_additions,
}
try:
print_resources(cmd_ctx, ports, cmd_ctx.output_format, show_list,
additions)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
def cmd_storagegroup_add_ports(cmd_ctx, stogrp_name, options):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
stogrp = find_storagegroup(cmd_ctx, client, stogrp_name)
cpc = stogrp.cpc
adapter_names = options['adapter'] # List
port_names = options['port'] # List
if len(adapter_names) != len(port_names):
raise click_exception(
"The --adapter and --port options must be specified the same "
"number of times, but have been specified {na} and {np} times.".
format(na=len(adapter_names), np=len(port_names)),
cmd_ctx.error_format)
ports = []
for i, adapter_name in enumerate(adapter_names):
port_name = port_names[i]
port = find_port(cmd_ctx, client, cpc, adapter_name, port_name)
ports.append(port)
if not ports:
cmd_ctx.spinner.stop()
click.echo("No ports specified for adding to the candidate list "
"of storage group '{sg}'.".format(sg=stogrp_name))
return
try:
stogrp.add_candidate_adapter_ports(ports)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
cmd_ctx.spinner.stop()
click.echo("The specified ports have been added to the candidate list "
"of storage group '{sg}'.".format(sg=stogrp_name))
def cmd_storagegroup_remove_ports(cmd_ctx, stogrp_name, options):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
stogrp = find_storagegroup(cmd_ctx, client, stogrp_name)
cpc = stogrp.cpc
adapter_names = options['adapter'] # List
port_names = options['port'] # List
if len(adapter_names) != len(port_names):
raise click_exception(
"The --adapter and --port options must be specified the same "
"number of times, but have been specified {na} and {np} times.".
format(na=len(adapter_names), np=len(port_names)),
cmd_ctx.error_format)
ports = []
for i, adapter_name in enumerate(adapter_names):
port_name = port_names[i]
port = find_port(cmd_ctx, client, cpc, adapter_name, port_name)
ports.append(port)
if not ports:
cmd_ctx.spinner.stop()
click.echo("No ports specified for removing from the candidate list "
"of storage group '{sg}'.".format(sg=stogrp_name))
return
try:
stogrp.remove_candidate_adapter_ports(ports)
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
cmd_ctx.spinner.stop()
click.echo("The specified ports have been removed from the candidate list "
"of storage group '{sg}'.".format(sg=stogrp_name))
def cmd_storagegroup_discover_fcp(cmd_ctx, stogrp_name, options):
# pylint: disable=missing-function-docstring
client = zhmcclient.Client(cmd_ctx.session)
stogrp = find_storagegroup(cmd_ctx, client, stogrp_name)
force_restart = options['force_restart']
try:
stogrp.discover_fcp(
force_restart=force_restart, wait_for_completion=True,
operation_timeout=options['operation_timeout'])
except zhmcclient.Error as exc:
raise click_exception(exc, cmd_ctx.error_format)
cmd_ctx.spinner.stop()
click.echo("LUN discovery has been completed for FCP storage group '{sg}'.".
format(sg=stogrp_name))
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from sahara.api import acl
from sahara.service.edp import api
from sahara.service import validation as v
from sahara.service.validations.edp import data_source as v_d_s
from sahara.service.validations.edp import job as v_j
from sahara.service.validations.edp import job_binary as v_j_b
from sahara.service.validations.edp import job_binary_internal as v_j_b_i
from sahara.service.validations.edp import job_execution as v_j_e
import sahara.utils.api as u
LOG = logging.getLogger(__name__)
rest = u.Rest('v11', __name__)
# Job execution ops
@rest.get('/job-executions')
@acl.enforce("job-executions:get_all")
def job_executions_list():
job_executions = [je.to_dict() for je in api.job_execution_list(
**u.get_request_args().to_dict())]
return u.render(job_executions=job_executions)
@rest.get('/job-executions/<job_execution_id>')
@acl.enforce("job-executions:get")
@v.check_exists(api.get_job_execution, id='job_execution_id')
def job_executions(job_execution_id):
job_execution = api.get_job_execution(job_execution_id)
return u.render(job_execution.to_wrapped_dict())
@rest.get('/job-executions/<job_execution_id>/refresh-status')
@acl.enforce("job-executions:refresh_status")
@v.check_exists(api.get_job_execution, id='job_execution_id')
def job_executions_status(job_execution_id):
job_execution = api.get_job_execution_status(job_execution_id)
return u.render(job_execution.to_wrapped_dict())
@rest.get('/job-executions/<job_execution_id>/cancel')
@acl.enforce("job-executions:cancel")
@v.check_exists(api.get_job_execution, id='job_execution_id')
def job_executions_cancel(job_execution_id):
job_execution = api.cancel_job_execution(job_execution_id)
return u.render(job_execution.to_wrapped_dict())
@rest.delete('/job-executions/<job_execution_id>')
@acl.enforce("job-executions:delete")
@v.check_exists(api.get_job_execution, id='job_execution_id')
def job_executions_delete(job_execution_id):
api.delete_job_execution(job_execution_id)
return u.render()
# Data source ops
@rest.get('/data-sources')
@acl.enforce("data-sources:get_all")
def data_sources_list():
return u.render(
data_sources=[ds.to_dict() for ds in api.get_data_sources(
**u.get_request_args().to_dict())])
@rest.post('/data-sources')
@acl.enforce("data-sources:register")
@v.validate(v_d_s.DATA_SOURCE_SCHEMA, v_d_s.check_data_source_create)
def data_source_register(data):
return u.render(api.register_data_source(data).to_wrapped_dict())
@rest.get('/data-sources/<data_source_id>')
@acl.enforce("data-sources:get")
@v.check_exists(api.get_data_source, 'data_source_id')
def data_source_get(data_source_id):
return u.render(api.get_data_source(data_source_id).to_wrapped_dict())
@rest.delete('/data-sources/<data_source_id>')
@acl.enforce("data-sources:delete")
@v.check_exists(api.get_data_source, 'data_source_id')
def data_source_delete(data_source_id):
api.delete_data_source(data_source_id)
return u.render()
# Job ops
@rest.get('/jobs')
@acl.enforce("jobs:get_all")
def job_list():
return u.render(jobs=[j.to_dict() for j in api.get_jobs(
**u.get_request_args().to_dict())])
@rest.post('/jobs')
@acl.enforce("jobs:create")
@v.validate(v_j.JOB_SCHEMA, v_j.check_mains_libs)
def job_create(data):
return u.render(api.create_job(data).to_wrapped_dict())
@rest.get('/jobs/<job_id>')
@acl.enforce("jobs:get")
@v.check_exists(api.get_job, id='job_id')
def job_get(job_id):
return u.render(api.get_job(job_id).to_wrapped_dict())
@rest.delete('/jobs/<job_id>')
@acl.enforce("jobs:delete")
@v.check_exists(api.get_job, id='job_id')
def job_delete(job_id):
api.delete_job(job_id)
return u.render()
@rest.post('/jobs/<job_id>/execute')
@acl.enforce("jobs:execute")
@v.check_exists(api.get_job, id='job_id')
@v.validate(v_j_e.JOB_EXEC_SCHEMA, v_j_e.check_job_execution)
def job_execute(job_id, data):
return u.render(job_execution=api.execute_job(job_id, data).to_dict())
@rest.get('/jobs/config-hints/<job_type>')
@acl.enforce("jobs:get_config_hints")
@v.check_exists(api.get_job_config_hints, job_type='job_type')
def job_config_hints_get(job_type):
return u.render(api.get_job_config_hints(job_type))
@rest.get('/job-types')
@acl.enforce("job-types:get_all")
def job_types_get():
# We want to use flat=False with to_dict() so that
# the value of each arg is given as a list. This supports
# filters of the form ?type=Pig&type=Java, etc.
return u.render(job_types=api.get_job_types(
**u.get_request_args().to_dict(flat=False)))
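# Sketch of the flat=False behavior noted above (request values are assumed):
# for GET /job-types?type=Pig&type=Java, the request args behave like a
# werkzeug MultiDict, so to_dict(flat=True) would keep only {'type': 'Pig'},
# while to_dict(flat=False) yields {'type': ['Pig', 'Java']} and every filter
# value reaches api.get_job_types().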
# Job binary ops
@rest.post('/job-binaries')
@acl.enforce("job-binaries:create")
@v.validate(v_j_b.JOB_BINARY_SCHEMA, v_j_b.check_job_binary)
def job_binary_create(data):
return u.render(api.create_job_binary(data).to_wrapped_dict())
@rest.get('/job-binaries')
@acl.enforce("job-binaries:get_all")
def job_binary_list():
return u.render(binaries=[j.to_dict() for j in api.get_job_binaries(
**u.get_request_args().to_dict())])
@rest.get('/job-binaries/<job_binary_id>')
@acl.enforce("job-binaries:get")
@v.check_exists(api.get_job_binary, 'job_binary_id')
def job_binary_get(job_binary_id):
return u.render(api.get_job_binary(job_binary_id).to_wrapped_dict())
@rest.delete('/job-binaries/<job_binary_id>')
@acl.enforce("job-binaries:delete")
@v.check_exists(api.get_job_binary, id='job_binary_id')
def job_binary_delete(job_binary_id):
api.delete_job_binary(job_binary_id)
return u.render()
@rest.get('/job-binaries/<job_binary_id>/data')
@acl.enforce("job-binaries:get_data")
@v.check_exists(api.get_job_binary, 'job_binary_id')
def job_binary_data(job_binary_id):
data = api.get_job_binary_data(job_binary_id)
    if isinstance(data, dict):
data = u.render(data)
return data
# Job binary internals ops
@rest.put_file('/job-binary-internals/<name>')
@acl.enforce("job-binary-internals:create")
@v.validate(None, v_j_b_i.check_job_binary_internal)
def job_binary_internal_create(**values):
return u.render(api.create_job_binary_internal(values).to_wrapped_dict())
@rest.get('/job-binary-internals')
@acl.enforce("job-binary-internals:get_all")
def job_binary_internal_list():
return u.render(binaries=[j.to_dict() for j in
api.get_job_binary_internals(
**u.get_request_args().to_dict())])
@rest.get('/job-binary-internals/<job_binary_internal_id>')
@acl.enforce("job-binary-internals:get")
@v.check_exists(api.get_job_binary_internal, 'job_binary_internal_id')
def job_binary_internal_get(job_binary_internal_id):
return u.render(api.get_job_binary_internal(job_binary_internal_id
).to_wrapped_dict())
@rest.delete('/job-binary-internals/<job_binary_internal_id>')
@acl.enforce("job-binary-internals:delete")
@v.check_exists(api.get_job_binary_internal, 'job_binary_internal_id')
def job_binary_internal_delete(job_binary_internal_id):
api.delete_job_binary_internal(job_binary_internal_id)
return u.render()
@rest.get('/job-binary-internals/<job_binary_internal_id>/data')
@acl.enforce("job-binary-internals:get_data")
@v.check_exists(api.get_job_binary_internal, 'job_binary_internal_id')
def job_binary_internal_data(job_binary_internal_id):
return api.get_job_binary_internal_data(job_binary_internal_id)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from argparse import ArgumentParser, FileType
from contextlib import contextmanager
from ufo2ft import CFFOptimization
from ufo2ft.featureWriters import loadFeatureWriterFromString
from ufo2ft.filters import loadFilterFromString
from fontmake import __version__
from fontmake.errors import FontmakeError
from fontmake.font_project import INTERPOLATABLE_OUTPUTS, FontProject
def _loadPlugins(parser, specs, from_string_func, parser_error_message):
plugins = []
for s in specs:
if s == "None":
# magic value that means "don't write any features or don't apply
# any filters!"
return []
try:
plugins.append(from_string_func(s))
except Exception as e:
parser.error(parser_error_message.format(type(e).__name__, e))
return plugins
def _loadFeatureWriters(parser, specs):
return _loadPlugins(
parser,
specs,
loadFeatureWriterFromString,
"Failed to load --feature-writer:\n {}: {}",
)
def _loadFilters(parser, specs):
return _loadPlugins(
parser, specs, loadFilterFromString, "Failed to load --filter:\n {}: {}"
)
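# Hedged note on the spec strings the two helpers above accept (example names
# are hypothetical): per the --filter/--feature-writer help text, a spec is
# either a built-in class name or "module::Class", optionally followed by
# keyword arguments, e.g. "mymodule::MyFilter(pre=True)". The magic value
# "None" makes _loadPlugins() return an empty list, which disables all
# automatic feature writers or filters.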
def exclude_args(parser, args, excluded_args, target, positive=True):
"""Delete options that are not appropriate for a following code path; exit
with an error if excluded options were passed in by the user.
argparse generates a namespace with all options it knows, but not every
attribute should be passed to all code paths (i.e. options about
interpolation should not reach `run_from_ufos()`). This function can be run
before entering a particular code path to clean up the kwargs passed to it.
Exit with an error message if the user actually passed the options in.
"""
msg = '"%s" option invalid for %s'
for argname in excluded_args:
if argname not in args:
continue
if bool(args[argname]) is positive:
optname = "--{}{}".format(
"" if positive else "no-", argname.replace("_", "-")
)
parser.error(msg % (optname, target))
del args[argname]
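# Minimal usage sketch (illustrative only; the dict below is hand-written, not
# a real parsed namespace): exclude_args() silently deletes options that do not
# apply to the chosen code path and calls parser.error() only when the user
# actually set one of them.
#
#   args = {"interpolate": False, "output": ["variable"]}
#   exclude_args(parser, args, ["interpolate"], "variable output")
#   # -> args == {"output": ["variable"]}; no error, since --interpolate was
#   #    left at its falsy default.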
@contextmanager
def _make_tempdirs(parser, args):
output = args["output"]
tempdirs = []
for dirname in ("master_dir", "instance_dir"):
if args.get(dirname) == "{tmp}":
if "ufo" in output:
parser.error(
"Can't use temporary %s directory with 'ufo' output"
% dirname.replace("_dir", "")
)
import tempfile
td = args[dirname] = tempfile.mkdtemp(prefix=dirname + "_")
tempdirs.append(td)
yield tempdirs
if tempdirs:
import shutil
for td in tempdirs:
shutil.rmtree(td)
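# Sketch of the "{tmp}" handling above (paths are illustrative): with
# args = {"output": ["ttf"], "master_dir": "{tmp}", "instance_dir": None},
# entering the context replaces args["master_dir"] with a freshly created
# temporary directory (e.g. /tmp/master_dir_xyz), and that directory is
# removed again when the context exits. Combining "{tmp}" with 'ufo' output
# is rejected via parser.error().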
def main(args=None):
parser = ArgumentParser()
parser.add_argument("--version", action="version", version=__version__)
inputGroup = parser.add_argument_group(
title="Input arguments",
description="The following arguments are mutually exclusive (pick only one):",
)
xInputGroup = inputGroup.add_mutually_exclusive_group(required=True)
xInputGroup.add_argument(
"-g", "--glyphs-path", metavar="GLYPHS", help="Path to .glyphs source file"
)
xInputGroup.add_argument(
"-u",
"--ufo-paths",
nargs="+",
metavar="UFO",
help="One or more paths to UFO files",
)
xInputGroup.add_argument(
"-m",
"--mm-designspace",
metavar="DESIGNSPACE",
help="Path to .designspace file",
)
outputGroup = parser.add_argument_group(title="Output arguments")
outputGroup.add_argument(
"-o",
"--output",
nargs="+",
default=("otf", "ttf"),
metavar="FORMAT",
help="Output font formats. Choose 1 or more from: %(choices)s. Default: otf, ttf. "
"(No file paths).",
choices=(
"ufo",
"otf",
"otf-cff2",
"ttf",
"ttf-interpolatable",
"otf-interpolatable",
"variable",
"variable-cff2",
),
)
outputSubGroup = outputGroup.add_mutually_exclusive_group()
outputSubGroup.add_argument(
"--output-path",
default=None,
help="Output font file path. Only valid when the output is a single "
"file (e.g. input is a single UFO or output is variable font)",
)
outputSubGroup.add_argument(
"--output-dir",
default=None,
help="Output folder. By default, output folders are created in the "
"current working directory, grouping output fonts by format.",
)
outputGroup.add_argument(
"-i",
"--interpolate",
nargs="?",
default=False,
const=True,
metavar="INSTANCE_NAME",
help="Interpolate masters and generate all the instances defined. "
"To only interpolate a specific instance (or instances) that "
'match a given "name" attribute, you can pass as argument '
"the full instance name or a regular expression. "
'E.g.: -i "Noto Sans Bold"; or -i ".* UI Condensed". '
"(for Glyphs or MutatorMath sources only). ",
)
outputGroup.add_argument(
"--use-mutatormath",
action="store_true",
help=(
"Use MutatorMath to generate instances (supports extrapolation and "
"anisotropic locations)."
),
)
outputGroup.add_argument(
"-M",
"--masters-as-instances",
action="store_true",
help="Output masters as instances",
)
outputGroup.add_argument(
"--family-name",
help="Family name to use for masters, and to filter output instances",
)
outputGroup.add_argument(
"--round-instances",
dest="round_instances",
action="store_true",
help="Apply integer rounding to all geometry when interpolating",
)
outputGroup.add_argument(
"--designspace-path",
default=None,
help="Path to output designspace file (for Glyphs sources only).",
)
outputGroup.add_argument(
"--master-dir",
default=None,
help='Directory where to write master UFO. Default: "./master_ufo". '
'If value is "{tmp}", a temporary directory is created and '
"removed at the end (for Glyphs sources only).",
)
outputGroup.add_argument(
"--instance-dir",
default=None,
help="Directory where to write instance UFOs. Default: "
'"./instance_ufo". If value is "{tmp}", a temporary directory '
"is created and removed at the end (for Glyphs sources only).",
)
outputGroup.add_argument(
"--no-write-skipexportglyphs",
action="store_false",
dest="write_skipexportglyphs",
help="Do not store the glyph export flags in the 'public.skipExportGlyphs' "
"key of designspace/UFO lib, but use the old private glyph lib key "
"'com.schriftgestaltung.Glyphs.Export' (for Glyphs sources only).",
)
outputGroup.add_argument(
"--validate-ufo",
action="store_true",
help="Enable ufoLib validation on reading/writing UFO files. It is "
"disabled by default",
)
outputGroup.add_argument(
"--expand-features-to-instances",
action="store_true",
help="Resolves all include()s in the master feature file and writes "
"the full feature file to all instance UFOs. Only valid when "
"interpolating. Use if you share feature files of masters in "
"external files, as instances can end up elsewhere.",
)
outputGroup.add_argument(
"--no-generate-GDEF",
dest="generate_GDEF",
action="store_false",
help="Do not auto-generate a GDEF table, but keep an existing one intact.",
)
contourGroup = parser.add_argument_group(title="Handling of contours")
contourGroup.add_argument(
"--keep-overlaps",
dest="remove_overlaps",
action="store_false",
help="Do not remove any overlap.",
)
contourGroup.add_argument(
"--overlaps-backend",
dest="overlaps_backend",
metavar="BACKEND",
choices=("booleanOperations", "pathops"),
default="booleanOperations",
help="Select library to remove overlaps. Choose between: %(choices)s "
"(default: %(default)s)",
)
contourGroup.add_argument(
"--keep-direction",
dest="reverse_direction",
action="store_false",
help="Do not reverse contour direction when output is ttf or "
"ttf-interpolatable",
)
contourGroup.add_argument(
"-e",
"--conversion-error",
type=float,
default=None,
metavar="ERROR",
help="Maximum approximation error for cubic to quadratic conversion "
"measured in EM",
)
contourGroup.add_argument(
"-f",
"--flatten-components",
dest="flatten_components",
action="store_true",
help="Flatten nested components to single level.",
)
contourGroup.add_argument(
"-a",
"--autohint",
nargs="?",
const="",
help="Run ttfautohint. Can provide arguments, quoted",
)
contourGroup.add_argument(
"--cff-round-tolerance",
type=float,
default=None,
metavar="FLOAT",
help="Restrict rounding of point coordinates in CFF table to only "
"those floats whose absolute difference from their integral part "
"is less than or equal to the tolerance. By default, all floats "
"are rounded to integer (tolerance 0.5); 0 disables rounding.",
)
contourGroup.add_argument(
"--optimize-cff",
type=lambda s: CFFOptimization(int(s)),
default=CFFOptimization.SUBROUTINIZE,
help="0 disables all optimizations; 1 specializes the CFF charstring "
"operators; 2 (default) also enables subroutinization",
)
contourGroup.add_argument(
"--subroutinizer",
default=None,
choices=["compreffor", "cffsubr"],
help="name of the library to use for compressing CFF charstrings. "
"Choose between: %(choices)s. By default compreffor is used for CFF 1, "
"and cffsubr for CFF2. NOTE: compreffor doesn't support CFF2.",
)
contourGroup.add_argument(
"--no-optimize-gvar",
dest="optimize_gvar",
action="store_false",
help="Do not perform IUP optimization on variable font's 'gvar' table. "
"(only works with 'variable' TrueType-flavored output)",
)
contourGroup.add_argument(
"--filter",
metavar="CLASS",
action="append",
dest="filter_specs",
help="string specifying a filter class to load, either "
"built-in or from an external module, optionally initialized with "
"the given keyword arguments. The class and module names are "
"separated by '::'. The option can be repeated multiple times "
"for each filter class. The option overrides the filters specified "
"in the UFO lib.",
)
layoutGroup = parser.add_argument_group(title="Handling of OpenType Layout")
layoutGroup.add_argument(
"--interpolate-binary-layout",
nargs="?",
default=False,
const=True,
metavar="MASTER_DIR",
help="Interpolate layout tables from compiled master binaries. "
"Requires Glyphs or MutatorMath source.",
)
layoutGroup.add_argument(
"--feature-writer",
metavar="CLASS",
action="append",
dest="feature_writer_specs",
help="string specifying a feature writer class to load, either "
"built-in or from an external module, optionally initialized with "
"the given keyword arguments. The class and module names are "
"separated by '::'. The option can be repeated multiple times "
"for each writer class. A special value of 'None' will disable "
"all automatic feature generation. The option overrides both the "
"default ufo2ft writers and those specified in the UFO lib.",
)
layoutGroup.add_argument(
"--debug-feature-file",
metavar="FILE",
type=FileType("w", encoding="utf-8"),
default=None,
help=(
"Path were to dump OpenType features text to debug auto-generated "
"features (kern, mark, mkmk, etc.)."
),
)
feaCompilerGroup = layoutGroup.add_mutually_exclusive_group(required=False)
feaCompilerGroup.add_argument(
"--mti-source",
help="mtiLib feature definition .plist file path (use instead of FEA)",
)
glyphnamesGroup = parser.add_mutually_exclusive_group(required=False)
glyphnamesGroup.add_argument(
"--production-names",
dest="use_production_names",
action="store_true",
help="Rename glyphs with production names if available otherwise use "
"uninames.",
)
glyphnamesGroup.add_argument(
"--no-production-names", dest="use_production_names", action="store_false"
)
subsetGroup = parser.add_mutually_exclusive_group(required=False)
subsetGroup.add_argument(
"--subset",
dest="subset",
action="store_true",
help="Subset font using export flags set by glyphsLib",
)
subsetGroup.add_argument("--no-subset", dest="subset", action="store_false")
subroutinizeGroup = parser.add_mutually_exclusive_group(required=False)
subroutinizeGroup.add_argument(
"-s",
"--subroutinize",
action="store_true",
help="Optimize CFF table using compreffor (default) [DEPRECATED: use "
"--optimize-cff option instead]",
)
subroutinizeGroup.add_argument(
"-S", "--no-subroutinize", dest="subroutinize", action="store_false"
)
parser.set_defaults(use_production_names=None, subset=None, subroutinize=None)
logGroup = parser.add_argument_group(title="Logging arguments")
logGroup.add_argument(
"--timing", action="store_true", help="Print the elapsed time for each steps"
)
logGroup.add_argument(
"--verbose",
default="INFO",
metavar="LEVEL",
choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
help="Configure the logger verbosity level. Choose between: "
"%(choices)s. Default: INFO",
)
args = vars(parser.parse_args(args))
specs = args.pop("feature_writer_specs")
if specs is not None:
args["feature_writers"] = _loadFeatureWriters(parser, specs)
specs = args.pop("filter_specs")
if specs is not None:
args["filters"] = _loadFilters(parser, specs)
glyphs_path = args.pop("glyphs_path")
ufo_paths = args.pop("ufo_paths")
designspace_path = args.pop("mm_designspace")
input_format = (
"Glyphs" if glyphs_path else "designspace" if designspace_path else "UFO"
) + " source"
if INTERPOLATABLE_OUTPUTS.intersection(args["output"]):
if not (glyphs_path or designspace_path):
parser.error("Glyphs or designspace source required for variable font")
exclude_args(
parser,
args,
[
"interpolate",
"masters_as_instances",
"interpolate_binary_layout",
"use_mutatormath",
],
"variable output",
)
else:
exclude_args(parser, args, ["optimize_gvar"], "static output", positive=False)
if args.get("use_mutatormath"):
for module in ("defcon", "mutatorMath"):
try:
__import__(module)
except ImportError:
parser.error(
f"{module} module not found; reinstall fontmake with the "
"[mutatormath] extra"
)
PRINT_TRACEBACK = args.get("verbose", "INFO") == "DEBUG"
try:
project = FontProject(
timing=args.pop("timing"),
verbose=args.pop("verbose"),
validate_ufo=args.pop("validate_ufo"),
)
if glyphs_path:
with _make_tempdirs(parser, args):
project.run_from_glyphs(glyphs_path, **args)
return
exclude_args(
parser,
args,
[
"family_name",
"mti_source",
"designspace_path",
"master_dir",
"instance_dir",
],
input_format,
)
exclude_args(
parser, args, ["write_skipexportglyphs"], input_format, positive=False
)
if designspace_path:
project.run_from_designspace(designspace_path, **args)
return
exclude_args(
parser,
args,
[
"interpolate",
"use_mutatormath",
"interpolate_binary_layout",
"round_instances",
"expand_features_to_instances",
],
input_format,
)
project.run_from_ufos(
ufo_paths, is_instance=args.pop("masters_as_instances"), **args
)
except FontmakeError as e:
if PRINT_TRACEBACK:
logging.exception(e)
sys.exit(1)
sys.exit(f"fontmake: Error: {str(e)}")
if __name__ == "__main__":
sys.exit(main())
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CLC: Views Command Line Client documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 14 13:00:42 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
#
# Put the Python sources in the path.
print('adding the path: ', os.path.abspath('../velstor'))
sys.path.insert(0, os.path.abspath('../velstor'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'clc'
copyright = '2015, IC Manage Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
palette = { 'dark-primary': '#303F9F'
, 'primary': '#3F51B5'
, 'light-primary': '#C5CAE9'
, 'text-icons': '#FFFFFF'
, 'accent': '#FF5722'
, 'primary-text': '#212121'
, 'secondary-text': '#727272'
, 'divider': '#B6B6B6'
, 'icm-logo-bg': '#feffff'
, 'icm-logo-blue': '#00488d'
, 'icm-logo-text': '#45617f'
, 'icm-scrolling-text': '#091f82'
, '900': '#0D47A1'
, '500': '#2196f3'
, '100': '#bbdefb'
, '50': '#e3f2fd'
}
alabaster_theme_options = {}
bizstyle_theme_options = { }
classic_theme_options = {
      'sidebarbgcolor': palette['icm-logo-bg']
, 'sidebartextcolor': palette['icm-logo-text']
, 'sidebarlinkcolor': palette['accent']
, 'footerbgcolor': palette['icm-logo-blue']
, 'relbarbgcolor': palette['icm-logo-blue']
, 'bgcolor': palette['icm-logo-bg']
, 'textcolor': palette['primary-text']
, 'linkcolor': palette['accent']
, 'visitedlinkcolor': palette['secondary-text']
, 'headbgcolor': palette['icm-logo-bg']
, 'headtextcolor': palette['icm-logo-text']
, 'headlinkcolor': palette['accent']
, 'codebgcolor': palette['100']
, 'codetextcolor': '#000000'
}
html_theme_options = bizstyle_theme_options
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'v1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'icm-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CLCViewsCommandLineClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CLCViewsCommandLineClient.tex', 'CLC: Views Command Line Client Documentation',
'Nick Okasinski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'clcviewscommandlineclient', 'CLC: Views Command Line Client Documentation',
['Nick Okasinski'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CLCViewsCommandLineClient', 'CLC: Views Command Line Client Documentation',
'Nick Okasinski', 'CLCViewsCommandLineClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'CLC: Views Command Line Client'
epub_author = 'Nick Okasinski'
epub_publisher = 'Nick Okasinski'
epub_copyright = '2015, Nick Okasinski'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'CLC: Views Command Line Client'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
|
# This file was taken from Python, licensed under the Python License Agreement
from __future__ import print_function
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import sys
import warnings
import collections
from blist.test import unittest
from blist.test import test_support as support
from blist import sortedset as set
class PassThru(Exception):
pass
def check_pass_thru():
    # The yield below makes this a generator function, so PassThru is raised
    # lazily, on the first iteration, rather than when the function is called.
    raise PassThru
    yield 1 # pragma: no cover
class BadCmp: # pragma: no cover
def __hash__(self):
return 1
def __lt__(self, other):
raise RuntimeError
def __eq__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class TestJointOps(unittest.TestCase):
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
s = self.thetype([frozenset(self.letters)])
# Issue 8752
#self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertRaises(PassThru, self.s.union, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
self.assertEqual(self.s | self.otherword, i)
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
        if self.thetype == frozenset:  # pragma: no cover
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
self.assertEqual(self.s & self.otherword, i)
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
self.assertEqual(self.s - self.otherword, i)
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
self.assertEqual(self.s ^ self.otherword, i)
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self): # pragma: no cover
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
s = set(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == set:
self.assertEqual(repr(s), 'sortedset([sortedset(...)])')
else:
name = repr(s).partition('(')[0] # strip class name
self.assertEqual(repr(s), '%s([%s(...)])' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(support.TESTFN, "w")
try:
fo.write(str(s))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
class TestSet(TestJointOps):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
#self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
#self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
s = self.thetype([frozenset(self.word)])
#self.assertIn(self.thetype(self.word), s)
#s.remove(self.thetype(self.word))
#self.assertNotIn(self.thetype(self.word), s)
#self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else: # pragma: no cover
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else: # pragma: no cover
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
#self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
#self.assertIn(self.thetype(self.word), s)
#s.discard(self.thetype(self.word))
#self.assertNotIn(self.thetype(self.word), s)
#s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(IndexError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, 6)
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, 6)
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, 6)
self.assertRaises(TypeError, self.s.symmetric_difference_update, 6)
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, 6)
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self): # pragma: no cover
if sys.version_info[0] < 3:
return
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
        # This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
myset = set((1, 2, 3))
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
basetype = set
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps(unittest.TestCase):
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def test_print(self):
try:
fo = open(support.TESTFN, "w")
fo.write(str(self.set))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_checkempty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
# note: __length_hint__ is an internal undocumented API,
# don't rely on it in your own programs
#self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
p = pickle.dumps(self.set)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "sortedset()"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "sortedset([3])"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, 1)]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "sortedset([(0, 1)])"
def test_in(self):
self.assertIn((0, 1), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps):
def setUp(self):
self.case = "triple set"
self.values = [0, 1, 2]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = "sortedset(['a', 'b', 'c'])"
#------------------------------------------------------------------------------
def baditer():
raise TypeError
yield True # pragma: no cover
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(range(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else: # pragma: no cover
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError") # pragma: no cover
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets(unittest.TestCase):
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps(unittest.TestCase):
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
if self.otherIsIterable:
self.set |= self.other
else:
try:
self.set |= self.other
except TypeError:
pass
else: # pragma: no cover
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
if self.otherIsIterable:
self.set | self.other
self.other | self.set
self.set.union(self.other)
else:
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
if self.otherIsIterable:
self.set &= self.other
else:
try:
self.set &= self.other
except TypeError:
pass
else: # pragma: no cover
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
if self.otherIsIterable:
self.set & self.other
self.other & self.set
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
if self.otherIsIterable:
self.set ^= self.other
else:
try:
self.set ^= self.other
except TypeError:
pass
else: # pragma: no cover
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
if self.otherIsIterable:
self.set ^ self.other
self.other ^ self.set
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
if self.otherIsIterable:
self.set -= self.other
else:
try:
self.set -= self.other
except TypeError:
pass
else: # pragma: no cover
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
if self.otherIsIterable:
self.set - self.other
self.other - self.set
self.set.difference(self.other)
else:
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set('xyz')
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying(unittest.TestCase):
def test_copy(self):
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying):
def setUp(self):
self.set = set([-1, 0, 1])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
next = __next__
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self): # pragma: no cover
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
next = __next__
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
next = __next__
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
next = __next__
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set,):
for s in (range(3), (), range(1000), (1.1, 1.2), range(2000,2200,5), range(10)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s)), key=repr), sorted(g(s), key=repr))
self.assertRaises(TypeError, cons , X(s))
self.assertRaises(TypeError, cons , N(s))
self.assertRaises(ZeroDivisionError, cons , E(s))
def test_inline_methods(self):
s = set([-1,-2,-3])
for data in (range(3), (), range(1000), (1.1, 1.2), range(2000,2200,5), range(10)):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(G(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual, key=repr), sorted(expected, key=repr))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in (range(3), (), range(1000), (1.1, 1.2), range(2000,2200,5), range(10)):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set([1,2,3])
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(TypeError, getattr(set([1,2,3]), methname), X(data))
self.assertRaises(TypeError, getattr(set([1,2,3]), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set([1,2,3]), methname), E(data))
#==============================================================================
test_classes = (
TestSet,
TestSetSubclass,
TestSetSubclassWithKeywordArgs,
TestSetOfSets,
TestExceptionPropagation,
TestBasicOpsEmpty,
TestBasicOpsSingleton,
TestBasicOpsTuple,
TestBasicOpsTriple,
TestBasicOpsString,
TestBinaryOps,
TestUpdateOps,
TestMutate,
TestSubsetEqualEmpty,
TestSubsetEqualNonEmpty,
TestSubsetEmptyNonEmpty,
TestSubsetPartial,
TestSubsetNonOverlap,
TestOnlySetsNumeric,
TestOnlySetsDict,
TestOnlySetsOperator,
TestOnlySetsTuple,
TestOnlySetsString,
TestOnlySetsGenerator,
TestCopyingEmpty,
TestCopyingSingleton,
TestCopyingTriple,
TestCopyingTuple,
TestCopyingNested,
TestIdentities,
TestVariousIteratorArgs,
)
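# Illustrative sketch only: the excerpt ends before the original test driver,
# so the runner below is an assumed way to execute everything collected in
# `test_classes` with the standard unittest loader (it assumes `unittest` was
# imported earlier in this module, as the TestCase classes require).
if __name__ == '__main__':
    _suite = unittest.TestSuite()
    for _klass in test_classes:
        _suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(_klass))
    unittest.TextTestRunner(verbosity=2).run(_suite)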
|
|
#!/usr/bin/env python
"""Lambda Lets-Encrypt Configuration/Setup Tool
This is a wizard that will help you configure the Lambda function to
automatically manage your SSL certificates for CloudFront Distributions.
Usage:
setup.py
setup.py (-h | --help)
setup.py --version
Options:
-h --help Show this screen
--version Show the version
"""
from __future__ import print_function
import json
import textwrap
import time
import zipfile
from docopt import docopt
from string import Template
from installer import sns, cloudfront, iam, s3, awslambda, elb, route53
class colors:
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
QUESTION = '\033[96m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def write_str(string):
lines = textwrap.wrap(textwrap.dedent(string), 80)
for line in lines:
print(line)
def print_header(string):
print()
print(colors.OKGREEN, end='')
write_str(string)
print(colors.ENDC, end='')
def get_input(prompt, allow_empty=True):
from sys import version_info
py3 = version_info[0] > 2 # creates boolean value for test that Python major version > 2
response = None
while response is None or (not allow_empty and len(response) == 0):
print(colors.QUESTION + "> " + prompt + colors.ENDC, end='')
if py3:
response = input()
else:
response = raw_input()
return response
def get_yn(prompt, default=True):
if default is True:
prompt += "[Y/n]? "
default = True
else:
prompt += "[y/N]? "
default = False
ret = get_input(prompt, allow_empty=True)
if len(ret) == 0:
return default
if ret.lower() == "y" or ret.lower() == "yes":
return True
return False
def get_selection(prompt, options, prompt_after='Please select from the list above', allow_empty=False):
if allow_empty:
prompt_after += "(Empty for none)"
prompt_after += ": "
while True:
print(prompt)
for item in options:
print('[{}] {}'.format(item['selector'], item['prompt']))
print()
choice = get_input(prompt_after, allow_empty=True)
# Allow for empty things if desired
if len(choice) == 0 and allow_empty:
return None
# find and return their choice
for x in options:
if choice == str(x['selector']):
return x['return']
print(colors.WARNING + 'Please enter a valid choice!' + colors.ENDC)
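# Example (comment only, hypothetical values): each entry passed to
# get_selection() is a dict with 'selector' (what the user types),
# 'prompt' (the text shown), and 'return' (the value handed back), e.g.:
#
#   options = [
#       {'selector': 1, 'prompt': 'First choice', 'return': 'one'},
#       {'selector': 2, 'prompt': 'Second choice', 'return': 'two'},
#   ]
#   choice = get_selection("Pick one:", options, prompt_after="Which one? ")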
def choose_s3_bucket():
bucket_list = s3.s3_list_buckets()
options = []
for i, bucket in enumerate(bucket_list):
options.append({
'selector': i,
'prompt': bucket,
'return': bucket
})
return get_selection("Select the S3 Bucket to use:", options, prompt_after="Which S3 Bucket?", allow_empty=False)
def wizard_elb(global_config):
print_header("ELB Configuration")
write_str("""\
Now we'll detect your existing Elastic Load Balancers and allow you
to configure them to use SSL. You must select the domain names
you want on the certificate for each ELB.""")
write_str("""\
Note that only DNS validation(via Route53) is supported for ELBs""")
print()
global_config['elb_sites'] = []
global_config['elb_domains'] = []
    # Get the list of all Elastic Load Balancers
elb_list = elb.list_elbs()
elb_list_opts = []
for i, elb_name in enumerate(elb_list):
elb_list_opts.append({
'selector': i,
'prompt': elb_name,
'return': elb_name
})
route53_list = route53.list_zones()
route53_list_opts = []
for i, zone in enumerate(route53_list):
route53_list_opts.append({
'selector': i,
'prompt': "{} - {}".format(zone['Name'], zone['Id']),
'return': zone
})
while True:
lb = get_selection("Choose an ELB to configure SSL for(Leave blank for none)", elb_list_opts, prompt_after="Which ELB?", allow_empty=True)
if lb is None:
break
lb_port = get_input("What port number will this certificate be for(HTTPS is 443) [443]?", allow_empty=True)
if len(lb_port) == 0:
lb_port = 443
domains = []
while True:
if len(domains) > 0:
print("Already selected: {}".format(",".join(domains)))
zone = get_selection("Choose a Route53 Zone that points to this load balancer: ", route53_list_opts, prompt_after="Which zone?", allow_empty=True)
# stop when they don't enter anything
if zone is None:
break
# Only allow adding each domain once
if zone['Name'] in domains:
continue
domains.append(zone['Name'])
global_config['elb_domains'].append({
'DOMAIN': zone['Name'],
'ROUTE53_ZONE_ID': zone['Id'],
'VALIDATION_METHODS': ['dns-01']
})
site = {
'ELB_NAME': lb,
'ELB_PORT': lb_port,
'DOMAINS': domains,
}
global_config['elb_sites'].append(site)
def wizard_cf(global_config):
print_header("CloudFront Configuration")
global_config['cf_sites'] = []
global_config['cf_domains'] = []
# Get the list of all Cloudfront Distributions
cf_dist_list = cloudfront.list_distributions()
cf_dist_opts = []
for i, d in enumerate(cf_dist_list):
cf_dist_opts.append({
'selector': i,
'prompt': "{} - {} ({}) ".format(d['Id'], d['Comment'], ", ".join(d['Aliases'])),
'return': d
})
write_str("""\
Now we'll detect your existing CloudFront Distributions and allow you
to configure them to use SSL. Domain names will be automatically
detected from the 'Aliases/CNAMEs' configuration section of each
Distribution.""")
print()
write_str("""\
You will configure each Distribution fully before being presented with
the list of Distributions again. You can configure as many Distributions
as you like.""")
while True:
print()
dist = get_selection("Select a CloudFront Distribution to configure with Lets-Encrypt(leave blank to finish)", cf_dist_opts, prompt_after="Which CloudFront Distribution?", allow_empty=True)
if dist is None:
break
cnames = dist['Aliases']
write_str("The following domain names exist for the selected CloudFront Distribution:")
write_str(" " + ", ".join(cnames))
write_str("Each domain in this list will be validated with Lets-Encrypt and added to the certificate assigned to this Distribution.")
print()
for dns_name in cnames:
domain = {
'DOMAIN': dns_name,
'VALIDATION_METHODS': []
}
print("Choose validation methods for the domain '{}'".format(dns_name))
route53_id = route53.get_zone_id(dns_name)
if route53_id:
write_str(colors.OKGREEN + "Route53 zone detected!" + colors.ENDC)
validate_via_dns = get_yn("Validate using DNS", default=False)
if validate_via_dns:
domain['ROUTE53_ZONE_ID'] = route53_id
domain['VALIDATION_METHODS'].append('dns-01')
else:
write_str(colors.WARNING + "No Route53 zone detected, DNS validation not possible." + colors.ENDC)
validate_via_http = get_yn("Validate using HTTP", default=True)
if validate_via_http:
domain['CLOUDFRONT_ID'] = dist['Id']
domain['VALIDATION_METHODS'].append('http-01')
global_config['cf_domains'].append(domain)
site = {
'CLOUDFRONT_ID': dist['Id'],
'DOMAINS': cnames
}
global_config['cf_sites'].append(site)
def wizard_sns(global_config):
sns_email = None
print_header("Notifications")
write_str("""\
The lambda function can send notifications when a certificate is issued,
errors occur, or other things that may need your attention.
Notifications are optional.""")
use_sns = True
sns_email = get_input("Enter the email address for notifications(blank to disable): ", allow_empty=True)
if len(sns_email) == 0:
use_sns = False
global_config['use_sns'] = use_sns
global_config['sns_email'] = sns_email
def wizard_s3_cfg_bucket(global_config):
print_header("S3 Configuration Bucket")
    write_str('An S3 Bucket is required to store configuration. If you already have a bucket you want to use for this, choose "no" and select it from the list. Otherwise let the wizard create one for you.')
create_s3_cfg_bucket = get_yn("Create a bucket for configuration", True)
if create_s3_cfg_bucket:
s3_cfg_bucket = "lambda-letsencrypt-config-{}".format(global_config['ts'])
else:
s3_cfg_bucket = choose_s3_bucket()
global_config['create_s3_cfg_bucket'] = create_s3_cfg_bucket
global_config['s3_cfg_bucket'] = s3_cfg_bucket
def wizard_iam(global_config):
print_header("IAM Configuration")
write_str("An IAM role must be created for this lambda function giving it access to CloudFront, Route53, S3, SNS(notifications), IAM(certificates), and CloudWatch(logs/alarms).")
print()
write_str("If you do not let the wizard create this role you will be asked to select an existing role to use.")
create_iam_role = get_yn("Do you want to automatically create this role", True)
if not create_iam_role:
role_list = iam.list_roles()
options = []
for i, role in enumerate(role_list):
options.append({
'selector': i,
'prompt': role,
'return': role
})
iam_role_name = get_selection("Select the IAM Role:", options, prompt_after="Which IAM Role?", allow_empty=False)
else:
iam_role_name = "lambda-letsencrypt"
global_config['create_iam_role'] = create_iam_role
global_config['iam_role_name'] = iam_role_name
def wizard_challenges(global_config):
create_s3_challenge_bucket = False
s3_challenge_bucket = None
print_header("Lets-Encrypt Challenge Validation Settings")
write_str("""This tool will handle validation of your domains automatically. There are two possible validation methods: HTTP and DNS.""")
print()
write_str("HTTP validation is only available for CloudFront sites. It requires an S3 bucket to store the challenge responses in. This bucket needs to be publicly accessible. Your CloudFront Distribution(s) will be reconfigured to use this bucket as an origin for challenge responses.")
write_str("If you do not configure a bucket for this you will only be able to use DNS validation.")
print()
write_str("DNS validation requires your domain to be managed with Route53. This validation method is always available and requires no additional configuration.")
write_str(colors.WARNING + "Note: DNS validation is currently only supported by the staging server." + colors.ENDC)
print()
write_str("Each domain you want to manage can be configured to validate using either of these methods.")
print()
use_http_challenges = get_yn("Do you want to configure HTTP validation", True)
if use_http_challenges:
create_s3_challenge_bucket = get_yn("Do you want to create a bucket for these challenges(Choose No to select an existing bucket)", True)
if create_s3_challenge_bucket:
s3_challenge_bucket = "lambda-letsencrypt-challenges-{}".format(global_config['ts'])
else:
s3_challenge_bucket = choose_s3_bucket()
else:
# only dns challenge support is available
pass
global_config['use_http_challenges'] = use_http_challenges
global_config['create_s3_challenge_bucket'] = create_s3_challenge_bucket
global_config['s3_challenge_bucket'] = s3_challenge_bucket
def wizard_summary(global_config):
gc = global_config
print_header("**Summary**")
print("Notification Email: {}".format(gc['sns_email'] or "(notifications disabled)"))
print("S3 Config Bucket: {}".format(gc['s3_cfg_bucket']), end="")
if (gc['create_s3_cfg_bucket']):
print(" (to be created)")
else:
print(" (existing)")
if gc['create_iam_role']:
print("IAM Role Name: {} (to be created)".format(gc['iam_role_name']))
else:
print("IAM Role Name: {} (existing)".format(gc['iam_role_name']))
print("Support HTTP Challenges: {}".format(gc['use_http_challenges']))
if gc['use_http_challenges']:
print("S3 HTTP Challenge Bucket: {}".format(gc['s3_challenge_bucket']), end="")
if (gc['create_s3_challenge_bucket']):
print(" (to be created)")
else:
print(" (existing)")
print("Domains To Manage With Lets-Encrypt")
for d in gc['cf_domains']:
print(" {} - [{}]".format(d['DOMAIN'], ",".join(d['VALIDATION_METHODS'])))
for d in gc['elb_domains']:
print(" {} - [{}]".format(d['DOMAIN'], ",".join(d['VALIDATION_METHODS'])))
print("CloudFront Distributions To Manage:")
for cf in gc['cf_sites']:
print(" {} - [{}]".format(cf['CLOUDFRONT_ID'], ",".join(cf['DOMAINS'])))
print("Elastic Load Balancers to Manage:")
for lb in gc['elb_sites']:
print(" {}:{} - [{}]".format(lb['ELB_NAME'], lb['ELB_PORT'], ",".join(lb['DOMAINS'])))
def wizard_save_config(global_config):
print_header("Making Requested Changes")
templatevars = {}
with open('config.py.dist', 'r') as template:
configfile = Template(template.read())
templatevars['SNS_ARN'] = None
templatevars['NOTIFY_EMAIL'] = None
# Configure SNS if appropriate
sns_arn = None
if len(global_config['sns_email']) > 0:
# Create SNS Topic if necessary
print("Creating SNS Topic for Notifications ", end='')
sns_arn = sns.get_or_create_topic(global_config['sns_email'])
if sns_arn is False or sns_arn is None:
print(colors.FAIL + u'\u2717' + colors.ENDC)
else:
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
templatevars['SNS_ARN'] = sns_arn
templatevars['NOTIFY_EMAIL'] = global_config['sns_email']
# create config bucket if necessary
if global_config['create_s3_cfg_bucket']:
print("Creating S3 Configuration Bucket ", end='')
s3.create_bucket(global_config['s3_cfg_bucket'])
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
# create challenge bucket if necessary(needs to be configured as static website)
if global_config['create_s3_challenge_bucket']:
print("Creating S3 Challenge Bucket ", end='')
s3.create_web_bucket(global_config['s3_challenge_bucket'])
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
# create IAM role if required
if global_config['create_iam_role']:
policy_document = iam.generate_policy_document(
s3buckets=[
global_config['s3_cfg_bucket'],
global_config['s3_challenge_bucket']
],
snstopicarn=sns_arn
)
iam_arn = iam.configure(global_config['iam_role_name'], policy_document)
templatevars['S3_CONFIG_BUCKET'] = global_config['s3_cfg_bucket']
templatevars['S3_CHALLENGE_BUCKET'] = global_config['s3_challenge_bucket']
domains = global_config['cf_domains'] + global_config['elb_domains']
sites = global_config['cf_sites'] + global_config['elb_sites']
templatevars['DOMAINS'] = json.dumps(domains, indent=4)
templatevars['SITES'] = json.dumps(sites, indent=4)
# write out the config file
config = configfile.substitute(templatevars)
with open("config-wizard.py", 'w') as configfinal:
print("Writing Configuration File ", end='')
configfinal.write(config)
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
print("Creating Zip File To Upload To Lambda")
archive_success = True
archive = zipfile.ZipFile('lambda-letsencrypt-dist.zip', mode='w')
try:
for f in ['lambda_function.py', 'simple_acme.py']:
print(" Adding '{}'".format(f))
archive.write(f)
print(" Adding 'config.py'")
archive.write('config-wizard.py', 'config.py')
    except Exception as e:
        print(colors.FAIL + 'Zip File Creation Failed' + colors.ENDC)
        print(e)
        archive_success = False
    else:
        print('Zip File Created Successfully')
    finally:
        archive.close()
# can't continue if this failed
if not archive_success:
return
print("Configuring Lambda Function:")
iam_arn = iam.get_arn(global_config['iam_role_name'])
print(" IAM ARN: {}".format(iam_arn))
print(" Uploading Function ", end='')
if awslambda.create_function("lambda-letsencrypt", iam_arn, 'lambda-letsencrypt-dist.zip'):
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
else:
print(colors.FAIL + u'\u2717' + colors.ENDC)
return
print_header("Schedule Lambda Function")
write_str("I've done all I can for you now, there's one last step you have to take manually in order to schedule your lambda function to run once a day.")
write_str("Log into your aws console and go to this page:")
lambda_event_url = "https://console.aws.amazon.com/lambda/home#/functions/lambda-letsencrypt?tab=eventSources"
print(colors.OKBLUE + lambda_event_url + colors.ENDC)
print()
write_str('Click on "Add event source". From the dropdown, choose "Scheduled Event". Enter the following:')
write_str("Name: 'daily - rate(1 day)'")
write_str("Description: 'Run every day'")
write_str("Schedule Expression: 'rate(1 day)'")
print()
write_str("Choose to 'Enable Now', then click 'Submit'")
print_header("Testing")
write_str("You may want to test this before you set it to be recurring. Click on the 'Test' button in the AWS Console for the lambda-letsencrypt function. The data you provide to this function does not matter. Make sure to review the logs after it finishes and check for anything out of the ordinary.")
print()
write_str("It will take at least 2 runs before your certificates are issued, maybe 3 depending on how fast cloudfront responds. This is because it needs one try to configure cloudfront, one to submit the challenge and have it verified, and one final run to issue the certificate and configure the cloudfront distribution")
def wizard(global_config):
    ts = int(time.time())
    global_config['ts'] = ts
print_header("Lambda Lets-Encrypt Wizard")
write_str("""\
This wizard will guide you through the process of setting up your existing
CloudFront Distributions to use SSL certificates provided by Lets-Encrypt
and automatically issued/maintained by an AWS Lambda function.
These certificates are free of charge, and valid for 90 days. This wizard
will also set up a Lambda function that is responsible for issuing and
renewing these certificates automatically as they near their expiration
date.
The cost of the AWS services used to make this work are typically less
than a penny per month. For full pricing details please refer to the
docs.
""")
print()
print(colors.WARNING + "WARNING: ")
write_str("""\
Manual configuration is required at this time to configure the Lambda
function to run on a daily basis to keep your certificate updated. If
you do not follow the steps provided at the end of this wizard your
Lambda function will *NOT* run.
""")
print(colors.ENDC)
wizard_sns(global_config)
wizard_iam(global_config)
wizard_s3_cfg_bucket(global_config)
wizard_challenges(global_config)
wizard_cf(global_config)
wizard_elb(global_config)
cfg_menu = []
cfg_menu.append({'selector': 1, 'prompt': 'SNS', 'return': wizard_sns})
cfg_menu.append({'selector': 2, 'prompt': 'IAM', 'return': wizard_iam})
cfg_menu.append({'selector': 3, 'prompt': 'S3 Config', 'return': wizard_s3_cfg_bucket})
cfg_menu.append({'selector': 4, 'prompt': 'Challenges', 'return': wizard_challenges})
cfg_menu.append({'selector': 5, 'prompt': 'CloudFront', 'return': wizard_cf})
    cfg_menu.append({'selector': 6, 'prompt': 'Elastic Load Balancers', 'return': wizard_elb})
cfg_menu.append({'selector': 9, 'prompt': 'Done', 'return': None})
finished = False
while not finished:
wizard_summary(global_config)
finished = get_yn("Are these settings correct", True)
if not finished:
selection = get_selection("Which section do you want to change", cfg_menu, prompt_after="Which section to modify?", allow_empty=False)
if selection:
selection(global_config)
wizard_save_config(global_config)
if __name__ == "__main__":
args = docopt(__doc__, version='Lambda Lets-Encrypt 1.0')
global_config = {}
wizard(global_config)
|
|
#!/usr/bin/python
import os
import sys
import numpy
import pdb
import scipy.io
import gzip
import cPickle
import theano
import cv2
##################################
## Data Loading Functions ##
##################################
# From the Theano Tutorials
def shared_dataset(data_xy, borrow=True, svm_flag = True):
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
if svm_flag is True:
# one-hot encoded labels as {-1, 1}
        n_classes = len(numpy.unique(data_y))  # dangerous: undercounts if some classes are absent from data_y
y1 = -1 * numpy.ones((data_y.shape[0], n_classes))
y1[numpy.arange(data_y.shape[0]), data_y] = 1
shared_y1 = theano.shared(numpy.asarray(y1,dtype=theano.config.floatX), borrow=borrow)
return shared_x, theano.tensor.cast(shared_y, 'int32'), shared_y1
else:
return shared_x, theano.tensor.cast(shared_y, 'int32')
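# Usage sketch (comment only): with the default svm_flag=True the function
# returns three shared variables -- the inputs, the int32 labels, and the
# {-1, 1} one-hot labels -- e.g.:
#
#   train_x, train_y, train_y1 = shared_dataset((x_array, y_array))
#
# With svm_flag=False only (inputs, int32 labels) is returned.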
# from theano tutorials for loading pkl files like what is used in theano tutorials.
def load_data_pkl(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix) whose rows
    # correspond to examples. target is a numpy.ndarray of 1 dimension
    # (a vector) with the same length as the number of rows in input.
    # It gives the target label for the example with the same index.
test_set_x, test_set_y, test_set_y1 = shared_dataset(test_set)
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset(valid_set)
train_set_x, train_set_y, train_set_y1 = shared_dataset(train_set)
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
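# Usage sketch (comment only): unpack the returned list into the three
# splits, each a (inputs, int32 labels, one-hot labels) tuple, e.g.:
#
#   datasets = load_data_pkl('mnist.pkl.gz')
#   train_set_x, train_set_y, train_set_y1 = datasets[0]
#   valid_set_x, valid_set_y, valid_set_y1 = datasets[1]
#   test_set_x, test_set_y, test_set_y1 = datasets[2]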
# for loading matlab based data.
def load_data_mat(dataset='../dataset/waldo/', batch=1, type_set='train', load_z=False):
# Use this code if the data was created in matlab in the right format and needed to be loaded
# print "... Loading " + type_set + " batch number " + str(batch)
    #- ---------- Load Dataset ------- -#
mat = scipy.io.loadmat(dataset + type_set + '/batch_' + str(batch) + '.mat')
data_x = numpy.asarray(mat['x'], dtype = theano.config.floatX)
data_y = numpy.array(numpy.squeeze(mat['y']), dtype = 'int32')
if load_z is True:
data_z = numpy.array(numpy.squeeze(mat['z']), dtype=theano.config.floatX )
    n_classes = len(numpy.unique(data_y))  # dangerous: undercounts if some classes are absent from data_y
y1 = -1 * numpy.ones((data_y.shape[0], n_classes))
y1[numpy.arange(data_y.shape[0]), data_y] = 1
if load_z is False:
return (data_x,data_y,y1.astype( dtype = theano.config.floatX ))
else:
return (data_x,data_y,y1.astype( dtype = theano.config.floatX ),data_z)
# for MNIST of skdata
def load_skdata_mnist():
from skdata import mnist
mn = mnist.dataset.MNIST()
mn.fetch(True)
meta = mn.build_meta()
train_x = mn.arrays['train_images'][0:50000]
valid_x = mn.arrays['train_images'][50000:]
test_x = mn.arrays['test_images']
train_y = mn.arrays['train_labels'][0:50000]
valid_y = mn.arrays['train_labels'][50000:]
test_y = mn.arrays['test_labels']
#mn.clean_up() # if you wish to erase the dataset from your comp.
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x/255.,[10000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x/255.,[10000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x/255.,[50000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_noise1():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_Noise1()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:10000]
train_y = data_y[0:10000]
test_x = data_x[10000:12000]
test_y = data_y[10000:12000]
valid_x = data_x[12000:]
valid_y = data_y[12000:]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[2000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[2000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[10000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_noise2():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_Noise2()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:10000]
train_y = data_y[0:10000]
test_x = data_x[10000:12000]
test_y = data_y[10000:12000]
valid_x = data_x[12000:]
valid_y = data_y[12000:]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[2000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[2000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[10000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_noise3():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_Noise3()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:10000]
train_y = data_y[0:10000]
test_x = data_x[10000:12000]
test_y = data_y[10000:12000]
valid_x = data_x[12000:]
valid_y = data_y[12000:]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[2000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[2000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[10000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_noise4():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_Noise4()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:10000]
train_y = data_y[0:10000]
test_x = data_x[10000:12000]
test_y = data_y[10000:12000]
valid_x = data_x[12000:]
valid_y = data_y[12000:]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[2000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[2000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[10000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_noise5():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_Noise5()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:10000]
train_y = data_y[0:10000]
test_x = data_x[10000:12000]
test_y = data_y[10000:12000]
valid_x = data_x[12000:]
valid_y = data_y[12000:]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[2000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[2000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[10000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_noise6():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_Noise6()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:10000]
train_y = data_y[0:10000]
test_x = data_x[10000:12000]
test_y = data_y[10000:12000]
valid_x = data_x[12000:]
valid_y = data_y[12000:]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[2000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[2000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[10000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_bg_images():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_BackgroundImages()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:40000]
train_y = data_y[0:40000]
test_x = data_x[50000:]
test_y = data_y[50000:]
valid_x = data_x[40000:50000]
valid_y = data_y[40000:50000]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[12000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[10000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[40000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_bg_rand():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_BackgroundRandom()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:40000]
train_y = data_y[0:40000]
test_x = data_x[50000:]
test_y = data_y[50000:]
valid_x = data_x[40000:50000]
valid_y = data_y[40000:50000]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[12000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[10000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[40000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_rotated():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_Rotated()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:40000]
train_y = data_y[0:40000]
test_x = data_x[50000:]
test_y = data_y[50000:]
valid_x = data_x[40000:50000]
valid_y = data_y[40000:50000]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[12000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[10000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[40000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
def load_skdata_mnist_rotated_bg():
from skdata import larochelle_etal_2007
mn = larochelle_etal_2007.MNIST_RotatedBackgroundImages()
mn.fetch(True)
meta = mn.build_meta()
data_x = mn._inputs
data_y = mn._labels
train_x = data_x[0:40000]
train_y = data_y[0:40000]
test_x = data_x[50000:]
test_y = data_y[50000:]
valid_x = data_x[40000:50000]
valid_y = data_y[40000:50000]
test_set_x, test_set_y, test_set_y1 = shared_dataset((numpy.reshape(test_x,[12000,784]),test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((numpy.reshape(valid_x,[10000,784]),valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((numpy.reshape(train_x,[40000,784]),train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
# for cifar10 of skdata
def load_skdata_cifar10():
from skdata import cifar10
ci = cifar10.dataset.CIFAR10()
ci.fetch(True)
meta = ci.build_meta()
#ci.clean_up() # if you wish to erase the dataset from your comp.
data_x = numpy.reshape(ci._pixels,[60000,3072])/255.
data_y = ci._labels
# shuffle the data
rand_perm = numpy.random.permutation(data_y.shape[0])
data_x = data_x[rand_perm]
data_y = data_y[rand_perm]
train_x = data_x[0:40000]
train_y = data_y[0:40000]
test_x = data_x[40000:50000]
test_y = data_y[40000:50000]
valid_x = data_x[50000:]
valid_y = data_y[50000:]
test_set_x, test_set_y, test_set_y1 = shared_dataset((test_x,test_y))
valid_set_x, valid_set_y, valid_set_y1 = shared_dataset((valid_x,valid_y))
train_set_x, train_set_y, train_set_y1 = shared_dataset((train_x,train_y))
rval = [(train_set_x, train_set_y, train_set_y1), (valid_set_x, valid_set_y, valid_set_y1), (test_set_x, test_set_y, test_set_y1)]
return rval
# caltech 101 of skdata
def load_skdata_caltech101(batch_size, rand_perm, batch = 1, type_set = 'train', height = 256, width = 256 ):
import skdata
from skdata import caltech
from scipy.misc import imread
cal = caltech.Caltech101()
cal.fetch()
meta = cal._get_meta()
img,data_y = cal.img_classification_task()
data_y = data_y - 1 # Because classes are labelled in this dataset from 1 - 102, I want 0 - 101.
img = numpy.asarray(img.objs[0])
img = img[rand_perm] # Shuffle so that the ordering of classes is changed, but use the same shuffle so that loading works consistently.
data_y = data_y[rand_perm]
data_x = numpy.asarray(numpy.zeros((3*batch_size,height*width*3)), dtype = theano.config.floatX )
    data_y = numpy.asarray(data_y[3*batch_size*batch : 3*batch_size*(batch+1)] , dtype = 'int32' )  # labels aligned with the images read below
    for i in range(3*batch_size):  # fill every row; range(batch_size) left the test/valid rows all-zero
        temp_img = imread(img[3*batch_size*batch + i])
        temp_img = cv2.normalize(temp_img.astype(theano.config.floatX), None, 0.0, 1.0, cv2.NORM_MINMAX)
        temp_img = cv2.resize(temp_img,(height,width))
        if temp_img.ndim != 3:
            # Grayscale image (temporary workaround): replicate the single channel across all three RGB channels.
            temp_img1 = numpy.zeros((height,width,3))
            temp_img1[:,:,0] = temp_img
            temp_img1[:,:,1] = temp_img
            temp_img1[:,:,2] = temp_img
            data_x[i] = numpy.reshape(temp_img1,[1,height*width*3])
        else:
            data_x[i] = numpy.reshape(temp_img,[1,height*width*3])
train_x = data_x[0:batch_size]
train_y = data_y[0:batch_size]
test_x = data_x[batch_size:2*batch_size]
test_y = data_y[batch_size:2*batch_size]
valid_x = data_x[2*batch_size:]
valid_y = data_y[2*batch_size:]
if type_set == 'train':
return (train_x,train_y)
elif type_set == 'test':
return (test_x,test_y)
else:
return (valid_x,valid_y)
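# A minimal usage sketch (not part of the original code). It assumes the
# shared_dataset() helper defined earlier in this file returns Theano shared
# variables (as in the standard deeplearning.net tutorials) and that skdata can
# fetch the dataset; the mini-batch size is an arbitrary illustrative value.
if __name__ == '__main__':
    datasets = load_skdata_mnist_rotated()
    train_set_x, train_set_y, train_set_y1 = datasets[0]
    valid_set_x, valid_set_y, valid_set_y1 = datasets[1]
    test_set_x, test_set_y, test_set_y1 = datasets[2]
    mini_batch_size = 500
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // mini_batch_size
    print('loaded %d training mini-batches of size %d' % (n_train_batches, mini_batch_size))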
|
|
import urllib2, re, os, urllib, csv, sys, time
import xml.etree.ElementTree as ET
from datetime import datetime
CACHES_DIR = '%s/download-cache' % os.path.dirname(os.path.abspath(__file__))
XBRL_ELEMENTS = [
'OtherAssetsCurrent',
'OtherAssetsNoncurrent',
'OtherAssets',
'OtherLiabilities',
'OtherLiabilitiesCurrent',
'OtherLiabilitiesNoncurrent',
'Assets'
]
def _download_url_to_file(url):
cached_content = '%s/%s' % (CACHES_DIR, urllib.quote(url, ''))
if os.path.exists(cached_content):
return cached_content
else:
print 'downloading %s (%s)' % (url, datetime.now().time())
max_tries = 3
for try_count in range(max_tries + 1):
try:
response = urllib2.urlopen(url)
content = response.read()
if not os.path.exists(CACHES_DIR):
os.makedirs(CACHES_DIR)
with open(cached_content, 'w') as f:
f.write(content)
return cached_content
except Exception as e:
if try_count >= max_tries:
raise
else:
# Wait for a while
time.sleep(5)
print 'retrying %s after error: %s' % (url, e)
def _download_url(url):
cached_content = _download_url_to_file(url)
with open(cached_content, 'r') as f:
return f.read()
def _parse_xml_with_ns(xml_file):
events = "start", "start-ns"
root = None
ns_map = []
for event, elem in ET.iterparse(xml_file, events):
# print 'handling %s on %s' % (event, elem)
if event == "start-ns":
ns_map.append(elem)
elif event == "start":
if root is None:
root = elem
return ET.ElementTree(root), dict(ns_map)
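# Illustrative (hypothetical) namespace map returned by _parse_xml_with_ns():
#   {'us-gaap': 'http://fasb.org/us-gaap/2012-01-31',
#    'dei': 'http://xbrl.sec.gov/dei/2012-01-31',
#    ...}
# These URIs feed the '{%s}ElementName' lookups below; the taxonomy year in the
# URI varies from filing to filing.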
def find_company_xml(ticker):
filings_url = 'http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=%s&count=100&type=10-k&output=xml' % ticker
filings_xml = _download_url(filings_url)
if 'No matching Ticker Symbol' in filings_xml:
return None
return ET.fromstring(filings_xml)
def find_filings_with_xbrl_ref(company_xml):
results = []
for el in company_xml.findall('./results/filing'):
if el.find('XBRLREF') is not None:
results.append({
'date': el.find('dateFiled').text,
'url': el.find('filingHREF').text
})
return results
def find_xbrl_url_in_filing_by_url(url, ticker):
filing = _download_url(url)
pattern = '/Archives/edgar/data/\w+/\w+/[a-zA-Z0-9]+-\d+\.xml'
m = re.search(pattern, filing, re.DOTALL | re.UNICODE)
if m:
return 'http://www.sec.gov%s' % m.group(0)
else:
print 'Could not find XBRL XML URL by pattern [%s] in %s (company %s)' % (pattern, url, ticker)
return None
def _find_element_value(xml, ns, name, period_end_date, xbrl_html_url):
elements = xml.findall('{%s}%s' % (ns['us-gaap'], name), ns)
if len(elements) == 0:
return None
contexts = []
for e in elements:
contexts.append((e.get('contextRef'), e.text))
# Always ignore records with '_us-gaap' or '_dei' in name
filtered = filter(lambda c: '_us-gaap' not in c[0] and '_dei' not in c[0], contexts)
if len(filtered) == 0:
return None
# There are different date formats used in different XBRLs.
end_date = datetime.strptime(period_end_date, '%Y-%m-%d')
full_date_formats = [
'%Y%m%d',
'%Y-%m-%d',
'%m_%d_%Y',
'%-m_%d_%Y',
'%d%b%Y',
'%-d%b%Y',
'%d-%b-%Y',
'%-d-%b-%Y',
'%d_%b_%Y',
'%-d_%b_%Y',
'%b%d_%Y',
'%b%-d_%Y',
'YEARQUARTER'
]
expected_date_formats = full_date_formats + [
'%Y',
'E%y',
]
    # Drop entries dated one year earlier - a heuristic that helps when there is
    # no entry for the period end date itself. (The try/except below handles a
    # Feb 29 period end, since the previous year has no Feb 29.)
    # date_year_ago = end_date.replace(year = end_date.year - 1)
try:
date_year_ago = datetime(end_date.year - 1, end_date.month, end_date.day, 0, 0)
except ValueError:
date_year_ago = datetime(end_date.year - 1, end_date.month, end_date.day - 1, 0, 0)
for format in full_date_formats:
filtered = filter(lambda c: date_year_ago.strftime(format) not in c[0], filtered)
if len(filtered) == 0:
print 'No value for %s for date %s in %s' % (name, period_end_date, contexts)
return None
# Filter only contexts related to the document end date.
for format in expected_date_formats:
if format != 'YEARQUARTER':
date_string = end_date.strftime(format)
else:
date_string = '%sQ%s' % (end_date.year, (end_date.month - 1) // 3 + 1)
filtered_by_date = filter(lambda c: date_string in c[0], filtered)
if len(filtered_by_date) > 0:
used_date_format = date_string
break
if len(filtered_by_date) != 0:
filtered = filtered_by_date
else:
        # Some contexts do not contain dates but use ids of the form 'c001', 'c002', ...
# In this case select the last one.
if len(filtered) > 1 and all(map(lambda c: re.match('c\d+', c[0].lower()) is not None, filtered)):
sorted_by_number = sorted(filtered, key = lambda c: int(c[0].lower().replace('c', '')), reverse = True)
filtered =[ sorted_by_number[0] ]
# If period end is e.g. 2015, for some reason correct context is 'FI2014Q4' for many companies
elif len(filter(lambda c: 'I%sQ4' % (end_date.year - 1) in c[0], filtered)) > 0:
filtered = filter(lambda c: 'I%sQ4' % (end_date.year - 1) in c[0], filtered)
else:
raise Exception(('Could not choose date format for %s for %s in %s . Original contexts: %s') % \
(name, period_end_date, xbrl_html_url, contexts))
    # Then remove longer context ids that merely extend the shortest one,
# e.g. 'I2012Q4_us-gaap_StatementScenarioAxis...' following simple 'I2012Q4'
if len(filtered) > 1:
filtered = sorted(filtered, lambda c1, c2: len(c1[0]) - len(c2[0]))
filtered = filter(lambda c: re.match('^%s.+$' % filtered[0][0], c[0], re.DOTALL) is None, filtered)
    # Also try to remove contexts of the form aaa20100610_Something that embed the date string
if len(filtered) > 1:
filtered = sorted(filtered, lambda c1, c2: len(c1[0]) - len(c2[0]))
filtered = filter(lambda c: re.match('^.{,10}%s.{10,}$' % used_date_format, c[0], re.DOTALL) is None, filtered)
if len(filtered) > 1 or len(filtered) == 0:
message = 'Could not choose correct %s for %s in %s : %s. Original contexts: %s' % \
(name, period_end_date, xbrl_html_url, filtered, contexts)
if len(filtered) > 1:
raise Exception(message)
else:
print message
return None
# print 'Chose context %s for %s in %s at %s' % (filtered[0][0], name, period_end_date, xbrl_html_url)
value = filtered[0][1]
return value
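# Illustrative (hypothetical) (contextRef, value) pairs the heuristic above has
# to choose between for a filing whose period end date is 2012-12-31:
#   ('I2012Q4', '1000000')                                  <- kept
#   ('I2012Q4_us-gaap_StatementScenarioAxis_...', '999')    <- dropped as a prolongation
#   ('I2011Q4', '900000')                                   <- dropped (one year earlier)
#   ('c0003', '42')                                         <- numbered style; highest number kept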
def get_xbrl_data(xbrl_xml_url, xbrl_html_url, xbrl_publication_date):
xml_file = _download_url_to_file(xbrl_xml_url)
xml, ns = _parse_xml_with_ns(xml_file)
# print 'processing %s' % xbrl_xml_url
period_focus_element = xml.find('{%s}DocumentFiscalPeriodFocus' % ns['dei'], ns)
period_focus = period_focus_element.text if period_focus_element is not None else None
if period_focus is not None and period_focus != 'FY':
# print 'ignoring report not focusing on full year: %s (%s)' % (period_focus, xbrl_xml_url)
return None
period_end_date = xml.find('{%s}DocumentPeriodEndDate' % ns['dei'], ns).text
result = {
'DocumentPeriodEndDate': period_end_date,
'DateFiled': xbrl_publication_date
}
for name in XBRL_ELEMENTS:
result[name] = _find_element_value(xml, ns, name, period_end_date, xbrl_html_url)
return result
def find_xbrls(company_xml):
    # NOTE: `ticker` here is the module-level loop variable set in __main__ below;
    # this function is only meant to be called from that loop.
    filings = find_filings_with_xbrl_ref(company_xml)
    xbrls = []
    for f in filings:
        print 'processing 10-K of %s published on %s' % (ticker, f['date'])
        xbrl_url = find_xbrl_url_in_filing_by_url(f['url'], ticker)
        if xbrl_url is None:
            # A warning was already printed; skip filings without an XBRL XML reference.
            continue
        xbrl_data = get_xbrl_data(xbrl_url, f['url'], f['date'])
        if xbrl_data is not None:
            xbrls.append(xbrl_data)
    return xbrls
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: python find_xbrl_by_ticker.py <file with tickers (one per line)> [<output CSV filename>]'
sys.exit(0)
with open(sys.argv[1], 'r') as f:
tickers = map(lambda t: t.replace('\n', '').replace('\r', ''), f.readlines())
if len(sys.argv) > 2:
output_csv = sys.argv[2]
else:
output_csv = 'company_results_over_years.txt'
print 'Fetching XBRLs into file %s for %s companies: %s...' % (output_csv, len(tickers),
str(tickers[0:3]).replace('[', '').replace(']', ''))
with open(output_csv, 'wb') as csvfile:
writer = csv.writer(csvfile, dialect='excel')
writer.writerow(['Ticker', 'CIK', 'CompanyName', 'DocumentPeriodEndDate', 'DateFiled'] + XBRL_ELEMENTS)
for ticker in tickers:
try:
company_xml = find_company_xml(ticker)
if company_xml is None:
print 'NO company found at http://www.sec.gov/ by ticker %s' % ticker
continue
cik = int(company_xml.find('./companyInfo/CIK').text)
company_name = company_xml.find('./companyInfo/name').text
xbrls = find_xbrls(company_xml)
for xbrl in xbrls:
row = [ticker, cik, company_name, xbrl.get('DocumentPeriodEndDate'), xbrl.get('DateFiled')]
for element in XBRL_ELEMENTS:
row.append(xbrl.get(element))
writer.writerow(row)
except Exception as e:
# raise
print 'Failed to process %s: %s' % (ticker, e)
print 'Summary of XBRL reports is ready in CSV file %s' % output_csv
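# Example invocation (file names are placeholders):
#   python find_xbrl_by_ticker.py tickers.txt company_results.csv
# where tickers.txt lists one ticker symbol per line, e.g.
#   AAPL
#   MSFT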
|
|
"""Operations for rigid rotations in Klampt. All rotations are
represented by a 9-list specifying the entries of the rotation matrix
in column major form.
These are useful for interfacing with C code.
"""
import math
from . import vectorops
def __str__(R):
"""Converts a rotation to a string."""
return '\n'.join([' '.join([str(ri) for ri in r]) for r in matrix(R)])
def identity():
"""Returns the identity rotation"""
return [1.,0.,0.,0.,1.,0.,0.,0.,1.]
def inv(R):
"""Inverts the rotation"""
Rinv = [R[0],R[3],R[6],R[1],R[4],R[7],R[2],R[5],R[8]]
return Rinv
def apply(R,point):
"""Applies the rotation to a point"""
return (R[0]*point[0]+R[3]*point[1]+R[6]*point[2],
R[1]*point[0]+R[4]*point[1]+R[7]*point[2],
R[2]*point[0]+R[5]*point[1]+R[8]*point[2])
def matrix(R):
"""Returns the 3x3 rotation matrix corresponding to R"""
return [[R[0],R[3],R[6]],
[R[1],R[4],R[7]],
[R[2],R[5],R[8]]]
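# Storage convention: a 9-list [a,b,c, d,e,f, g,h,i] is column-major, i.e. it
# denotes the matrix [[a,d,g],[b,e,h],[c,f,i]] that matrix() above returns.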
def from_matrix(mat):
"""Returns an R corresponding to the 3x3 rotation matrix mat"""
R = [mat[0][0],mat[1][0],mat[2][0],mat[0][1],mat[1][1],mat[2][1],mat[0][2],mat[1][2],mat[2][2]]
return R
def mul(R1,R2):
"""Multiplies two rotations."""
m1=matrix(R1)
m2T=matrix(inv(R2))
mres = matrix(identity())
for i in range(3):
for j in range(3):
mres[i][j] = vectorops.dot(m1[i],m2T[j])
#print "Argument 1"
#print __str__(R1)
#print "Argument 2"
#print __str__(R2)
#print "Result"
R = from_matrix(mres)
#print __str__(R)
return R
def trace(R):
"""Computes the trace of the rotation matrix."""
return R[0]+R[4]+R[8]
def angle(R):
"""Returns absolute deviation of R from identity"""
ctheta = (trace(R) - 1.0)*0.5
return math.acos(max(min(ctheta,1.0),-1.0))
def moment(R):
"""Returns the moment w (exponential map) representation of R such
that e^[w] = R. Equivalent to axis-angle representation with
w/||w||=axis, ||w||=angle."""
theta = angle(R)
if abs(theta-math.pi)<1e-5:
#can't do normal version because the scale factor reaches a singularity
x2=(R[0]+1.)*0.5
y2=(R[4]+1.)*0.5
z2=(R[8]+1.)*0.5
if x2 < 0:
assert(x2>-1e-5)
x2=0
if y2 < 0:
assert(y2>-1e-5)
y2=0
if z2 < 0:
assert(z2>-1e-5)
z2=0
x = math.pi*math.sqrt(x2)
y = math.pi*math.sqrt(y2)
z = math.pi*math.sqrt(z2)
#determined up to sign changes, we know r12=2xy,r13=2xz,r23=2yz
xy=R[3]
xz=R[6]
yz=R[7]
if(x > y):
if(x > z):
#x is largest
if(xy < 0): y=-y
if(xz < 0): z=-z
else:
#z is largest
if(yz < 0): y=-y
if(xz < 0): x=-x
else:
if(y > z):
#y is largest
if(xy < 0): x=-x
if(yz < 0): z=-z
else:
#z is largest
if(yz < 0): y=-y
if(xz < 0): x=-x
return [x,y,z]
    #normal case: extract the skew-symmetric part and rescale by theta/sin(theta)
    scale = 0.5
    if abs(theta) > 1e-5:
        scale = 0.5*theta/math.sin(theta)
    x = (R[3+2]-R[6+1]) * scale
    y = (R[6+0]-R[0+2]) * scale
    z = (R[0+1]-R[3+0]) * scale
    return [x,y,z]
def axis_angle(R):
"""Returns the (axis,angle) pair representing R"""
m = moment(R)
return (vectorops.unit(m),vectorops.norm(m))
def from_axis_angle(aa):
"""Converts an axis-angle representation (axis,angle) to a 3D rotation
matrix."""
return rotation(aa[0],aa[1])
def from_moment(w):
"""Converts a moment representation w to a 3D rotation matrix."""
length = vectorops.norm(w)
if length < 1e-7: return identity()
return rotation(vectorops.mul(w,1.0/length),length)
def distance(R1,R2):
"""Returns the absolute angle one would need to rotate in order to get
from R1 to R2"""
R = mul(R1,inv(R2))
return angle(R)
def error(R1,R2):
"""Returns a 3D "difference vector" that describes how far R1 is from R2.
More precisely, this is the Lie derivative."""
R = mul(R1,inv(R2))
return moment(R)
def cross_product(w):
"""Returns the cross product matrix associated with w.
The matrix [w]R is the derivative of the matrix R as it rotates about
the axis w/||w|| with angular velocity ||w||.
"""
return [0.,w[2],-w[1], -w[2],0.,w[0], w[1],-w[0],0.]
def rotation(axis,angle):
"""Given a unit axis and an angle in radians, returns the rotation
matrix."""
cm = math.cos(angle)
sm = math.sin(angle)
#m = s[r]-c[r][r]+rrt = s[r]-c(rrt-I)+rrt = cI + rrt(1-c) + s[r]
R = vectorops.mul(cross_product(axis),sm)
for i in range(3):
for j in range(3):
R[i*3+j] += axis[i]*axis[j]*(1.-cm)
R[0] += cm
R[4] += cm
R[8] += cm
return R
def canonical(v):
"""Given a unit vector v, finds R that defines a basis [x,y,z] such that
x = v and y and z are orthogonal"""
if abs(vectorops.normSquared(v) - 1.0) > 1e-4:
raise RuntimeError("Nonunit vector supplied to canonical()")
assert(len(v)==3)
if abs(v[0]-1.0) < 1e-5:
return identity()
elif abs(v[0]+1.0) < 1e-5:
#flip of basis
R = identity()
R[0] = -1.0
R[4] = -1.0
return R
    R = v + [0.]*6
    (x,y,z) = tuple(v)
    scale = (1.0-x)/(1.0-x*x)
    R[3] = -y
    R[4] = x + scale*z*z
    R[5] = -scale*y*z
    R[6] = -z
    R[7] = -scale*y*z
    R[8] = x + scale*y*y
    return R
def vector_rotation(v1,v2):
"""Finds the minimal-angle matrix that rotates v1 to v2. v1 and v2
are assumed to be nonzero"""
a1 = vectorops.unit(v1)
a2 = vectorops.unit(v2)
cp = vectorops.cross(a1,a2)
dp = vectorops.dot(a1,a2)
if abs(vectorops.norm(cp)) < 1e-4:
if dp < 0:
R0 = canonical(a1)
#return a rotation 180 degrees about the canonical y axis
return rotation(R0[3:6],math.pi)
else:
return identity()
else:
angle = math.acos(max(min(dp,1.0),-1.0))
axis = vectorops.mul(cp,1.0/vectorops.norm(cp))
return rotation(axis,angle)
def interpolate(R1,R2,u):
"""Interpolate linearly between the two rotations R1 and R2. """
R = mul(inv(R1),R2)
m = moment(R)
angle = vectorops.norm(m)
if angle==0: return R1
axis = vectorops.div(m,angle)
return mul(R1,rotation(axis,angle*u))
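# A minimal usage sketch (not part of the original module); the import path is
# an assumption based on the Klampt package layout, and the printed values are
# approximate.
#
#   import math
#   from klampt.math import so3          # assumed import path for this module
#   R = so3.rotation([0., 0., 1.], math.pi/2)   # quarter turn about +z
#   so3.apply(R, [1., 0., 0.])   # ~ (0.0, 1.0, 0.0)
#   so3.moment(R)                # ~ [0.0, 0.0, math.pi/2]
#   so3.angle(so3.mul(R, so3.inv(R)))   # ~ 0.0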
|
|
try:
import http.client as httplib
except ImportError:
import httplib
import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
return self._obj.decompress(data)
def _get_decoder(mode):
if mode == 'gzip':
return GzipDecoder()
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
    :param decode_content:
        If True, the body will be decoded according to the Content-Encoding
        header (e.g. 'gzip' or 'deflate'); if False, the raw, undecoded data
        is used instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get('transfer-encoding', '')
if tr_enc.lower() == "chunked":
self.chunked = True
# We certainly don't want to preload content when the response is chunked.
if not self.chunked:
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:`HTTPResponse.read` if bytes
        are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
            How much of the content to read. The generator will return up to
            that much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked:
for line in self.read_chunked(amt):
yield line
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3: # Python 3
headers = HTTPHeaderDict(headers.items())
else: # Python 2
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
def read_chunked(self, amt=None):
# FIXME: Rewrite this method and make it a class with
# a better structured logic.
if not self.chunked:
raise ResponseNotChunked("Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
while True:
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is None:
line = self._fp.fp.readline()
line = line.decode()
# See RFC 7230: Chunked Transfer Coding.
i = line.find(';')
if i >= 0:
line = line[:i] # Strip chunk-extensions.
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(''.join(line))
if self.chunk_left == 0:
break
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
yield chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
yield value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
yield value
else: # amt > self.chunk_left
yield self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
self.release_conn()
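# A minimal usage sketch (not part of the original module). In practice these
# HTTPResponse objects are created for you by urllib3's connection pools; the
# URL below is only a placeholder.
#
#   import urllib3
#   http = urllib3.PoolManager()
#   r = http.request('GET', 'http://example.com/', preload_content=False)
#   for chunk in r.stream(2**10, decode_content=True):
#       pass  # consume each decoded chunk here
#   r.release_conn()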
|
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This modules contains flags DEFINE functions.
Do NOT import this module directly. Import the flags package and use the
aliases defined at the package level instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import types
from absl.flags import _argument_parser
from absl.flags import _exceptions
from absl.flags import _flag
from absl.flags import _flagvalues
from absl.flags import _helpers
from absl.flags import _validators
# pylint: disable=unused-import
try:
from typing import Text, List, Any
except ImportError:
pass
try:
import enum
except ImportError:
pass
# pylint: enable=unused-import
_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
def _register_bounds_validator_if_needed(parser, name, flag_values):
"""Enforces lower and upper bounds for numeric flags.
Args:
parser: NumericParser (either FloatParser or IntegerParser), provides lower
and upper bounds, and help text to display.
name: str, name of the flag
flag_values: FlagValues.
"""
if parser.lower_bound is not None or parser.upper_bound is not None:
def checker(value):
if value is not None and parser.is_outside_bounds(value):
message = '%s is not %s' % (value, parser.syntactic_help)
raise _exceptions.ValidationError(message)
return True
_validators.register_validator(name, checker, flag_values=flag_values)
def DEFINE( # pylint: disable=invalid-name
parser,
name,
default,
help, # pylint: disable=redefined-builtin
flag_values=_flagvalues.FLAGS,
serializer=None,
module_name=None,
required=False,
**args):
"""Registers a generic Flag object.
NOTE: in the docstrings of all DEFINE* functions, "registers" is short
for "creates a new flag and registers it".
Auxiliary function: clients should use the specialized DEFINE_<type>
function instead.
Args:
parser: ArgumentParser, used to parse the flag arguments.
name: str, the flag name.
default: The default value of the flag.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
serializer: ArgumentSerializer, the flag serializer instance.
module_name: str, the name of the Python module declaring this flag. If not
provided, it will be computed using the stack trace of this call.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: dict, the extra keyword args that are passed to Flag __init__.
Returns:
a handle to defined flag.
"""
return DEFINE_flag(
_flag.Flag(parser, serializer, name, default, help, **args), flag_values,
module_name, required)
def DEFINE_flag( # pylint: disable=invalid-name
flag,
flag_values=_flagvalues.FLAGS,
module_name=None,
required=False):
"""Registers a 'Flag' object with a 'FlagValues' object.
By default, the global FLAGS 'FlagValue' object is used.
Typical users will use one of the more specialized DEFINE_xxx
functions, such as DEFINE_string or DEFINE_integer. But developers
who need to create Flag objects themselves should use this function
to register their flags.
Args:
flag: Flag, a flag that is key to the module.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
module_name: str, the name of the Python module declaring this flag. If not
provided, it will be computed using the stack trace of this call.
required: bool, is this a required flag. This must be used as a keyword
argument.
Returns:
a handle to defined flag.
"""
if required and flag.default is not None:
raise ValueError('Required flag --%s cannot have a non-None default' %
flag.name)
# Copying the reference to flag_values prevents pychecker warnings.
fv = flag_values
fv[flag.name] = flag
# Tell flag_values who's defining the flag.
if module_name:
module = sys.modules.get(module_name)
else:
module, module_name = _helpers.get_calling_module_object_and_name()
flag_values.register_flag_by_module(module_name, flag)
flag_values.register_flag_by_module_id(id(module), flag)
if required:
_validators.mark_flag_as_required(flag.name, fv)
ensure_non_none_value = (flag.default is not None) or required
return _flagvalues.FlagHolder(
fv, flag, ensure_non_none_value=ensure_non_none_value)
def _internal_declare_key_flags(flag_names,
flag_values=_flagvalues.FLAGS,
key_flag_values=None):
"""Declares a flag as key for the calling module.
Internal function. User code should call declare_key_flag or
adopt_module_key_flags instead.
Args:
flag_names: [str], a list of strings that are names of already-registered
Flag objects.
flag_values: FlagValues, the FlagValues instance with which the flags listed
in flag_names have registered (the value of the flag_values argument from
the DEFINE_* calls that defined those flags). This should almost never
need to be overridden.
key_flag_values: FlagValues, the FlagValues instance that (among possibly
many other things) keeps track of the key flags for each module. Default
None means "same as flag_values". This should almost never need to be
overridden.
Raises:
UnrecognizedFlagError: Raised when the flag is not defined.
"""
key_flag_values = key_flag_values or flag_values
module = _helpers.get_calling_module()
for flag_name in flag_names:
flag = flag_values[flag_name]
key_flag_values.register_key_flag_for_module(module, flag)
def declare_key_flag(flag_name, flag_values=_flagvalues.FLAGS):
"""Declares one flag as key to the current module.
Key flags are flags that are deemed really important for a module.
They are important when listing help messages; e.g., if the
--helpshort command-line flag is used, then only the key flags of the
main module are listed (instead of all flags, as in the case of
--helpfull).
Sample usage:
flags.declare_key_flag('flag_1')
Args:
flag_name: str, the name of an already declared flag. (Redeclaring flags as
key, including flags implicitly key because they were declared in this
module, is a no-op.)
flag_values: FlagValues, the FlagValues instance in which the flag will be
declared as a key flag. This should almost never need to be overridden.
Raises:
ValueError: Raised if flag_name not defined as a Python flag.
"""
if flag_name in _helpers.SPECIAL_FLAGS:
# Take care of the special flags, e.g., --flagfile, --undefok.
# These flags are defined in SPECIAL_FLAGS, and are treated
# specially during flag parsing, taking precedence over the
# user-defined flags.
_internal_declare_key_flags([flag_name],
flag_values=_helpers.SPECIAL_FLAGS,
key_flag_values=flag_values)
return
try:
_internal_declare_key_flags([flag_name], flag_values=flag_values)
except KeyError:
raise ValueError('Flag --%s is undefined. To set a flag as a key flag '
'first define it in Python.' % flag_name)
def adopt_module_key_flags(module, flag_values=_flagvalues.FLAGS):
"""Declares that all flags key to a module are key to the current module.
Args:
module: module, the module object from which all key flags will be declared
as key flags to the current module.
flag_values: FlagValues, the FlagValues instance in which the flags will be
declared as key flags. This should almost never need to be overridden.
Raises:
Error: Raised when given an argument that is a module name (a string),
instead of a module object.
"""
if not isinstance(module, types.ModuleType):
raise _exceptions.Error('Expected a module object, not %r.' % (module,))
_internal_declare_key_flags(
[f.name for f in flag_values.get_key_flags_for_module(module.__name__)],
flag_values=flag_values)
# If module is this flag module, take _helpers.SPECIAL_FLAGS into account.
if module == _helpers.FLAGS_MODULE:
_internal_declare_key_flags(
# As we associate flags with get_calling_module_object_and_name(), the
# special flags defined in this module are incorrectly registered with
# a different module. So, we can't use get_key_flags_for_module.
# Instead, we take all flags from _helpers.SPECIAL_FLAGS (a private
# FlagValues, where no other module should register flags).
[_helpers.SPECIAL_FLAGS[name].name for name in _helpers.SPECIAL_FLAGS],
flag_values=_helpers.SPECIAL_FLAGS,
key_flag_values=flag_values)
def disclaim_key_flags():
"""Declares that the current module will not define any more key flags.
Normally, the module that calls the DEFINE_xxx functions claims the
flag to be its key flag. This is undesirable for modules that
define additional DEFINE_yyy functions with its own flag parsers and
serializers, since that module will accidentally claim flags defined
by DEFINE_yyy as its key flags. After calling this function, the
module disclaims flag definitions thereafter, so the key flags will
be correctly attributed to the caller of DEFINE_yyy.
After calling this function, the module will not be able to define
any more flags. This function will affect all FlagValues objects.
"""
globals_for_caller = sys._getframe(1).f_globals # pylint: disable=protected-access
module, _ = _helpers.get_module_object_and_name(globals_for_caller)
_helpers.disclaim_module_ids.add(id(module))
def DEFINE_string( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
flag_values=_flagvalues.FLAGS,
required=False,
**args):
"""Registers a flag whose value can be any string."""
parser = _argument_parser.ArgumentParser()
serializer = _argument_parser.ArgumentSerializer()
return DEFINE(
parser,
name,
default,
help,
flag_values,
serializer,
required=required,
**args)
def DEFINE_boolean( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
flag_values=_flagvalues.FLAGS,
module_name=None,
required=False,
**args):
"""Registers a boolean flag.
Such a boolean flag does not take an argument. If a user wants to
specify a false value explicitly, the long option beginning with 'no'
must be used: i.e. --noflag
This flag will have a value of None, True or False. None is possible
if default=None and the user does not specify the flag on the command
line.
Args:
name: str, the flag name.
default: bool|str|None, the default value of the flag.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
module_name: str, the name of the Python module declaring this flag. If not
provided, it will be computed using the stack trace of this call.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: dict, the extra keyword args that are passed to Flag __init__.
Returns:
a handle to defined flag.
"""
return DEFINE_flag(
_flag.BooleanFlag(name, default, help, **args), flag_values, module_name,
required)
def DEFINE_float( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
lower_bound=None,
upper_bound=None,
flag_values=_flagvalues.FLAGS,
required=False,
**args):
"""Registers a flag whose value must be a float.
If lower_bound or upper_bound are set, then this flag must be
within the given range.
Args:
name: str, the flag name.
default: float|str|None, the default value of the flag.
help: str, the help message.
lower_bound: float, min value of the flag.
upper_bound: float, max value of the flag.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: dict, the extra keyword args that are passed to DEFINE.
Returns:
a handle to defined flag.
"""
parser = _argument_parser.FloatParser(lower_bound, upper_bound)
serializer = _argument_parser.ArgumentSerializer()
result = DEFINE(
parser,
name,
default,
help,
flag_values,
serializer,
required=required,
**args)
_register_bounds_validator_if_needed(parser, name, flag_values=flag_values)
return result
def DEFINE_integer( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
lower_bound=None,
upper_bound=None,
flag_values=_flagvalues.FLAGS,
required=False,
**args):
"""Registers a flag whose value must be an integer.
  If lower_bound or upper_bound are set, then this flag must be
within the given range.
Args:
name: str, the flag name.
default: int|str|None, the default value of the flag.
help: str, the help message.
lower_bound: int, min value of the flag.
upper_bound: int, max value of the flag.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: dict, the extra keyword args that are passed to DEFINE.
Returns:
a handle to defined flag.
"""
parser = _argument_parser.IntegerParser(lower_bound, upper_bound)
serializer = _argument_parser.ArgumentSerializer()
result = DEFINE(
parser,
name,
default,
help,
flag_values,
serializer,
required=required,
**args)
_register_bounds_validator_if_needed(parser, name, flag_values=flag_values)
return result
def DEFINE_enum( # pylint: disable=invalid-name,redefined-builtin
name,
default,
enum_values,
help,
flag_values=_flagvalues.FLAGS,
module_name=None,
required=False,
**args):
"""Registers a flag whose value can be any string from enum_values.
Instead of a string enum, prefer `DEFINE_enum_class`, which allows
defining enums from an `enum.Enum` class.
Args:
name: str, the flag name.
default: str|None, the default value of the flag.
enum_values: [str], a non-empty list of strings with the possible values for
the flag.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
module_name: str, the name of the Python module declaring this flag. If not
provided, it will be computed using the stack trace of this call.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: dict, the extra keyword args that are passed to Flag __init__.
Returns:
a handle to defined flag.
"""
return DEFINE_flag(
_flag.EnumFlag(name, default, help, enum_values, **args), flag_values,
module_name, required)
def DEFINE_enum_class( # pylint: disable=invalid-name,redefined-builtin
name,
default,
enum_class,
help,
flag_values=_flagvalues.FLAGS,
module_name=None,
case_sensitive=False,
required=False,
**args):
"""Registers a flag whose value can be the name of enum members.
Args:
name: str, the flag name.
default: Enum|str|None, the default value of the flag.
enum_class: class, the Enum class with all the possible values for the flag.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
module_name: str, the name of the Python module declaring this flag. If not
provided, it will be computed using the stack trace of this call.
    case_sensitive: bool, whether string-to-member matching should respect
      case; if False (the default), matching is case-insensitive.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: dict, the extra keyword args that are passed to Flag __init__.
Returns:
a handle to defined flag.
"""
return DEFINE_flag(
_flag.EnumClassFlag(
name,
default,
help,
enum_class,
case_sensitive=case_sensitive,
**args), flag_values, module_name, required)
def DEFINE_list( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
flag_values=_flagvalues.FLAGS,
required=False,
**args):
"""Registers a flag whose value is a comma-separated list of strings.
The flag value is parsed with a CSV parser.
Args:
name: str, the flag name.
default: list|str|None, the default value of the flag.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: Dictionary with extra keyword args that are passed to the Flag
__init__.
Returns:
a handle to defined flag.
"""
parser = _argument_parser.ListParser()
serializer = _argument_parser.CsvListSerializer(',')
return DEFINE(
parser,
name,
default,
help,
flag_values,
serializer,
required=required,
**args)
def DEFINE_spaceseplist( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
comma_compat=False,
flag_values=_flagvalues.FLAGS,
required=False,
**args):
"""Registers a flag whose value is a whitespace-separated list of strings.
Any whitespace can be used as a separator.
Args:
name: str, the flag name.
default: list|str|None, the default value of the flag.
help: str, the help message.
comma_compat: bool - Whether to support comma as an additional separator. If
false then only whitespace is supported. This is intended only for
backwards compatibility with flags that used to be comma-separated.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: Dictionary with extra keyword args that are passed to the Flag
__init__.
Returns:
a handle to defined flag.
"""
parser = _argument_parser.WhitespaceSeparatedListParser(
comma_compat=comma_compat)
serializer = _argument_parser.ListSerializer(' ')
return DEFINE(
parser,
name,
default,
help,
flag_values,
serializer,
required=required,
**args)
def DEFINE_multi( # pylint: disable=invalid-name,redefined-builtin
parser,
serializer,
name,
default,
help,
flag_values=_flagvalues.FLAGS,
module_name=None,
required=False,
**args):
"""Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
Args:
parser: ArgumentParser, used to parse the flag arguments.
serializer: ArgumentSerializer, the flag serializer instance.
name: str, the flag name.
default: Union[Iterable[T], Text, None], the default value of the flag. If
the value is text, it will be parsed as if it was provided from the
command line. If the value is a non-string iterable, it will be iterated
over to create a shallow copy of the values. If it is None, it is left
as-is.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
module_name: A string, the name of the Python module declaring this flag. If
not provided, it will be computed using the stack trace of this call.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: Dictionary with extra keyword args that are passed to the Flag
__init__.
Returns:
a handle to defined flag.
"""
return DEFINE_flag(
_flag.MultiFlag(parser, serializer, name, default, help, **args),
flag_values, module_name, required)
def DEFINE_multi_string( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
flag_values=_flagvalues.FLAGS,
required=False,
**args):
"""Registers a flag whose value can be a list of any strings.
Use the flag on the command line multiple times to place multiple
string values into the list. The 'default' may be a single string
(which will be converted into a single-element list) or a list of
strings.
Args:
name: str, the flag name.
default: Union[Iterable[Text], Text, None], the default value of the flag;
see `DEFINE_multi`.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: Dictionary with extra keyword args that are passed to the Flag
__init__.
Returns:
a handle to defined flag.
"""
parser = _argument_parser.ArgumentParser()
serializer = _argument_parser.ArgumentSerializer()
return DEFINE_multi(
parser,
serializer,
name,
default,
help,
flag_values,
required=required,
**args)
def DEFINE_multi_integer( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
lower_bound=None,
upper_bound=None,
flag_values=_flagvalues.FLAGS,
required=False,
**args):
"""Registers a flag whose value can be a list of arbitrary integers.
Use the flag on the command line multiple times to place multiple
integer values into the list. The 'default' may be a single integer
(which will be converted into a single-element list) or a list of
integers.
Args:
name: str, the flag name.
default: Union[Iterable[int], Text, None], the default value of the flag;
see `DEFINE_multi`.
help: str, the help message.
lower_bound: int, min values of the flag.
upper_bound: int, max values of the flag.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: Dictionary with extra keyword args that are passed to the Flag
__init__.
Returns:
a handle to defined flag.
"""
parser = _argument_parser.IntegerParser(lower_bound, upper_bound)
serializer = _argument_parser.ArgumentSerializer()
return DEFINE_multi(
parser,
serializer,
name,
default,
help,
flag_values,
required=required,
**args)
def DEFINE_multi_float( # pylint: disable=invalid-name,redefined-builtin
name,
default,
help,
lower_bound=None,
upper_bound=None,
flag_values=_flagvalues.FLAGS,
required=False,
**args):
"""Registers a flag whose value can be a list of arbitrary floats.
Use the flag on the command line multiple times to place multiple
float values into the list. The 'default' may be a single float
(which will be converted into a single-element list) or a list of
floats.
Args:
name: str, the flag name.
default: Union[Iterable[float], Text, None], the default value of the flag;
see `DEFINE_multi`.
help: str, the help message.
lower_bound: float, min values of the flag.
upper_bound: float, max values of the flag.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: Dictionary with extra keyword args that are passed to the Flag
__init__.
Returns:
a handle to defined flag.
"""
parser = _argument_parser.FloatParser(lower_bound, upper_bound)
serializer = _argument_parser.ArgumentSerializer()
return DEFINE_multi(
parser,
serializer,
name,
default,
help,
flag_values,
required=required,
**args)
def DEFINE_multi_enum( # pylint: disable=invalid-name,redefined-builtin
name,
default,
enum_values,
help,
flag_values=_flagvalues.FLAGS,
case_sensitive=True,
required=False,
**args):
"""Registers a flag whose value can be a list strings from enum_values.
Use the flag on the command line multiple times to place multiple
enum values into the list. The 'default' may be a single string
(which will be converted into a single-element list) or a list of
strings.
Args:
name: str, the flag name.
default: Union[Iterable[Text], Text, None], the default value of the flag;
see `DEFINE_multi`.
enum_values: [str], a non-empty list of strings with the possible values for
the flag.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
case_sensitive: Whether or not the enum is to be case-sensitive.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: Dictionary with extra keyword args that are passed to the Flag
__init__.
Returns:
a handle to defined flag.
"""
parser = _argument_parser.EnumParser(enum_values, case_sensitive)
serializer = _argument_parser.ArgumentSerializer()
return DEFINE_multi(
parser,
serializer,
name,
default,
help,
flag_values,
required=required,
**args)
def DEFINE_multi_enum_class( # pylint: disable=invalid-name,redefined-builtin
name,
default,
enum_class,
help,
flag_values=_flagvalues.FLAGS,
module_name=None,
case_sensitive=False,
required=False,
**args):
"""Registers a flag whose value can be a list of enum members.
Use the flag on the command line multiple times to place multiple
enum values into the list.
Args:
name: str, the flag name.
default: Union[Iterable[Enum], Iterable[Text], Enum, Text, None], the
default value of the flag; see `DEFINE_multi`; only differences are
documented here. If the value is a single Enum, it is treated as a
single-item list of that Enum value. If it is an iterable, text values
within the iterable will be converted to the equivalent Enum objects.
enum_class: class, the Enum class with all the possible values for the flag.
help: str, the help message.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
module_name: A string, the name of the Python module declaring this flag. If
not provided, it will be computed using the stack trace of this call.
    case_sensitive: bool, whether string-to-member matching should respect
      case; if False (the default), matching is case-insensitive.
required: bool, is this a required flag. This must be used as a keyword
argument.
**args: Dictionary with extra keyword args that are passed to the Flag
__init__.
Returns:
a handle to defined flag.
"""
return DEFINE_flag(
_flag.MultiEnumClassFlag(
name, default, help, enum_class, case_sensitive=case_sensitive),
flag_values,
module_name,
required=required,
**args)
def DEFINE_alias( # pylint: disable=invalid-name
name,
original_name,
flag_values=_flagvalues.FLAGS,
module_name=None):
"""Defines an alias flag for an existing one.
Args:
name: str, the flag name.
original_name: str, the original flag name.
flag_values: FlagValues, the FlagValues instance with which the flag will be
registered. This should almost never need to be overridden.
module_name: A string, the name of the module that defines this flag.
Returns:
a handle to defined flag.
Raises:
flags.FlagError:
UnrecognizedFlagError: if the referenced flag doesn't exist.
DuplicateFlagError: if the alias name has been used by some existing flag.
"""
if original_name not in flag_values:
raise _exceptions.UnrecognizedFlagError(original_name)
flag = flag_values[original_name]
class _FlagAlias(_flag.Flag):
"""Overrides Flag class so alias value is copy of original flag value."""
def parse(self, argument):
flag.parse(argument)
self.present += 1
def _parse_from_default(self, value):
# The value was already parsed by the aliased flag, so there is no
# need to call the parser on it a second time.
# Additionally, because of how MultiFlag parses and merges values,
# it isn't possible to delegate to the aliased flag and still get
# the correct values.
return value
@property
def value(self):
return flag.value
@value.setter
def value(self, value):
flag.value = value
help_msg = 'Alias for --%s.' % flag.name
  # If alias_name has been used, flags.DuplicateFlagError will be raised.
return DEFINE_flag(
_FlagAlias(
flag.parser,
flag.serializer,
name,
flag.default,
help_msg,
boolean=flag.boolean), flag_values, module_name)
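# A minimal usage sketch (not part of this module). User code reaches the
# DEFINE_* functions above through the package-level aliases, e.g.:
#
#   from absl import app, flags
#
#   FLAGS = flags.FLAGS
#   flags.DEFINE_string('name', 'world', 'Who to greet.')
#   flags.DEFINE_integer('times', 1, 'How many greetings.', lower_bound=1)
#
#   def main(argv):
#     for _ in range(FLAGS.times):
#       print('Hello, %s!' % FLAGS.name)
#
#   if __name__ == '__main__':
#     app.run(main)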
|
|
"""A module for consuming the Penn Dining API"""
import datetime
from .base import WrapperBase
BASE_URL = "https://esb.isc-seo.upenn.edu/8091/open_data/dining/"
V2_BASE_URL = "https://esb.isc-seo.upenn.edu/8091/open_data/dining/v2/?service="
ENDPOINTS = {
'MENUS': BASE_URL + 'menus',
'VENUES': BASE_URL + 'venues',
}
V2_ENDPOINTS = {
'VENUES': V2_BASE_URL + 'venues',
'HOURS': V2_BASE_URL + 'cafes&cafe=',
'MENUS': V2_BASE_URL + 'menus&cafe=',
'ITEMS': V2_BASE_URL + 'items&item='
}
VENUE_NAMES = {
'593': '1920 Commons',
'636': 'Hill House',
'637': 'Kings Court English House',
'638': 'Kosher Dining at Falk'
}
def normalize_weekly(data):
"""Normalization for dining menu data"""
if "tblMenu" not in data["result_data"]["Document"]:
data["result_data"]["Document"]["tblMenu"] = []
if isinstance(data["result_data"]["Document"]["tblMenu"], dict):
data["result_data"]["Document"]["tblMenu"] = [data["result_data"]["Document"]["tblMenu"]]
for day in data["result_data"]["Document"]["tblMenu"]:
if "tblDayPart" not in day:
continue
if isinstance(day["tblDayPart"], dict):
day["tblDayPart"] = [day["tblDayPart"]]
for meal in day["tblDayPart"]:
if isinstance(meal["tblStation"], dict):
meal["tblStation"] = [meal["tblStation"]]
for station in meal["tblStation"]:
if isinstance(station["tblItem"], dict):
station["tblItem"] = [station["tblItem"]]
return data
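# Illustrative sketch (hypothetical data, not an API response): menu nodes
# that arrive as single dicts are wrapped in lists so callers can always
# iterate over tblMenu, tblDayPart, tblStation and tblItem.
def _example_normalize_weekly():
    doc = {"result_data": {"Document": {"tblMenu": {"tblDayPart": {
        "tblStation": {"tblItem": {"txtTitle": "Oatmeal"}}}}}}
    normalized = normalize_weekly(doc)
    menu = normalized["result_data"]["Document"]["tblMenu"]
    assert isinstance(menu, list)
    assert isinstance(menu[0]["tblDayPart"][0]["tblStation"], list)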
def get_meals(v2_response, building_id):
"""Extract meals into old format from a DiningV2 JSON response"""
result_data = v2_response["result_data"]
meals = []
day_parts = result_data["days"][0]["cafes"][building_id]["dayparts"][0]
for meal in day_parts:
stations = []
for station in meal["stations"]:
items = []
for item_id in station["items"]:
item = result_data["items"][item_id]
new_item = {}
new_item["txtTitle"] = item["label"]
new_item["txtPrice"] = ""
new_item["txtNutritionInfo"] = ""
new_item["txtDescription"] = item["description"]
new_item["tblSide"] = ""
new_item["tblFarmToFork"] = ""
attrs = [{"description": item["cor_icon"][attr]} for attr in item["cor_icon"]]
if len(attrs) == 1:
new_item["tblAttributes"] = {"txtAttribute": attrs[0]}
elif len(attrs) > 1:
new_item["tblAttributes"] = {"txtAttribute": attrs}
else:
new_item["tblAttributes"] = ""
if isinstance(item["options"], list):
item["options"] = {}
if "values" in item["options"]:
for side in item["options"]["values"]:
new_item["tblSide"] = {"txtSideName": side["label"]}
items.append(new_item)
stations.append({"tblItem": items, "txtStationDescription": station["label"]})
meals.append({"tblStation": stations, "txtDayPartDescription": meal["label"]})
return meals
class DiningV2(WrapperBase):
"""The client for the Registrar. Used to make requests to the API.
:param bearer: The user code for the API
:param token: The password code for the API
Usage::
>>> from penn import DiningV2
>>> din = DiningV2('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
"""
def venues(self):
"""Get a list of all venue objects.
>>> venues = din.venues()
"""
response = self._request(V2_ENDPOINTS['VENUES'])
return response
def hours(self, venue_id):
"""Get the list of hours for the venue corresponding to
venue_id.
:param venue_id:
A string representing the id of a venue, e.g. "abc".
>>> commons_hours = din.hours("593")
"""
response = self._request(V2_ENDPOINTS['HOURS'] + venue_id)
return response
def menu(self, venue_id, date):
"""Get the menu for the venue corresponding to venue_id,
on date.
:param venue_id:
A string representing the id of a venue, e.g. "abc".
:param date:
A string representing the date of a venue's menu, e.g. "2015-09-20".
>>> commons_menu = din.menu("593", "2015-09-20")
"""
query = "&date=" + date
response = self._request(V2_ENDPOINTS['MENUS'] + venue_id + query)
return response
def item(self, item_id):
"""Get a description of the food item corresponding to item_id.
:param item_id:
A string representing the id of an item, e.g. "3899220".
>>> tomato_sauce = din.item("3899220")
"""
response = self._request(V2_ENDPOINTS['ITEMS'] + item_id)
return response
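# Hedged workflow sketch (placeholder tokens; the response layout is inferred
# from get_meals above): fetch one date's menu for a venue, then look up the
# first item that appears on it.
def _example_diningv2_workflow():
    din = DiningV2('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
    menu = din.menu('593', '2015-09-20')
    item_ids = menu['result_data']['items']
    first_id = next(iter(item_ids))
    print(din.item(first_id)['result_data'])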
class Dining(WrapperBase):
"""The client for the Registrar. Used to make requests to the API.
:param bearer: The user code for the API
:param token: The password code for the API
Usage::
>>> from penn import Dining
>>> din = Dining('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
"""
def venues(self):
"""Get a list of all venue objects.
>>> venues = din.venues()
"""
response = self._request(V2_ENDPOINTS['VENUES'])
# Normalize `dateHours` to array
for venue in response["result_data"]["document"]["venue"]:
if venue.get("id") in VENUE_NAMES:
venue["name"] = VENUE_NAMES[venue.get("id")]
if isinstance(venue.get("dateHours"), dict):
venue["dateHours"] = [venue["dateHours"]]
if "dateHours" in venue:
for dh in venue["dateHours"]:
if isinstance(dh.get("meal"), dict):
dh["meal"] = [dh["meal"]]
return response
def menu_daily(self, building_id):
"""Get a menu object corresponding to the daily menu for the
venue with building_id.
:param building_id:
A string representing the id of a building, e.g. "abc".
>>> commons_today = din.menu_daily("593")
"""
today = str(datetime.date.today())
v2_response = DiningV2(self.bearer, self.token).menu(building_id, today)
response = {'result_data': {'Document': {}}}
response["result_data"]["Document"]["menudate"] = datetime.datetime.strptime(today, '%Y-%m-%d').strftime('%-m/%d/%Y')
if building_id in VENUE_NAMES:
response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
else:
response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
response["result_data"]["Document"]["tblMenu"] = {"tblDayPart": get_meals(v2_response, building_id)}
return response
def menu_weekly(self, building_id):
"""Get an array of menu objects corresponding to the weekly menu for the
venue with building_id.
:param building_id:
A string representing the id of a building, e.g. "abc".
>>> commons_week = din.menu_weekly("593")
"""
din = DiningV2(self.bearer, self.token)
response = {'result_data': {'Document': {}}}
days = []
for i in range(7):
date = str(datetime.date.today() + datetime.timedelta(days=i))
v2_response = din.menu(building_id, date)
if building_id in VENUE_NAMES:
response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
else:
response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
formatted_date = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%-m/%d/%Y')
days.append({"tblDayPart": get_meals(v2_response, building_id), "menudate": formatted_date})
response["result_data"]["Document"]["tblMenu"] = days
return normalize_weekly(response)
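# Hedged usage sketch (placeholder tokens): legacy-format daily and weekly
# menus for 1920 Commons (venue id "593").
def _example_dining_usage():
    din = Dining('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
    today = din.menu_daily('593')
    week = din.menu_weekly('593')
    print(today['result_data']['Document']['location'])
    print(len(week['result_data']['Document']['tblMenu']))  # 7 days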
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The main entry point for the new development server."""
import argparse
import errno
import getpass
import itertools
import logging
import os
import sys
import tempfile
import time
from google.appengine.api import appinfo
from google.appengine.datastore import datastore_stub_util
from google.appengine.tools import boolean_action
from google.appengine.tools.devappserver2.admin import admin_server
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import dispatcher
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import shutdown
from google.appengine.tools.devappserver2 import update_checker
from google.appengine.tools.devappserver2 import wsgi_request_info
# Initialize logging early -- otherwise some library packages may
# pre-empt our log formatting. NOTE: the level is provisional; it may
# be changed in main() based on the --debug flag.
logging.basicConfig(
level=logging.INFO,
format='%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
# Valid choices for --log_level and their corresponding constants in
# runtime_config_pb2.Config.stderr_log_level.
_LOG_LEVEL_TO_RUNTIME_CONSTANT = {
'debug': 0,
'info': 1,
'warning': 2,
'error': 3,
'critical': 4,
}
# Valid choices for --dev_appserver_log_level and their corresponding Python
# logging levels
_LOG_LEVEL_TO_PYTHON_CONSTANT = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
# The default encoding used by the production interpreter.
_PROD_DEFAULT_ENCODING = 'ascii'
def _generate_storage_paths(app_id):
"""Yield an infinite sequence of possible storage paths."""
if sys.platform == 'win32':
# The temp directory is per-user on Windows so there is no reason to add
# the username to the generated directory name.
user_format = ''
else:
try:
user_name = getpass.getuser()
except Exception: # The possible set of exceptions is not documented.
user_format = ''
else:
user_format = '.%s' % user_name
tempdir = tempfile.gettempdir()
yield os.path.join(tempdir, 'appengine.%s%s' % (app_id, user_format))
for i in itertools.count(1):
yield os.path.join(tempdir, 'appengine.%s%s.%d' % (app_id, user_format, i))
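# Illustrative sketch (not called anywhere): the generator yields an
# unnumbered candidate first and then numbered fallbacks, e.g.
# /tmp/appengine.myapp.<user>, /tmp/appengine.myapp.<user>.1, ...
def _example_generate_storage_paths():
  paths = _generate_storage_paths('myapp')
  first, second = next(paths), next(paths)
  assert second.endswith('.1')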
def _get_storage_path(path, app_id):
"""Returns a path to the directory where stub data can be stored."""
_, _, app_id = app_id.replace(':', '_').rpartition('~')
if path is None:
for path in _generate_storage_paths(app_id):
try:
os.mkdir(path, 0700)
except OSError, e:
if e.errno == errno.EEXIST:
# Check that the directory is only accessible by the current user to
# protect against an attacker creating the directory in advance in
# order to access any created files. Windows has per-user temporary
# directories and st_mode does not include per-user permission
# information so assume that it is safe.
if sys.platform == 'win32' or (
(os.stat(path).st_mode & 0777) == 0700 and os.path.isdir(path)):
return path
else:
continue
raise
else:
return path
elif not os.path.exists(path):
os.mkdir(path)
return path
elif not os.path.isdir(path):
raise IOError('the given storage path %r is a file, a directory was '
'expected' % path)
else:
return path
def _get_default_php_path():
"""Returns the path to the siloed php-cgi binary or None if not present."""
default_php_executable_path = None
if sys.platform == 'win32':
default_php_executable_path = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]),
'php/php-5.4-Win32-VC9-x86/php-cgi.exe'))
elif sys.platform == 'darwin':
default_php_executable_path = os.path.abspath(
os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))),
'php-cgi'))
if (default_php_executable_path and
os.path.exists(default_php_executable_path)):
return default_php_executable_path
return None
class PortParser(object):
"""A parser for ints that represent ports."""
def __init__(self, allow_port_zero=True):
self._min_port = 0 if allow_port_zero else 1
def __call__(self, value):
try:
port = int(value)
except ValueError:
raise argparse.ArgumentTypeError('Invalid port: %r' % value)
if port < self._min_port or port >= (1 << 16):
raise argparse.ArgumentTypeError('Invalid port: %d' % port)
return port
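# Illustrative sketch: PortParser instances are passed as the argparse "type"
# for port flags below, and can also be called directly to validate a string.
def _example_port_parser():
  parse_port = PortParser(allow_port_zero=False)
  assert parse_port('8080') == 8080
  try:
    parse_port('0')  # zero is rejected when allow_port_zero=False
  except argparse.ArgumentTypeError:
    pass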
def parse_per_module_option(
value, value_type, value_predicate,
single_bad_type_error, single_bad_predicate_error,
multiple_bad_type_error, multiple_bad_predicate_error,
multiple_duplicate_module_error):
"""Parses command line options that may be specified per-module.
Args:
value: A str containing the flag value to parse. Two formats are supported:
1. A universal value (may not contain a colon as that is used to
indicate a per-module value).
2. Per-module values. One or more comma separated module-value pairs.
Each pair is a module_name:value. An empty module-name is shorthand
for "default" to match how not specifying a module name in the yaml
is the same as specifying "module: default".
value_type: a callable that converts the string representation of the value
to the actual value. Should raise ValueError if the string can not
be converted.
value_predicate: a predicate to call on the converted value to validate it.
  Use "lambda _: True" if all values are valid.
single_bad_type_error: the message to use if a universal value is provided
and value_type throws a ValueError. The message must consume a single
format parameter (the provided value).
single_bad_predicate_error: the message to use if a universal value is
provided and value_predicate returns False. The message does not
get any format parameters.
multiple_bad_type_error: the message to use if a per-module value
either does not have two values separated by a single colon or if
value_type throws a ValueError on the second string. The message must
consume a single format parameter (the module_name:value pair).
multiple_bad_predicate_error: the message to use if value_predicate returns
  False for a per-module value. The message must consume a single format
parameter (the module name).
multiple_duplicate_module_error: the message to use if the same module is
repeated. The message must consume a single format parameter (the
module name).
Returns:
Either a single value of value_type for universal values or a dict of
str->value_type for per-module values.
Raises:
argparse.ArgumentTypeError: the value is invalid.
"""
if ':' not in value:
try:
single_value = value_type(value)
except ValueError:
raise argparse.ArgumentTypeError(single_bad_type_error % value)
else:
if not value_predicate(single_value):
raise argparse.ArgumentTypeError(single_bad_predicate_error)
return single_value
else:
module_to_value = {}
for module_value in value.split(','):
try:
module_name, single_value = module_value.split(':')
single_value = value_type(single_value)
except ValueError:
raise argparse.ArgumentTypeError(multiple_bad_type_error % module_value)
else:
module_name = module_name.strip()
if not module_name:
module_name = appinfo.DEFAULT_MODULE
if module_name in module_to_value:
raise argparse.ArgumentTypeError(
multiple_duplicate_module_error % module_name)
if not value_predicate(single_value):
raise argparse.ArgumentTypeError(
multiple_bad_predicate_error % module_name)
module_to_value[module_name] = single_value
return module_to_value
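# Illustrative sketch of the two accepted formats (the error messages are
# placeholders): a universal value parses to a single int, per-module values
# parse to a dict, and an empty module name means the default module.
def _example_parse_per_module_option():
  def parse(value):
    return parse_per_module_option(
        value, int, lambda instances: instances > 0,
        'Invalid count: %r', 'Count must be positive',
        'Expected "module:count": %r', 'Count for module %s must be positive',
        'Duplicate count for module %s')
  assert parse('5') == 5
  assert parse('default:3,backend:20') == {'default': 3, 'backend': 20}
  assert parse(':3') == {appinfo.DEFAULT_MODULE: 3}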
def parse_max_module_instances(value):
"""Returns the parsed value for the --max_module_instances flag.
Args:
value: A str containing the flag value for parse. The format should follow
one of the following examples:
1. "5" - All modules are limited to 5 instances.
2. "default:3,backend:20" - The default module can have 3 instances,
"backend" can have 20 instances and all other modules are
unaffected. An empty name (i.e. ":3") is shorthand for default
to match how not specifying a module name in the yaml is the
same as specifying "module: default".
Returns:
The parsed value of the max_module_instances flag. May either be an int
(for values of the form "5") or a dict of str->int (for values of the
form "default:3,backend:20").
Raises:
argparse.ArgumentTypeError: the value is invalid.
"""
return parse_per_module_option(
value, int, lambda instances: instances > 0,
'Invalid max instance count: %r',
'Max instance count must be greater than zero',
'Expected "module:max_instance_count": %r',
'Max instance count for module %s must be greater than zero',
'Duplicate max instance count for module %s')
def parse_threadsafe_override(value):
"""Returns the parsed value for the --threadsafe_override flag.
Args:
value: A str containing the flag value for parse. The format should follow
one of the following examples:
1. "False" - All modules override the YAML threadsafe configuration
as if the YAML contained False.
2. "default:False,backend:True" - The default module overrides the
YAML threadsafe configuration as if the YAML contained False, the
"backend" module overrides with a value of True and all other
modules use the value in the YAML file. An empty name (i.e.
":True") is shorthand for default to match how not specifying a
module name in the yaml is the same as specifying
"module: default".
Returns:
The parsed value of the threadsafe_override flag. May either be a bool
(for values of the form "False") or a dict of str->bool (for values of the
form "default:False,backend:True").
Raises:
argparse.ArgumentTypeError: the value is invalid.
"""
return parse_per_module_option(
value, boolean_action.BooleanParse, lambda _: True,
'Invalid threadsafe override: %r',
None,
'Expected "module:threadsafe_override": %r',
None,
'Duplicate threadsafe override value for module %s')
def parse_path(value):
"""Returns the given path with ~ and environment variables expanded."""
return os.path.expanduser(os.path.expandvars(value))
def create_command_line_parser():
"""Returns an argparse.ArgumentParser to parse command line arguments."""
# TODO: Add more robust argument validation. Consider what flags
# are actually needed.
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('yaml_files', nargs='+')
common_group = parser.add_argument_group('Common')
common_group.add_argument(
'--host', default='localhost',
help='host name to which application modules should bind')
common_group.add_argument(
'--port', type=PortParser(), default=8080,
help='lowest port to which application modules should bind')
common_group.add_argument(
'--admin_host', default='localhost',
help='host name to which the admin server should bind')
common_group.add_argument(
'--admin_port', type=PortParser(), default=8000,
help='port to which the admin server should bind')
common_group.add_argument(
'--auth_domain', default='gmail.com',
help='name of the authorization domain to use')
common_group.add_argument(
'--storage_path', metavar='PATH',
type=parse_path,
help='path to the data (datastore, blobstore, etc.) associated with the '
'application.')
common_group.add_argument(
'--log_level', default='info',
choices=_LOG_LEVEL_TO_RUNTIME_CONSTANT.keys(),
help='the log level below which logging messages generated by '
'application code will not be displayed on the console')
common_group.add_argument(
'--max_module_instances',
type=parse_max_module_instances,
help='the maximum number of runtime instances that can be started for a '
'particular module - the value can be an integer, in which case all '
'modules are limited to that number of instances, or a comma-separated '
'list of module:max_instances e.g. "default:5,backend:3"')
common_group.add_argument(
'--use_mtime_file_watcher',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='use mtime polling for detecting source code changes - useful if '
'modifying code from a remote machine using a distributed file system')
common_group.add_argument(
'--threadsafe_override',
type=parse_threadsafe_override,
help='override the application\'s threadsafe configuration - the value '
'can be a boolean, in which case all modules\' threadsafe settings will '
'be overridden, or a comma-separated list of module:threadsafe_override '
'e.g. "default:False,backend:True"')
# PHP
php_group = parser.add_argument_group('PHP')
php_group.add_argument('--php_executable_path', metavar='PATH',
type=parse_path,
default=_get_default_php_path(),
help='path to the PHP executable')
php_group.add_argument('--php_remote_debugging',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='enable XDebug remote debugging')
# App Identity
appidentity_group = parser.add_argument_group('Application Identity')
appidentity_group.add_argument(
'--appidentity_email_address',
help='email address associated with a service account that has a '
'downloadable key. May be None for no local application identity.')
appidentity_group.add_argument(
'--appidentity_private_key_path',
help='path to private key file associated with service account '
'(.pem format). Must be set if appidentity_email_address is set.')
# Python
python_group = parser.add_argument_group('Python')
python_group.add_argument(
'--python_startup_script',
help='the script to run at the startup of new Python runtime instances '
'(useful for tools such as debuggers).')
python_group.add_argument(
'--python_startup_args',
help='the arguments made available to the script specified in '
'--python_startup_script.')
# Blobstore
blobstore_group = parser.add_argument_group('Blobstore API')
blobstore_group.add_argument(
'--blobstore_path',
type=parse_path,
help='path to directory used to store blob contents '
'(defaults to a subdirectory of --storage_path if not set)',
default=None)
# Cloud SQL
cloud_sql_group = parser.add_argument_group('Cloud SQL')
cloud_sql_group.add_argument(
'--mysql_host',
default='localhost',
help='host name of a running MySQL server used for simulated Google '
'Cloud SQL storage')
cloud_sql_group.add_argument(
'--mysql_port', type=PortParser(allow_port_zero=False),
default=3306,
help='port number of a running MySQL server used for simulated Google '
'Cloud SQL storage')
cloud_sql_group.add_argument(
'--mysql_user',
default='',
help='username to use when connecting to the MySQL server specified in '
'--mysql_host and --mysql_port or --mysql_socket')
cloud_sql_group.add_argument(
'--mysql_password',
default='',
help='password to use when connecting to the MySQL server specified in '
'--mysql_host and --mysql_port or --mysql_socket')
cloud_sql_group.add_argument(
'--mysql_socket',
help='path to a Unix socket file to use when connecting to a running '
'MySQL server used for simulated Google Cloud SQL storage')
# Datastore
datastore_group = parser.add_argument_group('Datastore API')
datastore_group.add_argument(
'--datastore_path',
type=parse_path,
default=None,
help='path to a file used to store datastore contents '
'(defaults to a file in --storage_path if not set)',)
datastore_group.add_argument('--clear_datastore',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='clear the datastore on startup')
datastore_group.add_argument(
'--datastore_consistency_policy',
default='time',
choices=['consistent', 'random', 'time'],
help='the policy to apply when deciding whether a datastore write should '
'appear in global queries')
datastore_group.add_argument(
'--require_indexes',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='generate an error on datastore queries that '
'require a composite index not found in index.yaml')
datastore_group.add_argument(
'--auto_id_policy',
default=datastore_stub_util.SCATTERED,
choices=[datastore_stub_util.SEQUENTIAL,
datastore_stub_util.SCATTERED],
help='the type of sequence from which the datastore stub '
'assigns automatic IDs. NOTE: Sequential IDs are '
'deprecated. This flag will be removed in a future '
'release. Please do not rely on sequential IDs in your '
'tests.')
# Logs
logs_group = parser.add_argument_group('Logs API')
logs_group.add_argument(
'--logs_path', default=None,
help='path to a file used to store request logs (defaults to a file in '
'--storage_path if not set)',)
# Mail
mail_group = parser.add_argument_group('Mail API')
mail_group.add_argument(
'--show_mail_body',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='logs the contents of e-mails sent using the Mail API')
mail_group.add_argument(
'--enable_sendmail',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='use the "sendmail" tool to transmit e-mail sent '
'using the Mail API (ignored if --smtp_host is set)')
mail_group.add_argument(
'--smtp_host', default='',
help='host name of an SMTP server to use to transmit '
'e-mail sent using the Mail API')
mail_group.add_argument(
'--smtp_port', default=25,
type=PortParser(allow_port_zero=False),
help='port number of an SMTP server to use to transmit '
'e-mail sent using the Mail API (ignored if --smtp_host '
'is not set)')
mail_group.add_argument(
'--smtp_user', default='',
help='username to use when connecting to the SMTP server '
'specified in --smtp_host and --smtp_port')
mail_group.add_argument(
'--smtp_password', default='',
help='password to use when connecting to the SMTP server '
'specified in --smtp_host and --smtp_port')
# Matcher
prospective_search_group = parser.add_argument_group('Prospective Search API')
prospective_search_group.add_argument(
'--prospective_search_path', default=None,
type=parse_path,
help='path to a file used to store the prospective '
'search subscription index (defaults to a file in '
'--storage_path if not set)')
prospective_search_group.add_argument(
'--clear_prospective_search',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='clear the prospective search subscription index')
# Search
search_group = parser.add_argument_group('Search API')
search_group.add_argument(
'--search_indexes_path', default=None,
type=parse_path,
help='path to a file used to store search indexes '
'(defaults to a file in --storage_path if not set)',)
search_group.add_argument(
'--clear_search_indexes',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='clear the search indexes')
# Taskqueue
taskqueue_group = parser.add_argument_group('Task Queue API')
taskqueue_group.add_argument(
'--enable_task_running',
action=boolean_action.BooleanAction,
const=True,
default=True,
help='run "push" tasks created using the taskqueue API automatically')
# Misc
misc_group = parser.add_argument_group('Miscellaneous')
misc_group.add_argument(
'--allow_skipped_files',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='make files specified in the app.yaml "skip_files" or "static" '
'handlers readable by the application.')
# No help to avoid lengthening help message for rarely used feature:
# host name to which the server for API calls should bind.
misc_group.add_argument(
'--api_host', default='localhost',
help=argparse.SUPPRESS)
misc_group.add_argument(
'--api_port', type=PortParser(), default=0,
help='port to which the server for API calls should bind')
misc_group.add_argument(
'--automatic_restart',
action=boolean_action.BooleanAction,
const=True,
default=True,
help=('restart instances automatically when files relevant to their '
'module are changed'))
misc_group.add_argument(
'--dev_appserver_log_level', default='info',
choices=_LOG_LEVEL_TO_PYTHON_CONSTANT.keys(),
help='the log level below which logging messages generated by '
'the development server will not be displayed on the console (this '
'flag is more useful for diagnosing problems in dev_appserver.py itself '
'than in application code)')
misc_group.add_argument(
'--skip_sdk_update_check',
action=boolean_action.BooleanAction,
const=True,
default=False,
help='skip checking for SDK updates (if false, use .appcfg_nag to '
'decide)')
misc_group.add_argument(
'--default_gcs_bucket_name', default=None,
help='default Google Cloud Storage bucket name')
return parser
PARSER = create_command_line_parser()
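# Illustrative sketch (the yaml path is a placeholder): parsing a typical
# command line with the parser defined above.
def _example_parse_command_line():
  options = PARSER.parse_args(
      ['app.yaml', '--port=8080',
       '--max_module_instances=default:3,backend:2'])
  assert options.port == 8080
  assert options.max_module_instances == {'default': 3, 'backend': 2}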
def _clear_datastore_storage(datastore_path):
"""Delete the datastore storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(datastore_path):
try:
os.remove(datastore_path)
except OSError, e:
logging.warning('Failed to remove datastore file %r: %s',
datastore_path,
e)
def _clear_prospective_search_storage(prospective_search_path):
"""Delete the perspective search storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(prospective_search_path):
try:
os.remove(prospective_search_path)
except OSError, e:
logging.warning('Failed to remove prospective search file %r: %s',
prospective_search_path,
e)
def _clear_search_indexes_storage(search_index_path):
"""Delete the search indexes storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(search_index_path):
try:
os.remove(search_index_path)
except OSError, e:
logging.warning('Failed to remove search indexes file %r: %s',
search_index_path,
e)
def _setup_environ(app_id):
"""Sets up the os.environ dictionary for the front-end server and API server.
This function should only be called once.
Args:
app_id: The id of the application.
"""
os.environ['APPLICATION_ID'] = app_id
class DevelopmentServer(object):
"""Encapsulates the logic for the development server.
Only a single instance of the class may be created per process. See
_setup_environ.
"""
def __init__(self):
# A list of servers that are currently running.
self._running_modules = []
self._module_to_port = {}
def module_to_address(self, module_name, instance=None):
"""Returns the address of a module."""
if module_name is None:
return self._dispatcher.dispatch_address
return self._dispatcher.get_hostname(
module_name,
self._dispatcher.get_default_version(module_name),
instance)
def start(self, options):
"""Start devappserver2 servers based on the provided command line arguments.
Args:
options: An argparse.Namespace containing the command line arguments.
"""
logging.getLogger().setLevel(
_LOG_LEVEL_TO_PYTHON_CONSTANT[options.dev_appserver_log_level])
configuration = application_configuration.ApplicationConfiguration(
options.yaml_files)
if options.skip_sdk_update_check:
logging.info('Skipping SDK update check.')
else:
update_checker.check_for_updates(configuration)
# There is no good way to set the default encoding from application code
# (it needs to be done during interpreter initialization in site.py or
# sitecustomize.py) so just warn developers if they have a different
# encoding than production.
if sys.getdefaultencoding() != _PROD_DEFAULT_ENCODING:
logging.warning(
'The default encoding of your local Python interpreter is set to %r '
'while App Engine\'s production environment uses %r; as a result '
'your code may behave differently when deployed.',
sys.getdefaultencoding(), _PROD_DEFAULT_ENCODING)
if options.port == 0:
logging.warn('DEFAULT_VERSION_HOSTNAME will not be set correctly with '
'--port=0')
_setup_environ(configuration.app_id)
self._dispatcher = dispatcher.Dispatcher(
configuration,
options.host,
options.port,
options.auth_domain,
_LOG_LEVEL_TO_RUNTIME_CONSTANT[options.log_level],
self._create_php_config(options),
self._create_python_config(options),
self._create_cloud_sql_config(options),
self._create_module_to_setting(options.max_module_instances,
configuration, '--max_module_instances'),
options.use_mtime_file_watcher,
options.automatic_restart,
options.allow_skipped_files,
self._create_module_to_setting(options.threadsafe_override,
configuration, '--threadsafe_override'))
request_data = wsgi_request_info.WSGIRequestInfo(self._dispatcher)
storage_path = _get_storage_path(options.storage_path, configuration.app_id)
apis = self._create_api_server(
request_data, storage_path, options, configuration)
apis.start()
self._running_modules.append(apis)
self._dispatcher.start(options.api_host, apis.port, request_data)
self._running_modules.append(self._dispatcher)
xsrf_path = os.path.join(storage_path, 'xsrf')
admin = admin_server.AdminServer(options.admin_host, options.admin_port,
self._dispatcher, configuration, xsrf_path)
admin.start()
self._running_modules.append(admin)
def stop(self):
"""Stops all running devappserver2 modules."""
while self._running_modules:
self._running_modules.pop().quit()
@staticmethod
def _create_api_server(request_data, storage_path, options, configuration):
datastore_path = options.datastore_path or os.path.join(storage_path,
'datastore.db')
logs_path = options.logs_path or os.path.join(storage_path, 'logs.db')
search_index_path = options.search_indexes_path or os.path.join(
storage_path, 'search_indexes')
prospective_search_path = options.prospective_search_path or os.path.join(
storage_path, 'prospective-search')
blobstore_path = options.blobstore_path or os.path.join(storage_path,
'blobs')
if options.clear_datastore:
_clear_datastore_storage(datastore_path)
if options.clear_prospective_search:
_clear_prospective_search_storage(prospective_search_path)
if options.clear_search_indexes:
_clear_search_indexes_storage(search_index_path)
if options.auto_id_policy==datastore_stub_util.SEQUENTIAL:
logging.warn("--auto_id_policy='sequential' is deprecated. This option "
"will be removed in a future release.")
application_address = '%s' % options.host
if options.port and options.port != 80:
application_address += ':' + str(options.port)
user_login_url = '/%s?%s=%%s' % (login.LOGIN_URL_RELATIVE,
login.CONTINUE_PARAM)
user_logout_url = '%s&%s=%s' % (user_login_url, login.ACTION_PARAM,
login.LOGOUT_ACTION)
if options.datastore_consistency_policy == 'time':
consistency = datastore_stub_util.TimeBasedHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'random':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'consistent':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy(1.0)
else:
assert 0, ('unknown consistency policy: %r' %
options.datastore_consistency_policy)
api_server.maybe_convert_datastore_file_stub_data_to_sqlite(
configuration.app_id, datastore_path)
api_server.setup_stubs(
request_data=request_data,
app_id=configuration.app_id,
application_root=configuration.modules[0].application_root,
# The "trusted" flag is only relevant for Google administrative
# applications.
trusted=getattr(options, 'trusted', False),
appidentity_email_address=options.appidentity_email_address,
appidentity_private_key_path=os.path.abspath(
options.appidentity_private_key_path)
if options.appidentity_private_key_path else None,
blobstore_path=blobstore_path,
datastore_path=datastore_path,
datastore_consistency=consistency,
datastore_require_indexes=options.require_indexes,
datastore_auto_id_policy=options.auto_id_policy,
images_host_prefix='http://%s' % application_address,
logs_path=logs_path,
mail_smtp_host=options.smtp_host,
mail_smtp_port=options.smtp_port,
mail_smtp_user=options.smtp_user,
mail_smtp_password=options.smtp_password,
mail_enable_sendmail=options.enable_sendmail,
mail_show_mail_body=options.show_mail_body,
matcher_prospective_search_path=prospective_search_path,
search_index_path=search_index_path,
taskqueue_auto_run_tasks=options.enable_task_running,
taskqueue_default_http_server=application_address,
user_login_url=user_login_url,
user_logout_url=user_logout_url,
default_gcs_bucket_name=options.default_gcs_bucket_name)
return api_server.APIServer(options.api_host, options.api_port,
configuration.app_id)
@staticmethod
def _create_php_config(options):
php_config = runtime_config_pb2.PhpConfig()
if options.php_executable_path:
php_config.php_executable_path = os.path.abspath(
options.php_executable_path)
php_config.enable_debugger = options.php_remote_debugging
return php_config
@staticmethod
def _create_python_config(options):
python_config = runtime_config_pb2.PythonConfig()
if options.python_startup_script:
python_config.startup_script = os.path.abspath(
options.python_startup_script)
if options.python_startup_args:
python_config.startup_args = options.python_startup_args
return python_config
@staticmethod
def _create_cloud_sql_config(options):
cloud_sql_config = runtime_config_pb2.CloudSQL()
cloud_sql_config.mysql_host = options.mysql_host
cloud_sql_config.mysql_port = options.mysql_port
cloud_sql_config.mysql_user = options.mysql_user
cloud_sql_config.mysql_password = options.mysql_password
if options.mysql_socket:
cloud_sql_config.mysql_socket = options.mysql_socket
return cloud_sql_config
@staticmethod
def _create_module_to_setting(setting, configuration, option):
"""Create a per-module dictionary configuration.
Creates a dictionary that maps a module name to a configuration
setting. Used in conjunction with parse_per_module_option.
Args:
setting: a value that can be None, a dict of str->type or a single value.
configuration: an ApplicationConfiguration object.
option: the option name the setting came from.
Returns:
A dict of str->type.
"""
if setting is None:
return {}
module_names = [module_configuration.module_name
for module_configuration in configuration.modules]
if isinstance(setting, dict):
# Warn and remove a setting if the module name is unknown.
module_to_setting = {}
for module_name, value in setting.items():
if module_name in module_names:
module_to_setting[module_name] = value
else:
logging.warning('Unknown module %r for %r', module_name, option)
return module_to_setting
# Create a dict with an entry for every module.
return {module_name: setting for module_name in module_names}
def main():
shutdown.install_signal_handlers()
# The timezone must be set in the devappserver2 process rather than just in
# the runtime so that printed log timestamps are consistent, and because the
# taskqueue stub expects the timezone to be UTC. The runtime inherits the
# environment.
os.environ['TZ'] = 'UTC'
if hasattr(time, 'tzset'):
# time.tzset() should be called on Unix, but doesn't exist on Windows.
time.tzset()
options = PARSER.parse_args()
dev_server = DevelopmentServer()
try:
dev_server.start(options)
shutdown.wait_until_shutdown()
finally:
dev_server.stop()
if __name__ == '__main__':
main()
|
|
""" Defines the PlotAxis class, and associated validator and UI.
"""
from __future__ import with_statement
# Major library imports
from numpy import array, around, absolute, cos, dot, float64, inf, pi, \
sqrt, sin, transpose
# Enthought Library imports
from enable.api import ColorTrait, LineStyle
from kiva.trait_defs.kiva_font_trait import KivaFont
from traits.api import Any, Float, Int, Str, Trait, Unicode, \
Bool, Event, List, Array, Instance, Enum, Callable
# Local relative imports
from ticks import AbstractTickGenerator, DefaultTickGenerator
from abstract_mapper import AbstractMapper
from abstract_overlay import AbstractOverlay
from label import Label
from log_mapper import LogMapper
def DEFAULT_TICK_FORMATTER(val):
return ("%f"%val).rstrip("0").rstrip(".")
class PlotAxis(AbstractOverlay):
"""
The PlotAxis is a visual component that can be rendered on its own as
a standalone component or attached as an overlay to another component.
(To attach it as an overlay, set its **component** attribute.)
When it is attached as an overlay, it draws into the padding around
the component.
"""
# The mapper that drives this axis.
mapper = Instance(AbstractMapper)
# Keep an origin for plots that aren't attached to a component
origin = Enum("bottom left", "top left", "bottom right", "top right")
# The text of the axis title.
title = Trait('', Str, Unicode) #May want to add PlotLabel option
# The font of the title.
title_font = KivaFont('modern 12')
# The spacing between the axis line and the title
title_spacing = Trait('auto', 'auto', Float)
# The color of the title.
title_color = ColorTrait("black")
# The thickness (in pixels) of each tick.
tick_weight = Float(1.0)
# The color of the ticks.
tick_color = ColorTrait("black")
# The font of the tick labels.
tick_label_font = KivaFont('modern 10')
# The color of the tick labels.
tick_label_color = ColorTrait("black")
# The rotation of the tick labels.
tick_label_rotate_angle = Float(0)
# Whether to align to corners or edges (corner is better for 45 degree rotation)
tick_label_alignment = Enum('edge', 'corner')
# The margin around the tick labels.
tick_label_margin = Int(2)
# The distance of the tick label from the axis.
tick_label_offset = Float(8.)
# Whether the tick labels appear to the inside or the outside of the plot area
tick_label_position = Enum("outside", "inside")
# A callable that is passed the numerical value of each tick label and
# that returns a string.
tick_label_formatter = Callable(DEFAULT_TICK_FORMATTER)
# The number of pixels by which the ticks extend into the plot area.
tick_in = Int(5)
# The number of pixels by which the ticks extend into the label area.
tick_out = Int(5)
# Are ticks visible at all?
tick_visible = Bool(True)
# The dataspace interval between ticks.
tick_interval = Trait('auto', 'auto', Float)
# A callable that implements the AbstractTickGenerator interface.
tick_generator = Instance(AbstractTickGenerator)
# The location of the axis relative to the plot. This determines where
# the axis title is located relative to the axis line.
orientation = Enum("top", "bottom", "left", "right")
# Is the axis line visible?
axis_line_visible = Bool(True)
# The color of the axis line.
axis_line_color = ColorTrait("black")
# The line thickness (in pixels) of the axis line.
axis_line_weight = Float(1.0)
# The dash style of the axis line.
axis_line_style = LineStyle('solid')
# A special version of the axis line that is more useful for geophysical
# plots.
small_haxis_style = Bool(False)
# Does the axis ensure that its end labels fall within its bounding area?
ensure_labels_bounded = Bool(False)
# Does the axis prevent the ticks from being rendered outside its bounds?
# This flag is off by default because the standard axis *does* render ticks
# that encroach on the plot area.
ensure_ticks_bounded = Bool(False)
# Fired when the axis's range bounds change.
updated = Event
#------------------------------------------------------------------------
# Override default values of inherited traits
#------------------------------------------------------------------------
# Background color (overrides AbstractOverlay). Axes usually let the color of
# the container show through.
bgcolor = ColorTrait("transparent")
# Dimensions that the axis is resizable in (overrides PlotComponent).
# Typically, axes are resizable in both dimensions.
resizable = "hv"
#------------------------------------------------------------------------
# Private Traits
#------------------------------------------------------------------------
# Cached position calculations
_tick_list = List # These are caches of their respective positions
_tick_positions = Any #List
_tick_label_list = Any
_tick_label_positions = Any
_tick_label_bounding_boxes = List
_major_axis_size = Float
_minor_axis_size = Float
_major_axis = Array
_title_orientation = Array
_title_angle = Float
_origin_point = Array
_inside_vector = Array
_axis_vector = Array
_axis_pixel_vector = Array
_end_axis_point = Array
ticklabel_cache = List
_cache_valid = Bool(False)
#------------------------------------------------------------------------
# Public methods
#------------------------------------------------------------------------
def __init__(self, component=None, **kwargs):
# TODO: change this back to a factory in the instance trait some day
self.tick_generator = DefaultTickGenerator()
# Override init so that our component gets set last. We want the
# _component_changed() event handler to get run last.
super(PlotAxis, self).__init__(**kwargs)
if component is not None:
self.component = component
def invalidate(self):
""" Invalidates the pre-computed layout and scaling data.
"""
self._reset_cache()
self.invalidate_draw()
return
def traits_view(self):
""" Returns a View instance for use with Traits UI. This method is
called automatically by the Traits framework when .edit_traits() is
invoked.
"""
from axis_view import AxisView
return AxisView
#------------------------------------------------------------------------
# PlotComponent and AbstractOverlay interface
#------------------------------------------------------------------------
def _do_layout(self, *args, **kw):
""" Tells this component to do layout at a given size.
Overrides Component.
"""
if self.use_draw_order and self.component is not None:
self._layout_as_overlay(*args, **kw)
else:
super(PlotAxis, self)._do_layout(*args, **kw)
return
def overlay(self, component, gc, view_bounds=None, mode='normal'):
""" Draws this component overlaid on another component.
Overrides AbstractOverlay.
"""
if not self.visible:
return
self._draw_component(gc, view_bounds, mode, component)
return
def _draw_overlay(self, gc, view_bounds=None, mode='normal'):
""" Draws the overlay layer of a component.
Overrides PlotComponent.
"""
self._draw_component(gc, view_bounds, mode)
return
def _draw_component(self, gc, view_bounds=None, mode='normal', component=None):
""" Draws the component.
This method is preserved for backwards compatibility. Overrides
PlotComponent.
"""
if not self.visible:
return
if not self._cache_valid:
if component is not None:
self._calculate_geometry_overlay(component)
else:
self._calculate_geometry()
self._compute_tick_positions(gc, component)
self._compute_labels(gc)
with gc:
# slight optimization: if we set the font correctly on the
# base gc before handing it in to our title and tick labels,
# their set_font() won't have to do any work.
gc.set_font(self.tick_label_font)
if self.axis_line_visible:
self._draw_axis_line(gc, self._origin_point, self._end_axis_point)
if self.title:
self._draw_title(gc)
self._draw_ticks(gc)
self._draw_labels(gc)
self._cache_valid = True
return
#------------------------------------------------------------------------
# Private draw routines
#------------------------------------------------------------------------
def _layout_as_overlay(self, size=None, force=False):
""" Lays out the axis as an overlay on another component.
"""
if self.component is not None:
if self.orientation in ("left", "right"):
self.y = self.component.y
self.height = self.component.height
if self.orientation == "left":
self.width = self.component.padding_left
self.x = self.component.outer_x
elif self.orientation == "right":
self.width = self.component.padding_right
self.x = self.component.x2 + 1
else:
self.x = self.component.x
self.width = self.component.width
if self.orientation == "bottom":
self.height = self.component.padding_bottom
self.y = self.component.outer_y
elif self.orientation == "top":
self.height = self.component.padding_top
self.y = self.component.y2 + 1
return
def _draw_axis_line(self, gc, startpoint, endpoint):
""" Draws the line for the axis.
"""
with gc:
gc.set_antialias(0)
gc.set_line_width(self.axis_line_weight)
gc.set_stroke_color(self.axis_line_color_)
gc.set_line_dash(self.axis_line_style_)
gc.move_to(*around(startpoint))
gc.line_to(*around(endpoint))
gc.stroke_path()
return
def _draw_title(self, gc, label=None, axis_offset=None):
""" Draws the title for the axis.
"""
if label is None:
title_label = Label(text=self.title,
font=self.title_font,
color=self.title_color,
rotate_angle=self.title_angle)
else:
title_label = label
# get the _rotated_ bounding box of the label
tl_bounds = array(title_label.get_bounding_box(gc), float64)
text_center_to_corner = -tl_bounds/2.0
# which axis are we moving away from the axis line along?
axis_index = self._major_axis.argmin()
if self.title_spacing != 'auto':
axis_offset = self.title_spacing
if (self.title_spacing) and (axis_offset is None):
if not self.ticklabel_cache:
axis_offset = 25
else:
axis_offset = max([l._bounding_box[axis_index] for l in self.ticklabel_cache]) * 1.3
offset = (self._origin_point+self._end_axis_point)/2
axis_dist = self.tick_out + tl_bounds[axis_index]/2.0 + axis_offset
offset -= self._inside_vector * axis_dist
offset += text_center_to_corner
gc.translate_ctm(*offset)
title_label.draw(gc)
gc.translate_ctm(*(-offset))
return
def _draw_ticks(self, gc):
""" Draws the tick marks for the axis.
"""
if not self.tick_visible:
return
gc.set_stroke_color(self.tick_color_)
gc.set_line_width(self.tick_weight)
gc.set_antialias(False)
gc.begin_path()
tick_in_vector = self._inside_vector*self.tick_in
tick_out_vector = self._inside_vector*self.tick_out
for tick_pos in self._tick_positions:
gc.move_to(*(tick_pos + tick_in_vector))
gc.line_to(*(tick_pos - tick_out_vector))
gc.stroke_path()
return
def _draw_labels(self, gc):
""" Draws the tick labels for the axis.
"""
# which axis are we moving away from the axis line along?
axis_index = self._major_axis.argmin()
inside_vector = self._inside_vector
if self.tick_label_position == "inside":
inside_vector = -inside_vector
for i in range(len(self._tick_label_positions)):
#We want a more sophisticated scheme than just 2 decimals all the time
ticklabel = self.ticklabel_cache[i]
tl_bounds = self._tick_label_bounding_boxes[i]
#base_position puts the tick label at a point where the vector
#extending tick_label_offset units from the tick mark
#just touches the rectangular bounding box of the tick label.
#Note: This is not necessarily optimal for non
#horizontal/vertical axes. More work could be done on this.
base_position = self._tick_label_positions[i].copy()
axis_dist = self.tick_label_offset + tl_bounds[axis_index]/2.0
base_position -= inside_vector * axis_dist
base_position -= tl_bounds/2.0
if self.tick_label_alignment == 'corner':
if self.orientation in ("top", "bottom"):
base_position[0] += tl_bounds[0]/2.0
elif self.orientation == "left":
base_position[1] -= tl_bounds[1]/2.0
elif self.orientation == "right":
base_position[1] += tl_bounds[1]/2.0
if self.ensure_labels_bounded:
bound_idx = self._major_axis.argmax()
if i == 0:
base_position[bound_idx] = max(base_position[bound_idx],
self._origin_point[bound_idx])
elif i == len(self._tick_label_positions)-1:
base_position[bound_idx] = min(base_position[bound_idx],
self._end_axis_point[bound_idx] - \
tl_bounds[bound_idx])
tlpos = around(base_position)
gc.translate_ctm(*tlpos)
ticklabel.draw(gc)
gc.translate_ctm(*(-tlpos))
return
#------------------------------------------------------------------------
# Private methods for computing positions and layout
#------------------------------------------------------------------------
def _reset_cache(self):
""" Clears the cached tick positions, labels, and label positions.
"""
self._tick_positions = []
self._tick_label_list = []
self._tick_label_positions = []
return
def _compute_tick_positions(self, gc, overlay_component=None):
""" Calculates the positions for the tick marks.
"""
if (self.mapper is None):
self._reset_cache()
self._cache_valid = True
return
datalow = self.mapper.range.low
datahigh = self.mapper.range.high
screenhigh = self.mapper.high_pos
screenlow = self.mapper.low_pos
if overlay_component is not None:
origin = getattr(overlay_component, 'origin', 'bottom left')
else:
origin = self.origin
if self.orientation in ("top", "bottom"):
if "right" in origin:
flip_from_gc = True
else:
flip_from_gc = False
elif self.orientation in ("left", "right"):
if "top" in origin:
flip_from_gc = True
else:
flip_from_gc = False
if flip_from_gc:
screenlow, screenhigh = screenhigh, screenlow
if (datalow == datahigh) or (screenlow == screenhigh) or \
(datalow in [inf, -inf]) or (datahigh in [inf, -inf]):
self._reset_cache()
self._cache_valid = True
return
if datalow > datahigh:
raise RuntimeError, "DataRange low is greater than high; unable to compute axis ticks."
if not self.tick_generator:
return
if hasattr(self.tick_generator, "get_ticks_and_labels"):
# generate ticks and labels simultaneously
tmp = self.tick_generator.get_ticks_and_labels(datalow, datahigh,
screenlow, screenhigh)
if len(tmp) == 0:
tick_list = []
labels = []
else:
tick_list, labels = tmp
# compute the labels here
self.ticklabel_cache = [Label(text=lab,
font=self.tick_label_font,
color=self.tick_label_color) \
for lab in labels]
self._tick_label_bounding_boxes = [array(ticklabel.get_bounding_box(gc), float64) \
for ticklabel in self.ticklabel_cache]
else:
scale = 'log' if isinstance(self.mapper, LogMapper) else 'linear'
if self.small_haxis_style:
tick_list = array([datalow, datahigh])
else:
tick_list = array(self.tick_generator.get_ticks(datalow, datahigh,
datalow, datahigh,
self.tick_interval,
use_endpoints=False,
scale=scale), float64)
mapped_tick_positions = (array(self.mapper.map_screen(tick_list))-screenlow) / \
(screenhigh-screenlow)
self._tick_positions = around(array([self._axis_vector*tickpos + self._origin_point \
for tickpos in mapped_tick_positions]))
self._tick_label_list = tick_list
self._tick_label_positions = self._tick_positions
return
def _compute_labels(self, gc):
"""Generates the labels for tick marks.
Waits for the cache to become invalid.
"""
# tick labels are already computed
if hasattr(self.tick_generator, "get_ticks_and_labels"):
return
formatter = self.tick_label_formatter
def build_label(val):
tickstring = formatter(val) if formatter is not None else str(val)
return Label(text=tickstring,
font=self.tick_label_font,
color=self.tick_label_color,
rotate_angle=self.tick_label_rotate_angle,
margin=self.tick_label_margin)
self.ticklabel_cache = [build_label(val) for val in self._tick_label_list]
self._tick_label_bounding_boxes = [array(ticklabel.get_bounding_box(gc), float)
for ticklabel in self.ticklabel_cache]
return
def _calculate_geometry(self):
origin = self.origin
screenhigh = self.mapper.high_pos
screenlow = self.mapper.low_pos
if self.orientation in ('top', 'bottom'):
self._major_axis_size = self.bounds[0]
self._minor_axis_size = self.bounds[1]
self._major_axis = array([1., 0.])
self._title_orientation = array([0.,1.])
self.title_angle = 0.0
if self.orientation == 'top':
self._origin_point = array(self.position)
self._inside_vector = array([0.,-1.])
else: #self.orientation == 'bottom'
self._origin_point = array(self.position) + array([0., self.bounds[1]])
self._inside_vector = array([0., 1.])
if "right" in origin:
screenlow, screenhigh = screenhigh, screenlow
elif self.orientation in ('left', 'right'):
self._major_axis_size = self.bounds[1]
self._minor_axis_size = self.bounds[0]
self._major_axis = array([0., 1.])
self._title_orientation = array([-1., 0])
if self.orientation == 'left':
self._origin_point = array(self.position) + array([self.bounds[0], 0.])
self._inside_vector = array([1., 0.])
self.title_angle = 90.0
else: #self.orientation == 'right'
self._origin_point = array(self.position)
self._inside_vector = array([-1., 0.])
self.title_angle = 270.0
if "top" in origin:
screenlow, screenhigh = screenhigh, screenlow
if self.ensure_ticks_bounded:
self._origin_point -= self._inside_vector*self.tick_in
self._end_axis_point = abs(screenhigh-screenlow)*self._major_axis + self._origin_point
self._axis_vector = self._end_axis_point - self._origin_point
# This is the vector that represents one unit of data space in terms of screen space.
self._axis_pixel_vector = self._axis_vector/sqrt(dot(self._axis_vector,self._axis_vector))
return
def _calculate_geometry_overlay(self, overlay_component=None):
if overlay_component is None:
overlay_component = self
component_origin = getattr(overlay_component, "origin", 'bottom left')
screenhigh = self.mapper.high_pos
screenlow = self.mapper.low_pos
if self.orientation in ('top', 'bottom'):
self._major_axis_size = overlay_component.bounds[0]
self._minor_axis_size = overlay_component.bounds[1]
self._major_axis = array([1., 0.])
self._title_orientation = array([0.,1.])
self.title_angle = 0.0
if self.orientation == 'top':
self._origin_point = array([overlay_component.x, overlay_component.y2])
self._inside_vector = array([0.0, -1.0])
else:
self._origin_point = array([overlay_component.x, overlay_component.y])
self._inside_vector = array([0.0, 1.0])
if "right" in component_origin:
screenlow, screenhigh = screenhigh, screenlow
elif self.orientation in ('left', 'right'):
self._major_axis_size = overlay_component.bounds[1]
self._minor_axis_size = overlay_component.bounds[0]
self._major_axis = array([0., 1.])
self._title_orientation = array([-1., 0])
if self.orientation == 'left':
self._origin_point = array([overlay_component.x, overlay_component.y])
self._inside_vector = array([1.0, 0.0])
self.title_angle = 90.0
else:
self._origin_point = array([overlay_component.x2, overlay_component.y])
self._inside_vector = array([-1.0, 0.0])
self.title_angle = 270.0
if "top" in component_origin:
screenlow, screenhigh = screenhigh, screenlow
if self.ensure_ticks_bounded:
self._origin_point -= self._inside_vector*self.tick_in
self._end_axis_point = abs(screenhigh-screenlow)*self._major_axis + self._origin_point
self._axis_vector = self._end_axis_point - self._origin_point
# This is the vector that represents one unit of data space in terms of screen space.
self._axis_pixel_vector = self._axis_vector/sqrt(dot(self._axis_vector,self._axis_vector))
return
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
def _bounds_changed(self, old, new):
super(PlotAxis, self)._bounds_changed(old, new)
self._layout_needed = True
self._invalidate()
def _bounds_items_changed(self, event):
super(PlotAxis, self)._bounds_items_changed(event)
self._layout_needed = True
self._invalidate()
def _mapper_changed(self, old, new):
if old is not None:
old.on_trait_change(self.mapper_updated, "updated", remove=True)
if new is not None:
new.on_trait_change(self.mapper_updated, "updated")
self._invalidate()
def mapper_updated(self):
"""
Event handler that is bound to this axis's mapper's **updated** event
"""
self._invalidate()
def _position_changed(self, old, new):
super(PlotAxis, self)._position_changed(old, new)
self._cache_valid = False
def _position_items_changed(self, event):
super(PlotAxis, self)._position_items_changed(event)
self._cache_valid = False
def _position_changed_for_component(self):
self._cache_valid = False
def _position_items_changed_for_component(self):
self._cache_valid = False
def _bounds_changed_for_component(self):
self._cache_valid = False
self._layout_needed = True
def _bounds_items_changed_for_component(self):
self._cache_valid = False
self._layout_needed = True
def _origin_changed_for_component(self):
self._invalidate()
def _updated_fired(self):
"""If the axis bounds changed, redraw."""
self._cache_valid = False
return
def _invalidate(self):
self._cache_valid = False
self.invalidate_draw()
if self.component:
self.component.invalidate_draw()
return
def _component_changed(self):
if self.mapper is not None:
# If there is a mapper set, just leave it be.
return
# Try to pick the most appropriate mapper for our orientation
# and what information we can glean from our component.
attrmap = { "left": ("ymapper", "y_mapper", "value_mapper"),
"bottom": ("xmapper", "x_mapper", "index_mapper"), }
attrmap["right"] = attrmap["left"]
attrmap["top"] = attrmap["bottom"]
component = self.component
attr1, attr2, attr3 = attrmap[self.orientation]
        for attr in (attr1, attr2, attr3):
if hasattr(component, attr):
self.mapper = getattr(component, attr)
break
# Keep our origin in sync with the component
self.origin = getattr(component, 'origin', 'bottom left')
return
#------------------------------------------------------------------------
# The following event handlers just invalidate our previously computed
# Label instances and backbuffer if any of our visual attributes change.
# TODO: refactor this stuff and the caching of contained objects (e.g. Label)
#------------------------------------------------------------------------
def _title_changed(self):
self.invalidate_draw()
if self.component:
self.component.invalidate_draw()
return
def _anytrait_changed(self, name, old, new):
""" For every trait that defines a visual attribute
we just call _invalidate() when a change is made.
"""
invalidate_traits = [
'title_font',
'title_spacing',
'title_color',
'tick_weight',
'tick_color',
'tick_label_font',
'tick_label_color',
'tick_label_rotate_angle',
'tick_label_alignment',
'tick_label_margin',
'tick_label_offset',
'tick_label_position',
'tick_label_formatter',
'tick_in',
'tick_out',
'tick_visible',
'tick_interval',
'tick_generator',
'orientation',
'origin',
'axis_line_visible',
'axis_line_color',
'axis_line_weight',
'axis_line_style',
'small_haxis_style',
'ensure_labels_bounded',
'ensure_ticks_bounded',
]
if name in invalidate_traits:
self._invalidate()
#------------------------------------------------------------------------
# Persistence-related methods
#------------------------------------------------------------------------
def __getstate__(self):
dont_pickle = [
'_tick_list',
'_tick_positions',
'_tick_label_list',
'_tick_label_positions',
'_tick_label_bounding_boxes',
'_major_axis_size',
'_minor_axis_size',
'_major_axis',
'_title_orientation',
'_title_angle',
'_origin_point',
'_inside_vector',
'_axis_vector',
'_axis_pixel_vector',
'_end_axis_point',
'_ticklabel_cache',
'_cache_valid'
]
state = super(PlotAxis,self).__getstate__()
for key in dont_pickle:
            if key in state:
del state[key]
return state
def __setstate__(self, state):
super(PlotAxis,self).__setstate__(state)
self._mapper_changed(None, self.mapper)
self._reset_cache()
self._cache_valid = False
return
# EOF ########################################################################
|
|
import logging
from lib.utils import util, constants
from lib.base_controller import ShellException
from .log_analyzer_command_controller import LogAnalyzerCommandController
class _GrepFile(LogAnalyzerCommandController):
def __init__(self, modifiers):
self.modifiers = modifiers
self.logger = logging.getLogger("asadm")
def do_show(self, line):
if not line:
raise ShellException(
"Could not understand log request, " + "see 'help log'"
)
mods = self.parse_modifiers(line, duplicates_in_line_allowed=True)
line = mods["line"]
tline = line[:]
search_strs = []
ignore_strs = []
output_page_size = 10
start_tm = "head"
duration = ""
sources = []
is_and = False
is_casesensitive = True
reading_strings = None
uniq = False
system_grep = False
while tline:
string_read = False
word = tline.pop(0)
if word == "-s":
reading_strings = search_strs
string_read = True
elif word == "-a":
is_and = True
elif word == "-v":
reading_strings = ignore_strs
string_read = True
elif word == "-i":
is_casesensitive = False
elif word == "-u":
uniq = True
elif word == "-sg":
system_grep = True
elif word == "-f":
start_tm = tline.pop(0)
start_tm = util.strip_string(start_tm)
elif word == "-d":
duration = tline.pop(0)
duration = util.strip_string(duration)
elif word == "-p":
try:
output_page_size = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning("Wrong output page size, setting default value")
elif word == "-n":
try:
sources = [
int(i) for i in util.strip_string(tline.pop(0)).split(",")
]
except Exception:
sources = []
elif reading_strings is not None:
try:
reading_strings.append(util.strip_string(word))
except Exception:
pass
string_read = True
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
if not string_read:
reading_strings = None
if not search_strs:
return
logs = self.log_handler.get_logs_by_index(sources)
if not logs:
self.logger.info("No log files added. Use add command to add log files.")
show_results = self.log_handler.grep(
logs,
search_strs,
ignore_strs=ignore_strs,
is_and=is_and,
is_casesensitive=is_casesensitive,
start_tm_arg=start_tm,
duration_arg=duration,
uniq=uniq,
output_page_size=output_page_size,
system_grep=system_grep,
)
page_index = 1
for show_res in show_results:
if show_res:
self.view.show_grep("", show_res[constants.SHOW_RESULT_KEY])
page_index += 1
show_results.close()
def do_count(self, line):
if not line:
raise ShellException(
"Could not understand log request, " + "see 'help log'"
)
mods = self.parse_modifiers(line, duplicates_in_line_allowed=True)
line = mods["line"]
tline = line[:]
search_strs = []
ignore_strs = []
output_page_size = 10
is_and = False
is_casesensitive = True
start_tm = "head"
duration = ""
slice_duration = "600"
sources = []
reading_strings = None
title_every_nth = 0
uniq = False
system_grep = False
while tline:
string_read = False
word = tline.pop(0)
if word == "-s":
reading_strings = search_strs
string_read = True
elif word == "-a":
is_and = True
elif word == "-v":
reading_strings = ignore_strs
string_read = True
elif word == "-i":
is_casesensitive = False
elif word == "-u":
uniq = True
elif word == "-sg":
system_grep = True
elif word == "-p":
try:
output_page_size = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning("Wrong output page size, setting default value")
elif word == "-r":
try:
title_every_nth = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning(
"Wrong output title repetition value, setting default value"
)
elif word == "-f":
start_tm = tline.pop(0)
start_tm = util.strip_string(start_tm)
elif word == "-d":
duration = tline.pop(0)
duration = util.strip_string(duration)
elif word == "-t":
slice_duration = tline.pop(0)
slice_duration = util.strip_string(slice_duration)
elif word == "-n":
try:
sources = [
int(i) for i in util.strip_string(tline.pop(0)).split(",")
]
except Exception:
sources = []
elif reading_strings is not None:
try:
reading_strings.append(util.strip_string(word))
except Exception:
pass
string_read = True
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
if not string_read:
reading_strings = None
if not search_strs:
return
logs = self.log_handler.get_logs_by_index(sources)
if not logs:
self.logger.info("No log files added. Use add command to add log files.")
count_results = self.log_handler.grep_count(
logs,
search_strs,
ignore_strs=ignore_strs,
is_and=is_and,
is_casesensitive=is_casesensitive,
start_tm_arg=start_tm,
duration_arg=duration,
uniq=uniq,
slice_duration=slice_duration,
output_page_size=output_page_size,
system_grep=system_grep,
)
page_index = 1
for count_res in count_results:
if count_res:
self.view.show_grep_count(
"%s(Page-%d)" % ("cluster ", page_index),
count_res,
title_every_nth=title_every_nth,
)
page_index += 1
count_results.close()
def do_diff(self, line):
if not line:
raise ShellException(
"Could not understand log request, " + "see 'help log'"
)
mods = self.parse_modifiers(line, duplicates_in_line_allowed=True)
line = mods["line"]
tline = line[:]
search_strs = []
start_tm = "head"
duration = ""
slice_tm = "10"
output_page_size = 10
show_count = 1
limit = ""
sources = []
is_casesensitive = True
title_every_nth = 0
reading_search_strings = False
search_string_read = False
while tline:
search_string_read = False
word = tline.pop(0)
if word == "-s":
try:
search_strs.append(util.strip_string(tline.pop(0)))
reading_search_strings = True
search_string_read = True
except Exception:
search_strs = []
elif word == "-f":
start_tm = tline.pop(0)
start_tm = util.strip_string(start_tm)
elif word == "-d":
duration = tline.pop(0)
duration = util.strip_string(duration)
elif word == "-t":
slice_tm = tline.pop(0)
slice_tm = util.strip_string(slice_tm)
elif word == "-k":
show_count = tline.pop(0)
show_count = int(util.strip_string(show_count))
elif word == "-i":
is_casesensitive = False
elif word == "-p":
try:
output_page_size = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning("Wrong output page size, setting default value")
elif word == "-r":
try:
title_every_nth = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning(
"Wrong output title repetition value, setting default value"
)
elif word == "-n":
try:
sources = [
int(i) for i in util.strip_string(tline.pop(0)).split(",")
]
except Exception:
sources = []
elif word == "-l" and tline:
limit = tline.pop(0)
limit = int(util.strip_string(limit))
elif reading_search_strings:
try:
search_strs.append(util.strip_string(word))
except Exception:
pass
search_string_read = True
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
if not search_string_read:
reading_search_strings = False
if not search_strs:
return
logs = self.log_handler.get_logs_by_index(sources)
if not logs:
self.logger.info("No log files added. Use add command to add log files.")
diff_results = self.log_handler.grep_diff(
logs,
search_strs,
is_casesensitive=is_casesensitive,
start_tm_arg=start_tm,
duration_arg=duration,
slice_duration=slice_tm,
every_nth_slice=show_count,
upper_limit_check=limit,
output_page_size=output_page_size,
)
page_index = 1
for diff_res in diff_results:
if diff_res:
self.view.show_grep_diff(
"%s Diff (Page-%d)" % (search_strs[-1], page_index),
diff_res,
title_every_nth=title_every_nth,
)
page_index += 1
diff_results.close()
def do_latency(self, line):
if not line:
raise ShellException(
"Could not understand latency request, " + "see 'help log'"
)
mods = self.parse_modifiers(line, duplicates_in_line_allowed=True)
line = mods["line"]
tline = line[:]
hist = ""
start_tm = "head"
duration = ""
slice_tm = "10"
output_page_size = 10
bucket_count = 3
every_nth_bucket = 3
sources = []
time_rounding = True
title_every_nth = 0
ns = None
show_relative_stats = False
while tline:
word = tline.pop(0)
if word == "-h":
hist = tline.pop(0)
hist = util.strip_string(hist)
elif word == "-f":
start_tm = tline.pop(0)
start_tm = util.strip_string(start_tm)
elif word == "-d":
duration = tline.pop(0)
duration = util.strip_string(duration)
elif word == "-t":
slice_tm = tline.pop(0)
slice_tm = util.strip_string(slice_tm)
elif word == "-e":
every_nth_bucket = tline.pop(0)
every_nth_bucket = int(util.strip_string(every_nth_bucket))
elif word == "-b":
bucket_count = tline.pop(0)
bucket_count = int(util.strip_string(bucket_count))
elif word == "-p":
try:
output_page_size = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning("Wrong output page size, setting default value")
elif word == "-r":
try:
title_every_nth = int(util.strip_string(tline.pop(0)))
except Exception:
self.logger.warning(
"Wrong output title repetition value, setting default value"
)
elif word == "-n":
try:
sources = [
int(i) for i in util.strip_string(tline.pop(0)).split(",")
]
except Exception:
sources = []
elif word == "-o":
time_rounding = False
elif word == "-N":
try:
ns = tline.pop(0)
ns = util.strip_string(ns)
except Exception:
pass
elif word == "--relative-stats":
show_relative_stats = True
else:
raise ShellException(
"Do not understand '%s' in '%s'" % (word, " ".join(line))
)
if not hist:
return
ns_hist = ""
if ns:
ns_hist += "%s - " % (ns)
ns_hist += "%s" % (hist)
logs = self.log_handler.get_logs_by_index(sources)
if not logs:
self.logger.info(
"No log files added. Use 'add /path/to/log' command to add log files."
)
latency_results = self.log_handler.loglatency(
logs,
hist,
start_tm_arg=start_tm,
duration_arg=duration,
slice_duration=slice_tm,
bucket_count=bucket_count,
every_nth_bucket=every_nth_bucket,
rounding_time=time_rounding,
output_page_size=output_page_size,
ns=ns,
show_relative_stats=show_relative_stats,
)
page_index = 1
for latency_res in latency_results:
if latency_res:
if not self.view.show_log_latency(
"%s Latency (Page-%d)" % (ns_hist, page_index),
latency_res,
title_every_nth=title_every_nth,
):
break
page_index += 1
latency_results.close()
|
|
#!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_sec_services(object):
"""Auto generated class.
"""
def __init__(self, **kwargs):
self._callback = kwargs.pop('callback')
def telnet_sa_telnet_server_shutdown(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
telnet_sa = ET.SubElement(config, "telnet-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
telnet = ET.SubElement(telnet_sa, "telnet")
server = ET.SubElement(telnet, "server")
shutdown = ET.SubElement(server, "shutdown")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def telnet_sa_telnet_server_standby_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
telnet_sa = ET.SubElement(config, "telnet-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
telnet = ET.SubElement(telnet_sa, "telnet")
server = ET.SubElement(telnet, "server")
standby = ET.SubElement(server, "standby")
enable = ET.SubElement(standby, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_server_shutdown(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
shutdown = ET.SubElement(server, "shutdown")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_server_key_exchange_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
key_exchange = ET.SubElement(server, "key-exchange")
protocol = ET.SubElement(key_exchange, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_server_rekey_interval(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
rekey_interval = ET.SubElement(server, "rekey-interval")
rekey_interval.text = kwargs.pop('rekey_interval')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_server_cipher(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
cipher = ET.SubElement(server, "cipher")
cipher.text = kwargs.pop('cipher')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_server_standby_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
standby = ET.SubElement(server, "standby")
enable = ET.SubElement(standby, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_server_key_rsa(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
key = ET.SubElement(server, "key")
rsa = ET.SubElement(key, "rsa")
rsa.text = kwargs.pop('rsa')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_server_key_ecdsa(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
key = ET.SubElement(server, "key")
ecdsa = ET.SubElement(key, "ecdsa")
ecdsa.text = kwargs.pop('ecdsa')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_server_key_dsa(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
key = ET.SubElement(server, "key")
dsa = ET.SubElement(key, "dsa")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ssh_sa_ssh_client_cipher(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
client = ET.SubElement(ssh, "client")
cipher = ET.SubElement(client, "cipher")
cipher.text = kwargs.pop('cipher')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
|
"""
Tests for botogram/objects/__init__.py
Copyright (c) 2015 Pietro Albini <pietro@pietroalbini.io>
Released under the MIT license
"""
import pytest
import botogram.objects
def test_user_avatar(api, mock_req):
mock_req({
"getUserProfilePhotos": {
"ok": True,
"result": {
"total_count": 1,
"photos": [
[
{
"file_id": "aaaaaa",
"width": 50,
"height": 50,
"file_size": 128,
},
{
"file_id": "bbbbbb",
"width": 25,
"height": 25,
"file_size": 64,
},
],
],
},
},
})
# First of all, make sure the API wrapper is required to fetch avatars
user = botogram.objects.User({"id": 123, "first_name": "Bob"})
with pytest.raises(RuntimeError):
user.avatar # Access the avatar without an API wrapper
# Now use an API
user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
# Be sure the avatar isn't loaded yet
assert not hasattr(user, "_avatar")
# Now fetch the avatar
avatar = user.avatar
assert avatar.file_id == "aaaaaa"
# And be sure it's cached
assert hasattr(user, "_avatar")
assert user._avatar == avatar
def test_user_avatar_with_no_photos(api, mock_req):
mock_req({
"getUserProfilePhotos": {
"ok": True,
"result": {
"total_count": 0,
"photos": [],
},
},
})
user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
assert user.avatar is None
def test_user_avatar_history(api, mock_req):
mock_req({
"getUserProfilePhotos": {
"ok": True,
"result": {
"total_count": 3,
"photos": [
[
{
"file_id": "aaaaaa",
"width": 50,
"height": 50,
"file_size": 128,
},
],
[
{
"file_id": "bbbbbb",
"width": 50,
"height": 50,
"file_size": 128,
},
],
[
{
"file_id": "cccccc",
"width": 50,
"height": 50,
"file_size": 128,
},
],
],
},
},
})
# First of all, make sure the API wrapper is required to fetch avatars
user = botogram.objects.User({"id": 123, "first_name": "Bob"})
with pytest.raises(RuntimeError):
user.avatar_history() # Access the avatar without an API wrapper
# Now use an API
user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
files = [avatar.file_id for avatar in user.avatar_history()]
assert files == ["aaaaaa", "bbbbbb", "cccccc"]
def test_user_avatar_history_multiple_requests(api, mock_req):
mock_req({
"getUserProfilePhotos": {
"ok": True,
"result": {
                # The total count is double the number of avatars provided in
                # this response, simulating a user with more than 100 avatars
                # (which forces multiple requests)
"total_count": 4,
"photos": [
[
{
"file_id": "aaaaaa",
"width": 50,
"height": 50,
"file_size": 128,
},
],
[
{
"file_id": "bbbbbb",
"width": 50,
"height": 50,
"file_size": 128,
},
],
],
},
},
})
user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
files = [avatar.file_id for avatar in user.avatar_history()]
assert files == ["aaaaaa", "bbbbbb", "aaaaaa", "bbbbbb"]
def test_user_avatar_history_no_photos(api, mock_req):
mock_req({
"getUserProfilePhotos": {
"ok": True,
"result": {
"total_count": 0,
"photos": [],
},
},
})
user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
assert user.avatar_history() == []
def test_photo_object():
    # The Photo object is custom-made, so it's worth ensuring that all of it
    # works as expected
data = [
{"file_id": "aaaaaa", "width": 10, "height": 10, "file_size": 48},
{"file_id": "aaaaaa", "width": 20, "height": 20, "file_size": 148},
{"file_id": "aaaaaa", "width": 30, "height": 30, "file_size": 248},
]
# Let's create a valid Photo object
photo = botogram.objects.Photo(data)
assert len(photo.sizes) == len(data)
assert photo.sizes[0].file_id == data[0]["file_id"]
assert photo.smallest.file_id == data[0]["file_id"]
assert photo.biggest.file_id == data[-1]["file_id"]
assert photo.biggest.file_id == photo.file_id
assert photo.serialize() == data
# Test if set_api is working
photo2 = botogram.objects.Photo(data, "testapi")
assert photo2._api == "testapi"
assert photo2.sizes[0]._api == "testapi"
photo2.set_api("anotherapi")
assert photo2._api == "anotherapi"
assert photo2.sizes[0]._api == "anotherapi"
# Empty PhotoSize not supported, sorry
with pytest.raises(ValueError):
botogram.objects.Photo([])
# The data provided must be a list
with pytest.raises(ValueError):
botogram.objects.Photo("I'm not a list (doh)")
# And the items inside a list must be PhotoSize
with pytest.raises(ValueError):
botogram.objects.Photo([{"This": "isn't", "a": "PhotoSize"}])
def test_user_name():
# Create a dummy User object
user = botogram.objects.User({"id": 123, "first_name": "John"})
# With only the first name
assert user.name == "John"
# Also with a last name
user.last_name = "Doe"
assert user.name == "John Doe"
def test_chat_name():
# Create a dummy Chat object
chat = botogram.objects.Chat({"id": 123, "type": "",
"title": "Test", "first_name": "John"})
# With a title
assert chat.name == "Test"
# Without a title
chat.title = None
assert chat.name == "John"
# Without a title and with a last name
chat.last_name = "Doe"
assert chat.name == "John Doe"
|
|
# pylint: disable=too-many-lines
"""
homeassistant.components.camera
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Component to interface with various cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/camera/
"""
import logging
import re
import time
import requests
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.const import (
ATTR_ENTITY_PICTURE,
HTTP_NOT_FOUND,
ATTR_ENTITY_ID,
)
DOMAIN = 'camera'
DEPENDENCIES = ['http']
GROUP_NAME_ALL_CAMERAS = 'all_cameras'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SWITCH_ACTION_RECORD = 'record'
SWITCH_ACTION_SNAPSHOT = 'snapshot'
SERVICE_CAMERA = 'camera_service'
STATE_RECORDING = 'recording'
DEFAULT_RECORDING_SECONDS = 30
# Maps discovered services to their platforms
DISCOVERY_PLATFORMS = {}
FILE_DATETIME_FORMAT = '%Y-%m-%d_%H-%M-%S-%f'
DIR_DATETIME_FORMAT = '%Y-%m-%d_%H-%M-%S'
REC_DIR_PREFIX = 'recording-'
REC_IMG_PREFIX = 'recording_image-'
STATE_STREAMING = 'streaming'
STATE_IDLE = 'idle'
CAMERA_PROXY_URL = '/api/camera_proxy_stream/{0}'
CAMERA_STILL_URL = '/api/camera_proxy/{0}'
ENTITY_IMAGE_URL = '/api/camera_proxy/{0}?time={1}'
MULTIPART_BOUNDARY = '--jpegboundary'
MJPEG_START_HEADER = 'Content-type: {0}\r\n\r\n'
# pylint: disable=too-many-branches
def setup(hass, config):
""" Track states and offer events for cameras. """
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL,
DISCOVERY_PLATFORMS)
component.setup(config)
# -------------------------------------------------------------------------
# CAMERA COMPONENT ENDPOINTS
# -------------------------------------------------------------------------
    # The following defines the endpoints for serving images from the camera
    # via the HA http server. This means that you can access images from
    # your camera outside of your LAN without the need for port forwarding,
    # etc. Because the authentication header can't be added to image
    # requests, these endpoints are secured with session-based security.
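    # For illustration (hypothetical entity id): a camera registered as
    # "camera.front_door" would be reachable at
    #   /api/camera_proxy/camera.front_door          (single still image)
    #   /api/camera_proxy_stream/camera.front_door   (MJPEG stream)
    # via the URL patterns registered with hass.http below.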
# pylint: disable=unused-argument
def _proxy_camera_image(handler, path_match, data):
""" Proxies the camera image via the HA server. """
entity_id = path_match.group(ATTR_ENTITY_ID)
camera = component.entities.get(entity_id)
if camera is None:
handler.send_response(HTTP_NOT_FOUND)
handler.end_headers()
return
response = camera.camera_image()
if response is None:
handler.send_response(HTTP_NOT_FOUND)
handler.end_headers()
return
handler.wfile.write(response)
hass.http.register_path(
'GET',
re.compile(r'/api/camera_proxy/(?P<entity_id>[a-zA-Z\._0-9]+)'),
_proxy_camera_image)
# pylint: disable=unused-argument
def _proxy_camera_mjpeg_stream(handler, path_match, data):
"""
Proxies the camera image as an mjpeg stream via the HA server.
This function takes still images from the IP camera and turns them
into an MJPEG stream. This means that HA can return a live video
stream even with only a still image URL available.
"""
entity_id = path_match.group(ATTR_ENTITY_ID)
camera = component.entities.get(entity_id)
if camera is None:
handler.send_response(HTTP_NOT_FOUND)
handler.end_headers()
return
try:
camera.is_streaming = True
camera.update_ha_state()
handler.request.sendall(bytes('HTTP/1.1 200 OK\r\n', 'utf-8'))
handler.request.sendall(bytes(
'Content-type: multipart/x-mixed-replace; \
boundary=--jpgboundary\r\n\r\n', 'utf-8'))
handler.request.sendall(bytes('--jpgboundary\r\n', 'utf-8'))
# MJPEG_START_HEADER.format()
while True:
img_bytes = camera.camera_image()
if img_bytes is None:
continue
headers_str = '\r\n'.join((
'Content-length: {}'.format(len(img_bytes)),
'Content-type: image/jpeg',
)) + '\r\n\r\n'
handler.request.sendall(
bytes(headers_str, 'utf-8') +
img_bytes +
bytes('\r\n', 'utf-8'))
handler.request.sendall(
bytes('--jpgboundary\r\n', 'utf-8'))
time.sleep(0.5)
except (requests.RequestException, IOError):
camera.is_streaming = False
camera.update_ha_state()
hass.http.register_path(
'GET',
re.compile(
r'/api/camera_proxy_stream/(?P<entity_id>[a-zA-Z\._0-9]+)'),
_proxy_camera_mjpeg_stream)
return True
class Camera(Entity):
""" The base class for camera components. """
def __init__(self):
self.is_streaming = False
@property
# pylint: disable=no-self-use
def is_recording(self):
""" Returns true if the device is recording. """
return False
@property
# pylint: disable=no-self-use
def brand(self):
""" Should return a string of the camera brand. """
return None
@property
# pylint: disable=no-self-use
def model(self):
""" Returns string of camera model. """
return None
def camera_image(self):
""" Return bytes of camera image. """
raise NotImplementedError()
@property
def state(self):
""" Returns the state of the entity. """
if self.is_recording:
return STATE_RECORDING
elif self.is_streaming:
return STATE_STREAMING
else:
return STATE_IDLE
@property
def state_attributes(self):
""" Returns optional state attributes. """
attr = {
ATTR_ENTITY_PICTURE: ENTITY_IMAGE_URL.format(
self.entity_id, time.time()),
}
if self.model:
attr['model_name'] = self.model
if self.brand:
attr['brand'] = self.brand
return attr
|
|
#!/usr/bin/env python
#
# Copyright (c) 2013-2015 Kevin Steves <kevin.steves@pobox.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from __future__ import print_function
from datetime import date, timedelta
import sys
import os
import signal
import getopt
import json
import pprint
import logging
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(libpath, os.pardir, 'lib')]
import pan.wfapi
import pan.config
debug = 0
def main():
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
# Windows
pass
# set_encoding()
options = parse_opts()
if options['debug']:
logger = logging.getLogger()
if options['debug'] == 3:
logger.setLevel(pan.wfapi.DEBUG3)
elif options['debug'] == 2:
logger.setLevel(pan.wfapi.DEBUG2)
elif options['debug'] == 1:
logger.setLevel(pan.wfapi.DEBUG1)
# log_format = '%(levelname)s %(name)s %(message)s'
log_format = '%(message)s'
handler = logging.StreamHandler()
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
try:
wfapi = pan.wfapi.PanWFapi(tag=options['tag'],
api_key=options['api_key'],
hostname=options['hostname'],
timeout=options['timeout'],
http=options['http'],
cacloud=options['cacloud'],
cafile=options['cafile'],
capath=options['capath'])
except pan.wfapi.PanWFapiError as msg:
print('pan.wfapi.PanWFapi:', msg, file=sys.stderr)
sys.exit(1)
if options['debug'] > 2:
print('wfapi.__str__()===>\n', wfapi, '\n<===',
sep='', file=sys.stderr)
try:
hashes = process_hashes(options['hash'])
if options['submit'] is not None:
action = 'submit'
kwargs = {}
if os.path.isfile(options['submit']):
kwargs['file'] = options['submit']
else:
o = urlparse(options['submit'])
if options['debug']:
print(o, file=sys.stderr)
if o.scheme == 'file':
if o.path and os.path.isfile(o.path):
kwargs['file'] = o.path
else:
print('Invalid URL: file not found:',
options['submit'], file=sys.stderr)
sys.exit(1)
else:
if o.scheme in ['http', 'https', 'ftp']:
kwargs['url'] = options['submit']
else:
print('Invalid file or URL:',
options['submit'], file=sys.stderr)
sys.exit(1)
wfapi.submit(**kwargs)
print_status(wfapi, action)
print_response(wfapi, options)
if options['submit-link'] is not None:
action = 'submit'
kwargs = {}
kwargs['links'] = process_arg(options['submit-link'], list=True)
wfapi.submit(**kwargs)
print_status(wfapi, action)
print_response(wfapi, options)
if options['change-request']:
action = 'change-request'
kwargs = {}
if len(hashes) > 1:
print('Only 1 hash allowed for %s' % action, file=sys.stderr)
sys.exit(1)
if len(hashes) == 1:
kwargs['hash'] = hashes[0]
if options['new-verdict'] is not None:
kwargs['verdict'] = process_verdict(options['new-verdict'])
if options['email'] is not None:
kwargs['email'] = options['email']
if options['comment'] is not None:
kwargs['comment'] = process_arg(options['comment'])
wfapi.change_request(**kwargs)
print_status(wfapi, action)
print_response(wfapi, options)
if options['report']:
action = 'report'
kwargs = {}
if len(hashes) > 1:
print('Only 1 hash allowed for %s' % action, file=sys.stderr)
sys.exit(1)
if len(hashes) == 1:
kwargs['hash'] = hashes[0]
if options['format'] is not None:
kwargs['format'] = options['format']
wfapi.report(**kwargs)
print_status(wfapi, action)
print_response(wfapi, options)
save_file(wfapi, options)
if options['verdict']:
kwargs = {}
if len(hashes) == 1:
action = 'verdict'
kwargs['hash'] = hashes[0]
wfapi.verdict(**kwargs)
elif len(hashes) > 1:
action = 'verdicts'
kwargs['hashes'] = hashes
wfapi.verdicts(**kwargs)
else:
action = 'verdict'
wfapi.verdict(**kwargs)
print_status(wfapi, action)
print_response(wfapi, options)
save_file(wfapi, options)
if options['sample']:
action = 'sample'
kwargs = {}
if len(hashes) > 1:
print('Only 1 hash allowed for %s' % action, file=sys.stderr)
sys.exit(1)
if len(hashes) == 1:
kwargs['hash'] = hashes[0]
wfapi.sample(**kwargs)
print_status(wfapi, action)
print_response(wfapi, options)
save_file(wfapi, options)
if options['pcap']:
action = 'pcap'
kwargs = {}
if len(hashes) > 1:
print('Only 1 hash allowed for %s' % action, file=sys.stderr)
sys.exit(1)
if len(hashes) == 1:
kwargs['hash'] = hashes[0]
if options['platform'] is not None:
kwargs['platform'] = options['platform']
wfapi.pcap(**kwargs)
print_status(wfapi, action)
print_response(wfapi, options)
save_file(wfapi, options)
if options['changed']:
action = 'verdicts_changed'
kwargs = {}
if options['date'] is not None:
kwargs['date'] = options['date']
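                # Per the usage text ("YYYY-MM-DD or -days"), an integer date
                # that is not positive is treated as an offset in days
                # relative to today.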
try:
x = int(options['date'])
except ValueError:
pass
else:
if x < 1:
d = date.today()
d = d - timedelta(-x)
kwargs['date'] = d.isoformat()
if options['debug']:
print('relative date(%d): %s' % (x, kwargs['date']),
file=sys.stderr)
wfapi.verdicts_changed(**kwargs)
print_status(wfapi, action)
print_response(wfapi, options)
save_file(wfapi, options)
if options['testfile']:
action = 'testfile'
wfapi.testfile()
print_status(wfapi, action)
print_response(wfapi, options)
save_file(wfapi, options)
except pan.wfapi.PanWFapiError as msg:
print_status(wfapi, action, msg)
print_response(wfapi, options)
sys.exit(1)
sys.exit(0)
def process_hashes(list):
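    # Each item may be a literal hash, the path of a file containing one hash
    # per line, or '-' to read hashes from stdin (behaviour follows from the
    # branches below); only hashes given directly on the command line are
    # length-validated.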
stdin_char = '-'
hashes = []
for hash in list:
lines = []
if hash == stdin_char:
lines = sys.stdin.readlines()
else:
try:
f = open(hash)
lines = f.readlines()
f.close()
except IOError:
# only validate hash from command line
validate_hash(hash)
hashes.append(hash)
if len(lines) > 0:
            hashes.extend(x.rstrip('\r\n') for x in lines)
if debug > 1:
print('hashes:', len(hashes), file=sys.stderr)
return hashes
def validate_hash(hash):
if debug > 0:
return
if not (len(hash) == 32 or len(hash) == 64):
print('hash length must be 32 (MD5) or 64 (SHA256)',
file=sys.stderr)
sys.exit(1)
def process_arg(s, list=False):
stdin_char = '-'
if s == stdin_char:
lines = sys.stdin.readlines()
else:
try:
f = open(s)
lines = f.readlines()
f.close()
except IOError:
lines = [s]
if debug > 1:
print('lines:', lines, file=sys.stderr)
if list:
l = [x.rstrip('\r\n') for x in lines]
return l
lines = ''.join(lines)
return lines
def process_verdict(verdict):
verdicts = {
'benign': pan.wfapi.BENIGN,
'malware': pan.wfapi.MALWARE,
'grayware': pan.wfapi.GRAYWARE,
}
try:
int(verdict)
return verdict
except ValueError:
if verdict in verdicts:
return str(verdicts[verdict])
return verdict
def parse_opts():
options = {
'submit': None,
'submit-link': None,
'change-request': False,
'report': False,
'verdict': False,
'sample': False,
'pcap': False,
'changed': False,
'hash': [],
'platform': None,
'new-verdict': None,
'email': None,
'comment': None,
'testfile': False,
'format': None,
'date': None,
'dst': None,
'api_key': None,
'hostname': None,
'http': False,
'cacloud': True,
'cafile': None,
'capath': None,
'print_xml': False,
'print_python': False,
'print_json': False,
'print_html': False,
'debug': 0,
'tag': None,
'timeout': None,
}
short_options = 'K:h:xpjHDt:T:'
long_options = ['version', 'help',
'submit=', 'submit-link=',
'change-request', 'report', 'verdict', 'sample',
'pcap', 'changed',
'hash=', 'platform=', 'testfile',
'new-verdict=', 'email=', 'comment=',
'format=', 'date=', 'dst=',
'http', 'nocacloud', 'cafile=', 'capath=',
]
try:
opts, args = getopt.getopt(sys.argv[1:],
short_options,
long_options)
except getopt.GetoptError as error:
print(error, file=sys.stderr)
sys.exit(1)
for opt, arg in opts:
if False:
pass
elif opt == '--submit':
options['submit'] = arg
elif opt == '--submit-link':
options['submit-link'] = arg
elif opt == '--change-request':
options['change-request'] = True
elif opt == '--report':
options['report'] = True
elif opt == '--verdict':
options['verdict'] = True
elif opt == '--sample':
options['sample'] = True
elif opt == '--pcap':
options['pcap'] = True
elif opt == '--changed':
options['changed'] = True
elif opt == '--hash':
options['hash'].append(arg)
elif opt == '--platform':
options['platform'] = arg
elif opt == '--new-verdict':
options['new-verdict'] = arg
elif opt == '--email':
options['email'] = arg
elif opt == '--comment':
options['comment'] = arg
elif opt == '--testfile':
options['testfile'] = True
elif opt == '--format':
options['format'] = arg
elif opt == '--date':
options['date'] = arg
elif opt == '--dst':
options['dst'] = arg
elif opt == '-K':
options['api_key'] = arg
elif opt == '-h':
options['hostname'] = arg
elif opt == '--http':
options['http'] = True
elif opt == '--nocacloud':
options['cacloud'] = False
elif opt == '--cafile':
options['cafile'] = arg
elif opt == '--capath':
options['capath'] = arg
elif opt == '-x':
options['print_xml'] = True
elif opt == '-p':
options['print_python'] = True
elif opt == '-j':
options['print_json'] = True
elif opt == '-H':
options['print_html'] = True
elif opt == '-D':
if not options['debug'] < 3:
print('Maximum debug level is 3', file=sys.stderr)
sys.exit(1)
global debug
debug += 1
options['debug'] = debug
elif opt == '-t':
if arg:
options['tag'] = arg
elif opt == '-T':
options['timeout'] = arg
elif opt == '--version':
print('pan-python', pan.wfapi.__version__)
sys.exit(0)
elif opt == '--help':
usage()
sys.exit(0)
else:
assert False, 'unhandled option %s' % opt
if options['debug'] > 2:
s = pprint.pformat(options, indent=4)
print(s, file=sys.stderr)
return options
def print_status(wfapi, action, exception_msg=None):
print(action, end='', file=sys.stderr)
if exception_msg is not None:
print(': %s' % exception_msg, end='', file=sys.stderr)
else:
if wfapi.http_code is not None:
print(': %s' % wfapi.http_code, end='', file=sys.stderr)
if wfapi.http_reason is not None:
print(' %s' % wfapi.http_reason, end='', file=sys.stderr)
print(' [', end='', file=sys.stderr)
if wfapi.attachment is not None:
print('attachment="%s"' % wfapi.attachment['filename'], end='',
file=sys.stderr)
else:
        body = wfapi.response_body is not None
print('response_body=%s' % body, end='', file=sys.stderr)
if wfapi.response_type is not None:
print(' response_type=%s' % wfapi.response_type, end='',
file=sys.stderr)
if body:
print(' length=%d' % len(wfapi.response_body), end='',
file=sys.stderr)
print(']', end='', file=sys.stderr)
print(file=sys.stderr)
def print_response(wfapi, options):
    if wfapi.response_type == 'html' and wfapi.response_body is not None:
if options['print_html']:
print(wfapi.response_body.rstrip())
    elif wfapi.response_type == 'xml' and wfapi.response_body is not None:
if options['print_xml']:
print(wfapi.response_body.rstrip())
if options['print_python'] or options['print_json']:
if wfapi.xml_element_root is None:
return
elem = wfapi.xml_element_root
tags_forcelist = set(['entry'])
try:
conf = pan.config.PanConfig(config=elem,
tags_forcelist=tags_forcelist)
except pan.config.PanConfigError as msg:
print('pan.config.PanConfigError:', msg, file=sys.stderr)
sys.exit(1)
d = conf.python()
if d:
if options['print_python']:
print('var1 =', pprint.pformat(d))
if options['print_json']:
print(json.dumps(d, sort_keys=True, indent=2))
def save_file(wfapi, options):
if wfapi.attachment is None:
return
if options['dst'] is not None:
path = options['dst']
if os.path.isdir(path):
path = os.path.join(path, wfapi.attachment['filename'])
else:
path = wfapi.attachment['filename']
try:
f = open(path, 'wb')
except IOError as msg:
print('open %s: %s' % (path, msg), file=sys.stderr)
return
try:
f.write(wfapi.attachment['content'])
except IOError as msg:
print('write %s: %s' % (path, msg), file=sys.stderr)
f.close()
return
f.close()
print('saved %s' % path, file=sys.stderr)
def set_encoding():
#
# XXX UTF-8 won't encode to latin-1/ISO8859-1:
# UnicodeEncodeError: 'latin-1' codec can't encode character '\u2019'
#
# do PYTHONIOENCODING=utf8 equivalent
#
encoding = 'utf-8'
if hasattr(sys.stdin, 'detach'):
# >= 3.1
import io
for s in ('stdin', 'stdout', 'stderr'):
line_buffering = getattr(sys, s).line_buffering
# print(s, line_buffering, file=sys.stderr)
setattr(sys, s, io.TextIOWrapper(getattr(sys, s).detach(),
encoding=encoding,
line_buffering=line_buffering))
else:
import codecs
sys.stdin = codecs.getreader(encoding)(sys.stdin)
sys.stdout = codecs.getwriter(encoding)(sys.stdout)
sys.stderr = codecs.getwriter(encoding)(sys.stderr)
def usage():
usage = '''%s [options]
--submit path|url submit file or URL to WildFire for analysis
--submit-link link submit links to WildFire for analysis
--change-request request review of sample's verdict
--report get WildFire report
--verdict get WildFire sample verdict
--sample get WildFire sample file
--pcap get WildFire PCAP files
--changed get changed verdicts
--hash hash query MD5 or SHA256 hash
--platform id platform ID for sandbox environment
--new-verdict verdict benign|malware|grayware
--email address notification e-mail address
--comment comment change request explanation
--testfile get sample malware test file
--format format report output format
--date date start date for changed verdicts
(YYYY-MM-DD or -days)
--dst dst save file to directory or path
-K api_key WildFire API key
-h hostname WildFire hostname
-x print XML response to stdout
-p print XML response in Python to stdout
-j print XML response in JSON to stdout
-D enable debug (multiple up to -DDD)
-t tag .panrc tagname
-T seconds urlopen() timeout
--http use http URL scheme (default https)
--nocacloud disable default cloud CA certificate verification
--cafile path file containing CA certificates
--capath path directory of hashed certificate files
--version display version
--help display usage
'''
print(usage % os.path.basename(sys.argv[0]), end='')
if __name__ == '__main__':
main()
|
|
from __future__ import print_function, division
# Still to implement:
# - Performance monitoring
# - Remove resolved models
# - Optional FITS input/output
# - Output convolved fluxes
import numpy as np
from astropy import units as u
from . import timer
from .models import Models
from .source import Source
from .utils import io
from .utils.validator import validate_array
from . import six
from .fit_info import FitInfoFile
class Fitter(object):
"""
A fitter class that can be used to fit sources.
This class is initialized using a particular set of models, and with
specific fit parameters. It can then be used to fit data given by Source
instances, and returns a FitInfo instance. Once initialized, the fit
parameters cannot be changed, because changing most of them would require
re-reading the models from disk.
Parameters
----------
filter_names : tuple or list
List of filter names (given as individual strings) for which the data
is defined. The filter names should be the name of the files in the
``convolved`` directory for the models, without the extensions. This is
typically ``2J``, ``I1``, ``M1``, etc. You can also specify the
wavelength as a :class:`~astropy.units.quantity.Quantity` instance
instead of a filter name, and this will indicate that the SED fluxes
closest to the requested wavelength should be used in the fitting.
apertures : :class:`~astropy.units.quantity.Quantity` array instance
The aperture radii that the data is specified in (as an angle). The
fluxes may not be measured from aperture photometry, but this is meant
to give an indication of the sizescale of the emission, and can be used
to reject models that would have been clearly resolved at the distance
specified.
    model_dir : str
Name of the directory containing the models to use.
extinction_law : :class:`~sedfitter.extinction.Extinction` instance
The extinction law to use.
av_range : tuple
Minimum and maximum Av to allow in the fitting.
distance_range : :class:`~astropy.units.quantity.Quantity` array instance
Minimum and maximum distance to allow in the fitting in units of length.
remove_resolved : bool, optional
If set, then models larger than the aperture are removed. See
Robitaille et al. (2007) for a discussion of this criterion.
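    Examples
    --------
    A minimal usage sketch: ``extinction`` is assumed to be an
    :class:`~sedfitter.extinction.Extinction` instance loaded elsewhere,
    ``source`` a :class:`~sedfitter.source.Source`, and ``'YSO_models'`` an
    illustrative model directory name rather than a shipped default::

        from astropy import units as u

        fitter = Fitter(filter_names=['2J', '2H', '2K'],
                        apertures=[3., 3., 3.] * u.arcsec,
                        model_dir='YSO_models',
                        extinction_law=extinction,
                        av_range=(0., 40.),
                        distance_range=[1., 2.] * u.kpc)

        fit_info = fitter.fit(source)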
"""
def __init__(self, filter_names, apertures, model_dir,
extinction_law=None, av_range=None, distance_range=None,
remove_resolved=False):
validate_array('apertures', apertures, domain='positive', ndim=1, physical_type='angle')
validate_array('distance_range', distance_range, domain='positive', ndim=1, shape=(2,), physical_type='length')
if len(apertures) != len(filter_names):
raise ValueError("length of apertures list should match length of filter names list")
# Construct filters dictionary
self.filters = []
for i in range(len(apertures)):
filt = {'aperture_arcsec': apertures[i].to(u.arcsec).value}
if isinstance(filter_names[i], six.string_types):
filt['name'] = filter_names[i]
elif isinstance(filter_names[i], u.Quantity):
filt['wav'] = filter_names[i]
else:
raise ValueError("filter should be a string or a Quantity")
self.filters.append(filt)
# Read in models
self.models = Models.read(model_dir, self.filters, distance_range=distance_range, remove_resolved=remove_resolved)
# Add wavelength to filters
for i, f in enumerate(self.filters):
if 'wav' not in f:
f['wav'] = self.models.wavelengths[i]
# Set Av law
self.av_law = extinction_law.get_av(self.models.wavelengths)
# Set scale model - make this a scalar
self.sc_law = -2. * np.ones(self.av_law.shape)
self.model_dir = model_dir
self.av_range = av_range
self.extinction_law = extinction_law
def fit(self, source):
"""
Fit the specified source.
Parameters
----------
source : `~sedfitter.source.Source`
The source to fit.
Returns
-------
fit_info : `sedfitter.fit_info.FitInfo`
The results of the fit.
"""
info = self.models.fit(source, self.av_law, self.sc_law,
self.av_range[0], self.av_range[1])
info.meta.model_dir = self.model_dir
info.meta.filters = self.filters
info.meta.extinction_law = self.extinction_law
return info
def fit(data, filter_names, apertures, model_dir, output, n_data_min=3,
extinction_law=None, av_range=None, distance_range=None,
output_format=('F', 6.), output_convolved=False,
remove_resolved=False):
"""
Fit a set of sources with models.
Parameters
----------
data : str
Filename of the file containing the data, one source per line (see
documentation for a description of the required format).
filter_names : tuple or list
List of filter names (given as individual strings) for which the data
is defined. The filter names should be the name of the files in the
``convolved`` directory for the models, without the extensions. This is
typically ``2J``, ``I1``, ``M1``, etc. You can also specify the
wavelength as a :class:`~astropy.units.quantity.Quantity` instance
instead of a filter name, and this will indicate that the SED fluxes
closest to the requested wavelength should be used in the fitting.
apertures : :class:`~astropy.units.quantity.Quantity` array instance
The aperture radii that the data is specified in (as an angle). The
fluxes may not be measured from aperture photometry, but this is meant
to give an indication of the sizescale of the emission, and can be used
to reject models that would have been clearly resolved at the distance
specified.
    model_dir : str
Name of the directory containing the models to use.
output : str
Name of the file to output the fit information to (in binary format).
extinction_law : :class:`~sedfitter.extinction.Extinction` instance
The extinction law to use.
av_range : tuple
Minimum and maximum Av to allow in the fitting.
distance_range : :class:`~astropy.units.quantity.Quantity` array instance
Minimum and maximum distance to allow in the fitting in units of length.
n_data_min : int, optional
The minimum number of points a source needs to be fit.
output_format : tuple, optional
Tuple specifying which fits should be output. See the documentation
for a description of the tuple syntax.
output_convolved : bool, optional
Whether to output the convolved fluxes (necessary if the convolved
model fluxes are needed for the SED plot).
remove_resolved : bool, optional
If set, then models larger than the aperture are removed. See
Robitaille et al. (2007) for a discussion of this criterion.
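    Examples
    --------
    A minimal call sketch: ``extinction`` is an Extinction instance built
    elsewhere, and the data file, model directory, and output names are
    placeholders rather than defaults::

        from astropy import units as u

        fit('data_fluxes', ['2J', '2H', '2K'], [3., 3., 3.] * u.arcsec,
            'YSO_models', 'output.fitinfo',
            extinction_law=extinction,
            av_range=(0., 40.),
            distance_range=[1., 2.] * u.kpc)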
"""
fitter = Fitter(filter_names, apertures, model_dir,
extinction_law=extinction_law, av_range=av_range,
distance_range=distance_range,
remove_resolved=remove_resolved)
print(" ------------------------------------------------------------")
print(" => Fitting parameters")
print(" ------------------------------------------------------------")
print("")
print(" Minimum A_V : %9.3f mag" % av_range[0])
print(" Maximum A_V : %9.3f mag" % av_range[1])
print(" Minimum distance : %9.3f %s" % (distance_range[0].value, distance_range.unit))
print(" Maximum distance : %9.3f %s" % (distance_range[1].value, distance_range.unit))
print("")
print(" ------------------------------------------------------------")
print(" => Output parameters")
print(" ------------------------------------------------------------")
print("")
print(" File : %s" % output)
print(" Format : %s" % output_format[0])
print(" Number : %g" % output_format[1])
print("")
print(" ------------------------------------------------------------")
print(" => Data format parameters")
print(" ------------------------------------------------------------")
print("")
print(" Number of filters : %i" % len(filter_names))
print("")
# Open datafile
if isinstance(data, six.string_types):
data_file = open(data, 'r')
else:
data_file = data
print('')
print(' Filter Wavelength Aperture (") ')
print(' ----------------------------------------')
for f in fitter.filters:
print(' %5s %9.2f %9.2f ' % (f.get('name', ''), f['wav'].to(u.micron).value, f['aperture_arcsec']))
print('')
# Cycle through sources
io.delete_file(output)
fout = FitInfoFile(output, 'w')
s = Source()
t = timer.Timer()
while True:
try:
s = Source.from_ascii(data_file.readline())
except EOFError:
break
if s.n_data >= n_data_min:
info = fitter.fit(s)
if not output_convolved:
info.model_fluxes = None
info.keep(output_format)
fout.write(info)
t.display()
t.display(force=True)
fout.close()
|
|
from __future__ import print_function
import os
import sys
import glob
try:
from setuptools import setup, Extension, find_packages
except ImportError:
from distutils.core import setup, Extension, find_packages # noqa
from distutils import sysconfig
from distutils.errors import (
CCompilerError,
DistutilsExecError,
DistutilsPlatformError
)
HERE = os.path.dirname(os.path.abspath(__file__))
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32' and sys.version_info >= (2, 6):
# distutils.msvc9compiler can raise IOError if the compiler is missing
ext_errors += (IOError, )
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_py3k = sys.version_info[0] == 3
BUILD_WARNING = """
-----------------------------------------------------------------------
WARNING: The C extensions could not be compiled
-----------------------------------------------------------------------
Maybe you do not have a C compiler installed on this system?
The reason was:
%s
This is just a warning as most of the functionality will work even
without the updated C extension. It will simply fall back to the
built-in _multiprocessing module. Most notably you will not be able to use
FORCE_EXECV on POSIX systems. If this is a problem for you then please
install a C compiler or fix the error(s) above.
-----------------------------------------------------------------------
"""
# -*- py3k -*-
extras = {}
# -*- Distribution Meta -*-
import re
re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)')
re_vers = re.compile(r'VERSION\s*=\s*\((.*?)\)')
re_doc = re.compile(r'^"""(.+?)"""')
rq = lambda s: s.strip("\"'")
def add_default(m):
attr_name, attr_value = m.groups()
return ((attr_name, rq(attr_value)), )
def add_version(m):
v = list(map(rq, m.groups()[0].split(', ')))
return (('VERSION', '.'.join(v[0:4]) + ''.join(v[4:])), )
def add_doc(m):
return (('doc', m.groups()[0]), )
pats = {re_meta: add_default,
re_vers: add_version,
re_doc: add_doc}
here = os.path.abspath(os.path.dirname(__file__))
meta_fh = open(os.path.join(here, 'billiard/__init__.py'))
try:
meta = {}
for line in meta_fh:
if line.strip() == '# -eof meta-':
break
for pattern, handler in pats.items():
m = pattern.match(line.strip())
if m:
meta.update(handler(m))
finally:
meta_fh.close()
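# Illustrative sketch (not used by the build): shows how the patterns and
# handlers above turn metadata lines into ``meta`` entries.  The sample lines
# below are invented, not copied from billiard/__init__.py.
def _example_parse_meta():
    sample_lines = [
        "VERSION = (3, 3, 0)",
        "__author__ = 'Example Author'",
    ]
    demo = {}
    for sample in sample_lines:
        for pattern, handler in pats.items():
            m = pattern.match(sample.strip())
            if m:
                demo.update(handler(m))
    # demo == {'VERSION': '3.3.0', 'author': 'Example Author'}
    return demo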
if sys.version_info < (2, 5):
raise ValueError('Versions of Python before 2.5 are not supported')
if sys.platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif sys.platform.startswith('darwin'): # Mac OSX
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
HAVE_BROKEN_SEM_GETVALUE=1
)
libraries = []
elif sys.platform.startswith('cygwin'): # Cygwin
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=0,
HAVE_BROKEN_SEM_UNLINK=1
)
libraries = []
elif sys.platform in ('freebsd4', 'freebsd5', 'freebsd6'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict( # FreeBSD 4-6
HAVE_SEM_OPEN=0,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
)
libraries = []
elif re.match('^(gnukfreebsd(8|9|10|11)|freebsd(7|8|9|0))', sys.platform):
macros = dict( # FreeBSD 7+ and GNU/kFreeBSD 8+
HAVE_SEM_OPEN=bool(
sysconfig.get_config_var('HAVE_SEM_OPEN') and not
bool(sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED'))
),
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1,
)
libraries = []
elif sys.platform.startswith('openbsd'):
macros = dict( # OpenBSD
HAVE_SEM_OPEN=0, # Not implemented
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
)
libraries = []
else: # Linux and other unices
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1,
)
libraries = ['rt']
if sys.platform == 'win32':
multiprocessing_srcs = [
'Modules/_billiard/multiprocessing.c',
'Modules/_billiard/semaphore.c',
'Modules/_billiard/win32_functions.c',
]
else:
multiprocessing_srcs = [
'Modules/_billiard/multiprocessing.c',
]
if macros.get('HAVE_SEM_OPEN', False):
multiprocessing_srcs.append('Modules/_billiard/semaphore.c')
long_description = open(os.path.join(HERE, 'README.rst')).read()
long_description += """
===========
Changes
===========
"""
long_description += open(os.path.join(HERE, 'CHANGES.txt')).read()
if not is_py3k:
long_description = long_description.encode('ascii', 'replace')
# -*- Installation Requires -*-
py_version = sys.version_info
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
def strip_comments(l):
return l.split('#', 1)[0].strip()
def reqs(f):
return list(filter(None, [strip_comments(l) for l in open(
os.path.join(os.getcwd(), 'requirements', f)).readlines()]))
if py_version[0] == 3:
tests_require = reqs('test3.txt')
else:
tests_require = reqs('test.txt')
def _is_build_command(argv=sys.argv, cmds=('install', 'build', 'bdist')):
for arg in argv:
if arg.startswith(cmds):
return arg
def run_setup(with_extensions=True):
extensions = []
if with_extensions:
extensions = [
Extension(
'_billiard',
sources=multiprocessing_srcs,
define_macros=macros.items(),
libraries=libraries,
include_dirs=['Modules/_billiard'],
depends=glob.glob('Modules/_billiard/*.h') + ['setup.py'],
),
]
if sys.platform == 'win32':
extensions.append(
Extension(
'_winapi',
sources=multiprocessing_srcs,
define_macros=macros.items(),
libraries=libraries,
include_dirs=['Modules/_billiard'],
depends=glob.glob('Modules/_billiard/*.h') + ['setup.py'],
),
)
packages = find_packages(exclude=[
'ez_setup', 'tests', 'funtests.*', 'tests.*',
])
setup(
name='billiard',
version=meta['VERSION'],
description=meta['doc'],
long_description=long_description,
packages=packages,
ext_modules=extensions,
author=meta['author'],
author_email=meta['author_email'],
maintainer=meta['maintainer'],
maintainer_email=meta['contact'],
url=meta['homepage'],
zip_safe=False,
license='BSD',
tests_require=tests_require,
test_suite='nose.collector',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: C',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Distributed Computing',
],
**extras
)
try:
run_setup(not (is_jython or is_pypy or is_py3k))
except BaseException:
if _is_build_command(sys.argv):
import traceback
print(BUILD_WARNING % '\n'.join(traceback.format_stack()),
file=sys.stderr)
run_setup(False)
else:
raise
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import filecmp
import hashlib
import os
import random
import struct
import sys
import tempfile
import time
import unittest
from functools import partial
from io import open
from itertools import chain
import pexpect
try:
from itertools import izip as zip
except ImportError:
# Python 3
pass
current_dir = os.path.dirname(os.path.realpath(__file__))
mkuf2_dir = os.path.abspath(os.path.join(current_dir, '..'))
mkuf2_path = os.path.join(mkuf2_dir, 'mkuf2.py')
try:
import mkuf2
except ImportError:
sys.path.append(mkuf2_dir)
import mkuf2
class UF2Block(object):
def __init__(self, bs):
self.length = len(bs)
# See https://github.com/microsoft/uf2 for the format
first_part = '<' + 'I' * 8
        # the data area (payload and any trailing metadata) sits between the header words and the final magic
last_part = '<I'
first_part_len = struct.calcsize(first_part)
last_part_len = struct.calcsize(last_part)
(self.magicStart0, self.magicStart1, self.flags, self.targetAddr, self.payloadSize, self.blockNo,
self.numBlocks, self.familyID) = struct.unpack(first_part, bs[:first_part_len])
self.data = bs[first_part_len:-last_part_len]
(self.magicEnd, ) = struct.unpack(last_part, bs[-last_part_len:])
def __len__(self):
return self.length
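# Illustrative sketch (not used by the tests): the inverse of the parsing
# above.  A UF2 block is eight little-endian uint32 header words, a fixed-size
# data area, and a closing uint32 magic.  All values here are caller-supplied;
# no particular magic numbers or family IDs are assumed, and the payload is
# expected to fit inside the data area.
def _example_pack_uf2_block(header_words, data, final_magic, block_size=512):
    # header_words = (magicStart0, magicStart1, flags, targetAddr,
    #                 payloadSize, blockNo, numBlocks, familyID)
    data_area = data.ljust(block_size - 8 * 4 - 4, b'\x00')
    return struct.pack('<8I', *header_words) + data_area + struct.pack('<I', final_magic)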
class UF2BlockReader(object):
def __init__(self, f_name):
self.f_name = f_name
def get(self):
with open(self.f_name, 'rb') as f:
for chunk in iter(partial(f.read, mkuf2.UF2Writer.UF2_BLOCK_SIZE), b''):
yield UF2Block(chunk)
class BinaryWriter(object):
def __init__(self, f_name):
self.f_name = f_name
def append(self, data):
        # The file is reopened on every append to make sure it is never left open
with open(self.f_name, 'ab') as f:
f.write(data)
class BinaryTester(unittest.TestCase):
def generate_binary(self, size):
with tempfile.NamedTemporaryFile(delete=False) as f:
self.addCleanup(os.unlink, f.name)
for _ in range(size):
f.write(struct.pack('B', random.randrange(0, 1 << 8)))
return f.name
@staticmethod
def generate_chipID():
return random.randrange(0, 1 << 32)
def generate_uf2(self, chip_id, iter_addr_offset_tuples, chunk_size=None):
of_name = self.generate_binary(0)
com_args = [mkuf2_path, 'write',
'-o', of_name,
'--chip-id', hex(chip_id)]
com_args += [] if chunk_size is None else ['--chunk-size', str(chunk_size)]
file_args = list(chain(*[(str(addr), f) for addr, f in iter_addr_offset_tuples]))
p = pexpect.spawn(sys.executable, com_args + file_args, timeout=20)
self.addCleanup(p.terminate, force=True)
exp_list = ['Adding {} at {}'.format(f, hex(addr)) for addr, f in iter_addr_offset_tuples]
exp_list += ['"{}" has been written.'.format(of_name)]
for e in exp_list:
p.expect_exact(e)
# Do non-blocking wait instead of the blocking p.wait():
for _ in range(10):
if not p.isalive():
break
time.sleep(0.5)
# else: will be terminated during cleanup
return of_name
def process_blocks(self, uf2block, expected_chip_id):
flags = mkuf2.UF2Writer.UF2_FLAG_FAMILYID_PRESENT | mkuf2.UF2Writer.UF2_FLAG_MD5_PRESENT
parsed_binaries = []
block_list = [] # collect block numbers here
total_blocks = set() # collect total block numbers here
for block in UF2BlockReader(uf2block).get():
if block.blockNo == 0:
# new file has been detected
base_addr = block.targetAddr
current_addr = base_addr
binary_writer = BinaryWriter(self.generate_binary(0))
self.assertEqual(len(block), mkuf2.UF2Writer.UF2_BLOCK_SIZE)
self.assertEqual(block.magicStart0, mkuf2.UF2Writer.UF2_FIRST_MAGIC)
self.assertEqual(block.magicStart1, mkuf2.UF2Writer.UF2_SECOND_MAGIC)
self.assertEqual(block.flags & flags, flags)
self.assertEqual(len(block.data), mkuf2.UF2Writer.UF2_DATA_SIZE)
payload = block.data[:block.payloadSize]
md5_obj = hashlib.md5(payload)
md5_part = block.data[block.payloadSize:block.payloadSize + mkuf2.UF2Writer.UF2_MD5_PART_SIZE]
address, length = struct.unpack('<II', md5_part[:-md5_obj.digest_size])
md5sum = md5_part[-md5_obj.digest_size:]
self.assertEqual(address, block.targetAddr)
self.assertEqual(length, block.payloadSize)
self.assertEqual(md5sum, md5_obj.digest())
self.assertEqual(block.familyID, expected_chip_id)
self.assertEqual(block.magicEnd, mkuf2.UF2Writer.UF2_FINAL_MAGIC)
self.assertEqual(current_addr, block.targetAddr)
binary_writer.append(payload)
block_list.append(block.blockNo)
total_blocks.add(block.numBlocks)
if block.blockNo == block.numBlocks - 1:
self.assertEqual(block_list, list(range(block.numBlocks)))
# we have found all blocks and in the right order
self.assertEqual(total_blocks, {block.numBlocks}) # numBlocks are the same in all the blocks
del block_list[:]
total_blocks.clear()
parsed_binaries += [(base_addr, binary_writer.f_name)]
current_addr += block.payloadSize
return parsed_binaries
def common(self, t, chunk_size=None):
chip_id = self.generate_chipID()
parsed_t = self.process_blocks(self.generate_uf2(chip_id, t, chunk_size), chip_id)
self.assertEqual(len(t), len(parsed_t))
for (orig_addr, orig_fname), (addr, fname) in zip(t, parsed_t):
self.assertEqual(orig_addr, addr)
self.assertTrue(filecmp.cmp(orig_fname, fname))
def test_simple(self):
self.common([(0, self.generate_binary(1))])
def test_more_files(self):
self.common([(100, self.generate_binary(1)), (200, self.generate_binary(1))])
def test_larger_files(self):
self.common([(0x10, self.generate_binary(6)), (0x20, self.generate_binary(8))])
def test_boundaries(self):
self.common([(0x100, self.generate_binary(mkuf2.UF2Writer.UF2_DATA_SIZE)),
(0x200, self.generate_binary(mkuf2.UF2Writer.UF2_DATA_SIZE + 1)),
(0x300, self.generate_binary(mkuf2.UF2Writer.UF2_DATA_SIZE - 1))])
def test_files_with_more_blocks(self):
self.common([(0x100, self.generate_binary(3 * mkuf2.UF2Writer.UF2_DATA_SIZE)),
(0x200, self.generate_binary(2 * mkuf2.UF2Writer.UF2_DATA_SIZE + 1)),
(0x300, self.generate_binary(2 * mkuf2.UF2Writer.UF2_DATA_SIZE - 1))])
def test_very_large_files(self):
self.common([(0x100, self.generate_binary(20 * mkuf2.UF2Writer.UF2_DATA_SIZE + 5)),
(0x10000, self.generate_binary(50 * mkuf2.UF2Writer.UF2_DATA_SIZE + 100)),
(0x100000, self.generate_binary(100 * mkuf2.UF2Writer.UF2_DATA_SIZE))])
def test_chunk_size(self):
chunk_size = 256
self.common([(0x100, self.generate_binary(chunk_size)),
(0x200, self.generate_binary(chunk_size + 1)),
(0x300, self.generate_binary(chunk_size - 1))],
chunk_size)
if __name__ == '__main__':
unittest.main()
|
|
import ast
import traceback
import os
import sys
userFunctions = {}
renames = ['vex.pragma','vex.motor','vex.slaveMotors','vex.motorReversed']
classNames = []
indent = ' '
sameLineBraces = True
compiled = {}
def module_rename(aNode):
if aNode.func.print_c() == 'vex.pragma':
asC = '#pragma '
useComma = False
pragmaDirective = aNode.args.pop(0)
asC += pragmaDirective.s
if aNode.args:
asC += '('
for arg in aNode.args:
if useComma:
asC += ', '
else:
useComma = True
asC += arg.print_c()
asC += ')'
asC += '\n'
return asC
elif aNode.func.print_c() == 'vex.motor':
asC = 'motor[' + aNode.args[0].print_c()
asC += '] = ' + aNode.args[1].print_c()
return asC
elif aNode.func.print_c() == 'vex.slaveMotors':
masterMotor = aNode.args.pop(0).print_c()
asC = ''
for slave in aNode.args:
asC += 'slaveMotor(' + slave.print_c() + ', ' + masterMotor + ');\n'
return asC[:-2]
elif aNode.func.print_c() == 'vex.motorReversed':
asC = 'bMotorReflected[' + aNode.args[0].print_c()
asC += '] = ' + aNode.args[1].print_c()
return asC
return 'Unknown function. This should not happen'
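# For orientation, the rewrites above map roughly as follows (argument names
# are invented for illustration):
#   vex.pragma('config', Sensor, in1)  ->  #pragma config(Sensor, in1)
#   vex.motor(port2, 127)              ->  motor[port2] = 127
#   vex.slaveMotors(port2, port3)      ->  slaveMotor(port3, port2)
#   vex.motorReversed(port2, True)     ->  bMotorReflected[port2] = true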
def escape_string(s, unicode = False, max_length = 200):
ret = []
# Try to split on whitespace, not in the middle of a word.
split_at_space_pos = max_length - 10
if split_at_space_pos < 10:
split_at_space_pos = None
position = 0
if unicode:
position += 1
ret.append('L')
ret.append('"')
position += 1
for c in s:
newline = False
if c == "\n":
to_add = r"\n"
newline = True
elif ord(c) < 32 or 0x80 <= ord(c) <= 0xff:
to_add = r"\x{:02X}".format(ord(c))
elif ord(c) > 0xff:
if not unicode:
raise ValueError("string contains unicode character but unicode=False")
to_add = r"\u{:04X}".format(ord(c))
elif r'\"'.find(c) != -1:
to_add = r"\{}".format(c)
else:
to_add = c
ret.append(to_add)
position += len(to_add)
if newline:
position = 0
if split_at_space_pos is not None and position >= split_at_space_pos and " \t".find(c) != -1:
ret.append("\\\n")
position = 0
elif position >= max_length:
ret.append("\\\n")
position = 0
ret.append('"')
return "".join(ret)
class C_Module(ast.Module):
def prepare(self):
pass
def print_c(self):
asC = ''
for node in self.body:
try:
asC += node.print_c()
except Exception as e:
print(traceback.format_exc())
print("Current code:")
print(asC)
return asC
class C_Bytes(ast.Bytes):
def prepare(self):
pass
def print_c(self):
return escape_string(self.s.decode('utf-8'),True)
class C_Str(ast.Str):
def prepare(self):
pass
def print_c(self):
return escape_string(self.s)
class C_Num(ast.Num):
def prepare(self):
pass
def print_c(self):
return str(self.n)
class C_FunctionDef(ast.FunctionDef):
def prepare(self):
"""Prepare for writing. Take note of return types, class names, etc..."""
if self.returns:
userFunctions[self.name] = self.returns.print_c()
def print_c(self):
asC = '\n'
if ast.get_docstring(self):
asC += '/*\n'
asC += ast.get_docstring(self)
self.body.pop(0)
asC += '\n*/\n'
asC += self.returns.id + ' ' + self.name + '('
isFirst = True
for i, argNode in enumerate(self.args.args):
arg = argNode.arg
try:
argType = argNode.annotation.print_c()
except:
argType = argNode.annotation
if isFirst:
isFirst = False
else:
asC += ', '
asC += argType + ' ' + arg
if i >= self.args.minArgs:
asC += ' = ' + (self.args.defaults[i - self.args.minArgs]).print_c()
if sameLineBraces:
asC += ') {\n'
else:
asC += ')\n{\n'
for childNode in self.body:
try:
unindented = childNode.print_c()
unindented = '\n'.join([indent + x for x in unindented.split('\n')])
if not unindented.endswith('}'):
unindented += ';'
unindented += '\n'
asC += unindented
except Exception as e:
print(traceback.format_exc())
print(ast.dump(childNode))
return asC
asC += '}\n'
return asC
class C_arguments(ast.arguments):
def prepare(self):
self.minArgs = len(self.args) - len(self.defaults)
self.maxArgs = len(self.args)
def print_c(self):
return self
class C_Name(ast.Name):
def prepare(self):
pass
def print_c(self):
if self.id == 'True':
return 'true'
elif self.id == 'False':
return 'false'
elif self.id == 'None':
return '0'
return self.id
if "NameConstant" in ast.__dict__:
class C_NameConstant(ast.NameConstant):
def prepare(self):
pass
def print_c(self):
if self.value == True:
# True
return 'true'
elif self.value == False:
# False
return 'false'
else:
return '0'
class C_Expr(ast.Expr):
def prepare(self):
pass
def print_c(self):
return self.value.print_c()
class C_UnaryOp(ast.UnaryOp):
def prepare(self):
pass
def print_c(self):
return self.op.print_c() + self.operand.print_c()
class C_UAdd(ast.UAdd):
def prepare(self):
pass
def print_c(self):
return '+'
class C_USub(ast.USub):
def prepare(self):
pass
def print_c(self):
return '-'
class C_Not(ast.Not):
def prepare(self):
pass
def print_c(self):
return '!'
class C_Invert(ast.Invert):
def prepare(self):
pass
def print_c(self):
return '~'
class C_BinOp(ast.BinOp):
def prepare(self):
pass
def print_c(self):
return '({left} {op} {right})'.format(
left = self.left.print_c(),
op = self.op.print_c(),
right = self.right.print_c())
class C_Add(ast.Add):
def prepare(self):
pass
def print_c(self):
return '+'
class C_Sub(ast.Sub):
def prepare(self):
pass
def print_c(self):
return '-'
class C_Mult(ast.Mult):
def prepare(self):
pass
def print_c(self):
return '*'
class C_Div(ast.Div):
def prepare(self):
pass
def print_c(self):
return '/'
class C_Mod(ast.Mod):
def prepare(self):
pass
def print_c(self):
return '%'
class C_LShift(ast.LShift):
def prepare(self):
pass
def print_c(self):
return '<<'
class C_RShift(ast.RShift):
def prepare(self):
pass
def print_c(self):
return '>>'
class C_BitOr(ast.BitOr):
def prepare(self):
pass
def print_c(self):
return '|'
class C_BitXor(ast.BitXor):
def prepare(self):
pass
def print_c(self):
return '^'
class C_BitAnd(ast.BitAnd):
def prepare(self):
pass
def print_c(self):
return '&'
class C_BoolOp(ast.BoolOp):
def prepare(self):
pass
def print_c(self):
asC = '(' + self.values.pop(0).print_c()
for value in self.values:
asC += ' ' + self.op.print_c() + ' '
asC += value.print_c()
return asC + ')'
class C_And(ast.And):
def prepare(self):
pass
def print_c(self):
return '&&'
class C_Or(ast.Or):
def prepare(self):
pass
def print_c(self):
return '||'
class C_Compare(ast.Compare):
def prepare(self):
pass
def print_c(self):
asC = ''
self.comparators.insert(0,self.left)
addAnd = False
for i,op in enumerate(self.ops):
if addAnd:
asC += ' && '
else:
addAnd = True
asC += '(' + self.comparators[i].print_c() + ' '
asC += op.print_c()
asC += ' ' + self.comparators[i + 1].print_c() + ')'
return asC
class C_Eq(ast.Eq):
def prepare(self):
pass
def print_c(self):
return '=='
class C_NotEq(ast.NotEq):
def prepare(self):
pass
def print_c(self):
return '!='
class C_Lt(ast.Lt):
def prepare(self):
pass
def print_c(self):
return '<'
class C_LtE(ast.LtE):
def prepare(self):
pass
def print_c(self):
return '<='
class C_Gt(ast.Gt):
def prepare(self):
pass
def print_c(self):
return '>'
class C_GtE(ast.GtE):
def prepare(self):
pass
def print_c(self):
return '>='
class C_Call(ast.Call):
def prepare(self):
pass
def print_args(self):
asC = ''
for arg in self.args:
asC += ', '
asC += arg.print_c()
return asC
def print_c(self):
if self.func.print_c() in renames:
return module_rename(self)
if isinstance(self.func,C_Attribute):
# Convert OOP calls to regular function calls
self.args.insert(0,self.func.value)
self.func = C_Name(self.func.attr,None)
asC = self.func.print_c() + '('
useComma = False
for arg in self.args:
if useComma:
asC += ', '
else:
useComma = True
asC += arg.print_c()
asC += ')'
return asC
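# For example (names invented), the OOP-to-free-function rewrite above turns
#   robot.drive(5)  ->  drive(robot, 5)
# while calls listed in ``renames`` are handled by module_rename() instead.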
class C_IfExp(ast.IfExp):
def prepare(self):
pass
def print_c(self):
asC = '(' + self.test.print_c()
asC += ' ? ' + self.body.print_c()
asC += ' : ' + self.orelse.print_c() + ')'
return asC
class C_Attribute(ast.Attribute):
def prepare(self):
pass
def print_c(self):
return self.value.print_c() + '.' + self.attr
class C_Subscript(ast.Subscript):
def prepare(self):
pass
def print_c(self):
return self.value.print_c() + '[' + self.slice.print_c() + ']'
class C_Index(ast.Index):
def prepare(self):
pass
def print_c(self):
return self.value.print_c()
class C_Assign(ast.Assign):
def prepare(self):
pass
def print_c(self):
asC = ''
for target in self.targets:
asC += target.print_c() + ' = '
asC += self.value.print_c()
return asC
if "AnnAssign" in ast.__dict__:
class C_AnnAssign(ast.AnnAssign):
def prepare(self):
pass
def print_c(self):
asC = self.annotation.print_c() + ' '
asC += self.target.print_c()
if isinstance(self.value, C_Call) and self.value.func.print_c() in classNames:
asC += ';\n'
asC += self.value.func.print_c() + '___init__('
asC += self.target.print_c()
asC += self.value.print_args() + ')'
else:
if self.value:
asC += ' = ' + self.value.print_c()
return asC
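# Rough illustration of the lowering above (type and class names invented):
#   x: int = 5           ->  int x = 5
#   r: Robot = Robot(3)  ->  Robot r;
#                            Robot___init__(r, 3)
# (Robot must have been seen as a ClassDef so that it is in classNames.)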
class C_AugAssign(ast.AugAssign):
def prepare(self):
pass
def print_c(self):
asC = self.target.print_c() + ' '
asC += self.op.print_c() + '= '
asC += self.value.print_c()
return asC
class C_Assert(ast.Assert):
def prepare(self):
pass
def print_c(self):
return 'VERIFY(' + self.test.print_c() + ')'
class C_Pass(ast.Pass):
def prepare(self):
pass
def print_c(self):
return ''
class C_Import(ast.Import):
def prepare(self):
pass
def print_c(self):
importName = '/'.join(self.names[0].name.split('.'))
return '#include ' + importName + '.c\n'
class C_If(ast.If):
def prepare(self):
pass
def print_c(self):
asC = 'if ('
asC += self.test.print_c()
if sameLineBraces:
asC += ') {\n'
else:
asC += ')\n{\n'
for childNode in self.body:
try:
unindented = childNode.print_c()
unindented = '\n'.join([indent + x for x in unindented.split('\n')])
if not unindented.endswith('}'):
unindented += ';'
unindented += '\n'
asC += unindented
except Exception as e:
print(traceback.format_exc())
print(ast.dump(childNode))
return asC
asC += '}'
if self.orelse:
if sameLineBraces:
asC += ' else {\n'
else:
asC += '\nelse\n{\n'
for childNode in self.orelse:
try:
unindented = childNode.print_c()
unindented = '\n'.join([indent + x for x in unindented.split('\n')])
if not unindented.endswith('}'):
unindented += ';'
unindented += '\n'
asC += unindented
except Exception as e:
print(traceback.format_exc())
print(ast.dump(childNode))
return asC
asC += '}'
return asC
class C_For(ast.For):
def prepare(self):
pass
def print_c(self):
# Only supports for _ in range() for now
asC = ''
var = self.target.print_c()
low = '0'
step = '1'
if len(self.iter.args) > 1:
low = self.iter.args[0].print_c()
high = self.iter.args[1].print_c()
if len(self.iter.args) > 2:
step = self.iter.args[2].print_c()
else:
high = self.iter.args[0].print_c()
asC += 'for (' + var + ' = '
asC += low
asC += '; ' + var + ' < ' + high + '; ' + var + ' += ' + step
if sameLineBraces:
asC += ') {\n'
else:
asC += ')\n{\n'
for childNode in self.body:
try:
unindented = childNode.print_c()
unindented = '\n'.join([indent + x for x in unindented.split('\n')])
if not unindented.endswith('}'):
unindented += ';'
unindented += '\n'
asC += unindented
except Exception as e:
print(traceback.format_exc())
print(ast.dump(childNode))
return asC
return asC + '}'
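# Illustration (loop variable name invented): the range() lowering above gives
#   for i in range(10):        ->  for (i = 0; i < 10; i += 1) { ... }
#   for i in range(2, 10, 3):  ->  for (i = 2; i < 10; i += 3) { ... }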
class C_While(ast.While):
def prepare(self):
pass
def print_c(self):
asC = 'while (' + self.test.print_c()
if sameLineBraces:
asC += ') {\n'
else:
asC += ')\n{\n'
for childNode in self.body:
try:
unindented = childNode.print_c()
unindented = '\n'.join([indent + x for x in unindented.split('\n')])
if not unindented.endswith('}'):
unindented += ';'
unindented += '\n'
asC += unindented
except Exception as e:
print(traceback.format_exc())
print(ast.dump(childNode))
return asC
return asC + '}'
class C_Break(ast.Break):
def prepare(self):
pass
def print_c(self):
return 'break'
class C_Continue(ast.Continue):
def prepare(self):
pass
def print_c(self):
return 'continue'
class C_Return(ast.Return):
def prepare(self):
pass
def print_c(self):
return 'return ' + self.value.print_c()
class C_ClassDef(ast.ClassDef):
def prepare(self):
classNames.append(self.name)
def print_c(self):
asC = '/*** Class: ' + self.name + ' ***/\n'
varNames = ClassVariables.scanIn(self)
if ast.get_docstring(self):
asC += '/*\n'
asC += ast.get_docstring(self)
self.body.pop(0)
asC += '\n*/\n'
asC += 'typedef struct'
if sameLineBraces:
asC += ' {\n'
else:
asC += '\n{\n'
for var,type in varNames.items():
asC += indent + type + ' ' + var + ';\n'
asC += '} ' + self.name + ';\n'
for node in self.body:
try:
asC += node.print_c()
except Exception as e:
print(traceback.format_exc())
print("Current code:")
print(asC)
asC += '\n/*** End Class: ' + self.name + ' ***/\n'
return asC
class ClassVariables(ast.NodeVisitor):
def __init__(self,*args,**kwargs):
super(ClassVariables,self).__init__(*args,**kwargs)
self.varNames = {}
def visit_C_AnnAssign(self, aNode):
if aNode.target.print_c().startswith('self.'):
if aNode.target.attr in self.varNames:
if not self.varNames[aNode.target.attr] == aNode.annotation.print_c():
raise TypeError("Redefining a type not permitted in {}->{}".format(self.parentNode.name,aNode.target.print_c()))
else:
self.varNames[aNode.target.attr] = aNode.annotation.print_c()
aNode.__class__ = C_Assign
aNode.targets = [aNode.target]
self.generic_visit(aNode)
@classmethod
def scanIn(cls, aNode):
walker = cls()
walker.parentNode = aNode
walker.visit(aNode)
return walker.varNames
class CNodeTransformer(ast.NodeVisitor):
def __init__(self, *args, **kwargs):
self.toPrepare = []
self.currentClass = None
super(CNodeTransformer,self).__init__(*args,**kwargs)
def visit_C_Import(self, aNode):
# Make sure that we've compiled this file.
filePath = '/'.join(aNode.names[0].name.split('.')) + '.py'
compile_to_c(filePath)
def visit_C_ClassDef(self, aNode):
previousClass = self.currentClass
self.currentClass = aNode
self.generic_visit(aNode)
self.currentClass = previousClass
def visit_C_FunctionDef(self, aNode):
if self.currentClass:
# Since we're scanning this anyways, get this function ready for a class!
if aNode.name == '__init__':
aNode.name = self.currentClass.name + '_' + aNode.name
aNode.args.args[0].annotation = self.currentClass.name # Force use of class
self.generic_visit(aNode)
def visit(self, node):
"""Visit a node."""
if 'C_' + node.__class__.__name__ in globals():
node.__class__ = globals()['C_' + node.__class__.__name__]
self.toPrepare.append(node)
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
visitor(node) # Recursively replace classes
def compile_to_c(filename):
if not os.path.exists(filename):
if os.path.exists(os.path.join(os.path.dirname(os.path.realpath(__file__)),filename)):
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),filename)
else:
if os.path.exists(os.path.join(os.path.dirname(os.path.realpath(sys.argv[1])),filename)):
filename = os.path.join(os.path.dirname(os.path.realpath(sys.argv[1])),filename)
else:
raise FileNotFoundError(filename)
if not os.path.abspath(filename) in compiled:
module = ast.parse(open(filename, 'r').read())
compiled[os.path.abspath(filename)] = '' # At least fill it in
transformer = CNodeTransformer()
transformer.visit(module)
for nodeToPrepare in transformer.toPrepare:
nodeToPrepare.prepare()
compiled[os.path.abspath(filename)] = module.print_c()
def commonprefix(l):
    # Unlike os.path.commonprefix, this always returns a valid path prefix,
    # because it compares the paths component-wise rather than
    # character-wise.
cp = []
ls = [p.split(os.path.sep) for p in l]
ml = min( len(p) for p in ls )
for i in range(ml):
s = set( p[i] for p in ls )
if len(s) != 1:
break
cp.append(s.pop())
return os.path.sep.join(cp)
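# Small sketch (POSIX-style paths, made up): os.path.commonprefix would return
# '/a/b' here, which is not a real directory, while the component-wise version
# above returns '/a'.
def _example_commonprefix():
    return commonprefix(['/a/b/file1.py', '/a/b2/file2.py'])  # -> '/a'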
if __name__ == '__main__':
if len(sys.argv) < 2:
print(f"Usage: {__file__} [file]")
sys.exit(1)
compile_to_c(sys.argv[1])
common = commonprefix(compiled)
withRelNames = {os.path.relpath(abspath,common):contents for abspath,contents in compiled.items()}
for file,contents in withRelNames.items():
filename = os.path.join(os.path.dirname(os.path.realpath(sys.argv[1])),os.path.join('output',os.path.splitext(file)[0] + '.c'))
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename,'w') as c_file:
c_file.write(contents)
|
|
from copy import deepcopy
import curves as c
from sys import stderr
#from micc.cgraph import cdfs
def shift(path):
'''
    Cyclically rotate path so that it starts at its smallest element.
'''
temp = path.index(min(path))
return path[temp:] + path[:temp]
def invert(path):
'''
    Reverse path and rotate it into the same canonical, smallest-element-first form.
'''
return shift(path[::-1])
def contains(small, big):
print type(small), type(big)
for i in xrange(len(big)-len(small)+1):
for j in xrange(len(small)):
if big[i+j] != small[j]:
break
else:
return i, i+len(small)
return False
class Graph:
def add_node(self, node):
self.nodes[node] = []
def __init__(self, edges, rep_num=2):
self.edges = edges
self.rep_num = rep_num
self.nodes = {}
self.counter = 0
self.loops = []
self.gammas = []
self.nodes_to_faces = {}
self.rep_num = rep_num
def compute_loops(self, n, genus):
edges = self.edges[0]
fourgons = [i[1] for i in edges if i[0] == 4]
non_fourgons = [i[1] for i in edges if i[0] != 4]
keys = self.nodes_to_faces.keys()
for i,face in enumerate(non_fourgons):
for node in face:
if not node in keys:
self.nodes_to_faces[node] = [i]
else:
self.nodes_to_faces[node].append(i)
keys = self.nodes_to_faces.keys()
for face in fourgons:
for node in face:
if not node in keys:
self.nodes_to_faces[node] = [None]
else:
self.nodes_to_faces[node].append(None)
keys = self.nodes_to_faces.keys()
self.nodes_to_faces = {int(k): tuple(v) for k,v in self.nodes_to_faces.iteritems()}
nodes = range(n)
for i in nodes:
self.add_node(i)
self.find_all_edges(fourgons, non_fourgons, nodes, self.rep_num)
graph_copy = deepcopy(self.nodes)
#stderr.write(str(edges)+'\n')
#stderr.write(str(graph_copy)+'\n')
#graph_copy = {i : set(j) for i,j in self.nodes.iteritems()}
#for k,v in graph_copy.iteritems():
# stderr.write(str(k)+": "+str(v)+'\n')
#raw_input()
#from sys import stderr
#stderr.write(str(graph_copy)+'\n')
#stderr.write(str(self.nodes_to_faces)+'\n')
#self.loops = cdfs(0,0,graph_copy,[], self.nodes_to_faces)
for start_node in nodes:
#self.loops.extend(self.iter_loop_dfs(graph_copy, start_node, start_node))
for adj_node in graph_copy[start_node]:
#print start_node,adj_node,graph_copy,[start_node], self.nodes_to_faces
self.loops += loop_dfs(start_node,adj_node,graph_copy,[start_node], self.nodes_to_faces)
#self.loops += self.iter_loop_dfs(graph_copy, start_node, start_node,self.nodes_to_faces)
'''
#Johnson circuit locating algorithm
from johnson import Johnson
johnny = Johnson(graph_copy)
johnny.find_all_circuits()
self.loops = johnny.circuits
'''
#print len(self.loops)
from itertools import chain
self.loops = [j for j in set([tuple(i) for i in self.loops])]
edges = self.edges[1]
for path in list(self.loops):
temp_len = len(path)
#in_loops = path in set(self.loops)
removed = False
if temp_len < 3:
if not removed:
self.loops.remove(path)
removed = True
elif invert(path) in self.loops:
if not removed:
self.loops.remove(path)
removed = True
# Trial: remove all duplicates
else:
temp_path = list(path)
#temp_len = len(temp_path)
temp_path = shift(temp_path)
for face in non_fourgons:
for triple in [temp_path[i:i+3] \
for i in xrange(temp_len-2)]:
if set(triple) <= face:
if not removed:
self.loops.remove(path)
removed = True
break
if removed:
break
temp_path = invert(temp_path)
for triple in [temp_path[i:i+3] \
for i in xrange(temp_len-2)]:
if set(triple) <= face:
if not removed:
self.loops.remove(path)
removed = True
break
for i in xrange(temp_len):
temp_path = temp_path[1:] + temp_path[:1]
for triple in (temp_path[i:i+3] for i in xrange(temp_len-2)):
boolA = set(triple) <= face
#boolA = contains(triple,face)
#if set(triple) <= set(face) and path in self.loops:
if boolA :
if not removed:
self.loops.remove(path)
removed = True
break
if removed:
break
for loop in list(self.loops):
path = list(loop)
path_matrix = c.build_matrices(edges, [path])
ladder = [list(path_matrix[0][0,:,1]),list(path_matrix[0][0,:,3])]
gamma = c.CurvePair(ladder[0],ladder[1],0, 0)
if gamma.genus <= genus:
self.gammas.append(loop)
@staticmethod
def get_value(pos_to_insert, ladder, path):
return int(path[int(ladder[int(pos_to_insert)])])+1
def find_all_edges(self, fourgons, non_fourgons, alpha_edge_nodes, rep_num):
'''
        Determines all edges between boundary components and adds adjacencies
        between the appropriate alpha edges.
'''
#find all direct connections between non-fourgon regions
regions = fourgons + non_fourgons
for alpha_edge in alpha_edge_nodes:
for region in regions:
if alpha_edge in region:
region.remove(alpha_edge)
for other_edge in region:
self.add_adjacency(alpha_edge, int(other_edge), rep_num)
region.add(alpha_edge)
def add_adjacency(self,node, adjacent_node, rep_num):
'''
Adds adjacencies to a graph represented by an adjacency list.
This is useful when we would like to replicate adjacencies
between nodes.
:param self:
:type self: Graph
        :param node: node to add the adjacency to
        :type node: int
        :param adjacent_node: the adjacent node
        :type adjacent_node: int
        :param rep_num: maximum number of copies of each edge to record
        :type rep_num: int
'''
for i in range(rep_num):
adjacency_list = self.nodes[node]
if Graph.count(adjacent_node, adjacency_list) < rep_num:
adjacency_list.append(adjacent_node)
adjacency_list = self.nodes[adjacent_node]
if Graph.count(node, adjacency_list) < rep_num:
adjacency_list.append(node)
@staticmethod
def count(adj_node, adj_list):
        '''
        Determines the number of adjacencies between two nodes in a graph,
        given the adjacency list of one of the nodes.
        :param adj_node: Adjacent node
        :type adj_node: int
        :param adj_list: Adjacency list of the node in question
        :type adj_list: list<int>
        :returns: number of edges from the adjacent node to the original node
        '''
return adj_list.count(adj_node)
@staticmethod
def faces_share_edges(nodes_to_faces, path):
path_head_3 = path[-3:]
previous_three_faces = [nodes_to_faces[edge] for edge in path_head_3]
previous_three_faces = [set(i) for i in previous_three_faces]
intersection_all = set.intersection(*previous_three_faces)
return len(intersection_all) == 2
def iter_loop_dfs(self, graph, start, goal, nodes_to_faces):
loops = []
stack = [(start, [start])]
while stack:
vertex, path = stack.pop()
in_path = set(path)
for next in graph[vertex]:
if next in in_path:
if len(path) >= 3:
if Graph.faces_share_edges(nodes_to_faces, path):
continue
if next == goal:
loops.append(list(path))
else:
continue
else:
stack.append((next, list(path + [next])))
return loops
from sys import stderr
def loop_dfs( current_node, start_node, graph, current_path, nodes_to_faces):
'''
Recursively finds all closed cycles in a given graph that begin and end at start_node.
As one would guess, it employs a standard depth-first search algorithm on the graph,
appending current_path to all_loops when it returns to start_node.
In the overall distance computation, this function is computationally dominating with
exponential complexity, so take care with its use.
    :param current_node: the current alpha edge in the recursion
    :type current_node: int
    :param start_node: the source node of the current recursive search
    :type start_node: int
    :param graph: adjacency list of the overall graph mid-recursion
    :type graph: dict<int, list<int> >
    :param current_path: list of nodes in the current path
    :type current_path: list<int>
    :param nodes_to_faces: mapping from alpha edges to the faces they border
    :type nodes_to_faces: dict<int, tuple>
    :returns: list of all closed cycles in the graph starting and ending at start_node
'''
if len(current_path) >= 3:
path_head_3 = current_path[-3:]
previous_three_faces = [set(nodes_to_faces[edge]) for edge in path_head_3]
intersection_all = set.intersection(*previous_three_faces)
if len(intersection_all) == 2:
return []
if current_node == start_node:
#stderr.write("Found one! \n")
#all_loops.append(shift(list(current_path)))
return [shift(list(current_path))]
else:
loops = []
for adjacent_node in set(graph[current_node]):
if Graph.count(adjacent_node, current_path) < 1:
current_path.append(adjacent_node)
graph[current_node].remove(adjacent_node)
graph[adjacent_node].remove(current_node)
loops += list(loop_dfs(adjacent_node, start_node, graph, current_path, nodes_to_faces))
graph[current_node].append(adjacent_node)
graph[adjacent_node].append(current_node)
current_path.pop()
return loops
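# Tiny usage sketch (not called anywhere; the graph and face labels are made
# up).  On a triangle whose nodes each border a distinct face, searching from
# node 0 toward its neighbour 1 yields the 2-cycle [0, 1] and the 3-cycle
# [0, 2, 1], in some order.
def _example_loop_dfs():
    graph = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    nodes_to_faces = {0: (0,), 1: (1,), 2: (2,)}
    return loop_dfs(0, 1, graph, [0], nodes_to_faces)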
'''
for i in range(len(path)):
index = path[i]
beta[0].insert(index+1,None)
beta[1].insert(index+1,None)
beta[0].append('____')
beta[1].append('____')
for j,tup in enumerate(zip(beta[0],beta[1])):
t,b = tup
if type(t) == str or type(b) == str: continue
if t > index:
beta[0][j] +=1
if b > index:
beta[1][j] +=1
beta[0].remove('____')
beta[1].remove('____')
for j in range(len(path)):
if path[j] > index:
path[j] += 1
path.sort()
for i in range(len(path)):
index = path[i]
beta[0][index+1]=int(self.getValue(i,ladder[0],path))
beta[1][index+1] =int(self.getValue(i,ladder[1],path))
'''
'''
count = {}
count[None] = 0
for i,j in zip(beta[0],beta[1]):
if i in count.keys():
count[i] += 1
else:
count[i] = 1
if j in count.keys():
count[j] += 1
else:
count[j] = 1
'''
|
|
from __future__ import unicode_literals
import datetime
from pathlib import Path
from django.template import loader
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.contrib.flatpages.models import FlatPage
from django.contrib.flatpages.views import flatpage
from django.contrib.flatpages.sitemaps import FlatPageSitemap
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views.generic.base import RedirectView, TemplateView
from django.views.decorators.cache import cache_page, never_cache
from django.views.decorators.http import last_modified
from contact.views import ContactView
from landing.views import LandingPageView
from landing.sitemaps import LandingSitemap
from landing.models import Service
from community.sitemaps import ForumsSitemap, TopicsSitemap
from headers.utils.decorators import with_headers
from django.contrib.auth.views import login as login_view
from django.contrib.auth import views as authviews
from landing.forms import MyAuthenticationForm, MyPasswordResetForm
from metadata.core.context_processors import login_kwargs
from landing.views import ovpnfile
from metadata.models import Website
from blog.models import Announcement, Link
from tagging.views import TaggedObjectList
from tagging.models import TaggedItem
def fp_lastmod(request, url):
return datetime.datetime.fromtimestamp(
Path(loader.get_template(
FlatPage.objects.get(url=url).template_name
or "flatpages/default.html"
).template.origin.name).stat().st_mtime
)
sitemaps = {
'flatpages': FlatPageSitemap,
'landing': LandingSitemap,
'forums': ForumsSitemap,
'topics': TopicsSitemap,
}
urlpatterns = [
url(r'^$',
#cache_page(60*5)(last_modified(fp_lastmod)(flatpage)),
#cache_page(60)(flatpage),
cache_page(30)(flatpage),
kwargs={'url': '/'},
name='home'
),
url(r'^info/announcements/$',
never_cache(TemplateView.as_view(
template_name='pages/info.html',
content_type='text/html',
)),
kwargs={'announcements':Announcement.objects.all()},
name='info',
),
url(r'^info/links/$',
never_cache(TemplateView.as_view(
template_name='pages/links.html',
content_type='text/html',
)),
name='links',
),
url(r'^about/$',
cache_page(60*5)(last_modified(fp_lastmod)(flatpage)),
#cache_page(60*5)(flatpage),
kwargs={'url': '/about/'},
name='about'
),
url(r'^about/privacy/$',
cache_page(60*5)(last_modified(fp_lastmod)(flatpage)),
kwargs={'url': '/about/privacy/'},
name='privacy'
),
url(r'^about/terms/$',
cache_page(60*5)(last_modified(fp_lastmod)(flatpage)),
kwargs={'url': '/about/terms/'},
name='terms'
),
url(r'^user/blank/$',
never_cache(flatpage),
kwargs={'url': '/user/blank/'},
name='user_blank'
),
url(r'^(?P<url>shared/.*)$',
flatpage,
name='shared'
),
url(r'^contact/$',
ContactView.as_view(
success_url='/contact/',
template_name = 'pages/contact.html'
),
kwargs={'gapi_key' : getattr(settings, 'GOOGLE_API_KEY', None)},
name='contact'
),
url(r'^services/$',
with_headers(False, X_Robots_Tag='noarchive')(
LandingPageView.as_view(
template_name='pages/services.html',
cache_timeout=settings.DEBUG and 5 or 300
)
),
name='services'
),
url(r'^shop/$',
LandingPageView.as_view(
template_name='pages/shop.html'
),
name='shop'
),
url(r'^info/calendar/month/$',
flatpage,
kwargs={'url': '/info/calendar/month/'},
name='monthcal',
),
url(r'^robots\.txt$',
TemplateView.as_view(
content_type='text/plain',
template_name='robots.txt',
),
name='robots'
),
url(r'^sitemap\.xml$',
cache_page(60*60)(sitemap),
kwargs={'sitemaps': sitemaps}
),
url(r'^manifest\.json$', cache_page(60*60)(
TemplateView.as_view(
content_type='application/json',
template_name='manifest.json'
)),
kwargs={'prefix': getattr(settings, 'FAVICON_PREFIX', None)},
name='chrome_manifest'
),
url(r'^treenav/',
include('treenav.urls')
),
url(r'^docs/public/',
include('pubdocs.urls')
),
url(r'^admin/doc/',
include('django.contrib.admindocs.urls')
),
url(r'^admin/',
admin.site.urls
),
url(r'^community/',
include('community.urls')
),
url(r'accounts/login/$',
authviews.login,
kwargs=login_kwargs(authentication_form=MyAuthenticationForm),
name='login'
),
url(r'accounts/logout/$',
authviews.logout,
kwargs=login_kwargs(next_page='/'),
name='logout'
),
url(r'^accounts/password_change/$',
authviews.password_change,
kwargs=login_kwargs(),
name='password_change'
),
url(r'^accounts/password_change/done/$',
authviews.password_change_done,
kwargs=login_kwargs(),
name='password_change_done'
),
url(r'^accounts/password_reset/$',
authviews.password_reset,
kwargs=login_kwargs(password_reset_form=MyPasswordResetForm),
name='password_reset'
),
url(r'^accounts/password_reset/done/$',
authviews.password_reset_done,
kwargs=login_kwargs(),
name='password_reset_done'
),
url(r'^accounts/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
authviews.password_reset_confirm,
kwargs=login_kwargs(),
name='password_reset_confirm'
),
url(r'^accounts/reset/done/$',
authviews.password_reset_complete,
kwargs=login_kwargs(),
name='password_reset_complete'
),
]
if settings.DEBUG:
urlpatterns.extend([
url(r'^favicon\.ico$',
RedirectView.as_view(
url=staticfiles_storage.url('img/favicon.ico'),
permanent=False
),
name='favicon'
),
])
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
|
#!/usr/bin/env python
from panda3d.core import *
import sys
import os
from direct.showbase.ShowBase import ShowBase
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import OnscreenText
from direct.showbase.DirectObject import DirectObject
from direct.actor import Actor
from random import *
# Function to put instructions on the screen.
def addInstructions(pos, msg):
return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.05,
shadow=(0, 0, 0, 1), parent=base.a2dTopLeft,
pos=(0.08, -pos - 0.04), align=TextNode.ALeft)
# Function to put title on the screen.
def addTitle(text):
return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), scale=.07,
parent=base.a2dBottomRight, align=TextNode.ARight,
pos=(-0.1, 0.09), shadow=(0, 0, 0, 1))
class World(DirectObject):
def __init__(self):
# Preliminary capabilities check.
if not base.win.getGsg().getSupportsBasicShaders():
self.t = addTitle(
"Shadow Demo: Video driver reports that shaders are not supported.")
return
if not base.win.getGsg().getSupportsDepthTexture():
self.t = addTitle(
"Shadow Demo: Video driver reports that depth textures are not supported.")
return
# creating the offscreen buffer.
winprops = WindowProperties.size(512, 512)
props = FrameBufferProperties()
props.setRgbColor(1)
props.setAlphaBits(1)
props.setDepthBits(1)
LBuffer = base.graphicsEngine.makeOutput(
base.pipe, "offscreen buffer", -2,
props, winprops,
GraphicsPipe.BFRefuseWindow,
base.win.getGsg(), base.win)
self.buffer = LBuffer
if not LBuffer:
self.t = addTitle(
"Shadow Demo: Video driver cannot create an offscreen buffer.")
return
Ldepthmap = Texture()
LBuffer.addRenderTexture(Ldepthmap, GraphicsOutput.RTMBindOrCopy,
GraphicsOutput.RTPDepthStencil)
if base.win.getGsg().getSupportsShadowFilter():
Ldepthmap.setMinfilter(Texture.FTShadow)
Ldepthmap.setMagfilter(Texture.FTShadow)
# Adding a color texture is totally unnecessary, but it helps with
# debugging.
Lcolormap = Texture()
LBuffer.addRenderTexture(Lcolormap, GraphicsOutput.RTMBindOrCopy,
GraphicsOutput.RTPColor)
self.inst_p = addInstructions(0.06, 'P : stop/start the Panda Rotation')
self.inst_w = addInstructions(0.12, 'W : stop/start the Walk Cycle')
self.inst_t = addInstructions(0.18, 'T : stop/start the Teapot')
self.inst_l = addInstructions(0.24, 'L : move light source far or close')
self.inst_v = addInstructions(0.30, 'V : View the Depth-Texture results')
self.inst_u = addInstructions(0.36, 'U : toggle updating the shadow map')
self.inst_x = addInstructions(0.42, 'Left/Right Arrow : switch camera angles')
        self.inst_a = addInstructions(0.48, 'A/Z : increase/decrease the push bias')
base.setBackgroundColor(0, 0, 0.2, 1)
base.camLens.setNearFar(1.0, 10000)
base.camLens.setFov(75)
base.disableMouse()
# Load the scene.
floorTex = loader.loadTexture('maps/envir-ground.jpg')
cm = CardMaker('')
cm.setFrame(-2, 2, -2, 2)
floor = render.attachNewNode(PandaNode("floor"))
for y in range(12):
for x in range(12):
nn = floor.attachNewNode(cm.generate())
nn.setP(-90)
nn.setPos((x - 6) * 4, (y - 6) * 4, 0)
floor.setTexture(floorTex)
floor.flattenStrong()
self.pandaAxis = render.attachNewNode('panda axis')
self.pandaModel = Actor.Actor('panda-model', {'walk': 'panda-walk4'})
self.pandaModel.reparentTo(self.pandaAxis)
self.pandaModel.setPos(9, 0, 0)
self.pandaModel.setShaderInput("scale", 0.01, 0.01, 0.01, 1.0)
self.pandaWalk = self.pandaModel.actorInterval('walk', playRate=1.8)
self.pandaWalk.loop()
self.pandaMovement = self.pandaAxis.hprInterval(
20.0, LPoint3(-360, 0, 0), startHpr=LPoint3(0, 0, 0))
self.pandaMovement.loop()
self.teapot = loader.loadModel('teapot')
self.teapot.reparentTo(render)
self.teapot.setPos(0, -20, 10)
self.teapot.setShaderInput("texDisable", 1, 1, 1, 1)
self.teapotMovement = self.teapot.hprInterval(50, LPoint3(0, 360, 360))
self.teapotMovement.loop()
self.accept('escape', sys.exit)
self.accept("arrow_left", self.incrementCameraPosition, [-1])
self.accept("arrow_right", self.incrementCameraPosition, [1])
self.accept("p", self.toggleInterval, [self.pandaMovement])
self.accept("t", self.toggleInterval, [self.teapotMovement])
self.accept("w", self.toggleInterval, [self.pandaWalk])
self.accept("v", base.bufferViewer.toggleEnable)
self.accept("u", self.toggleUpdateShadowMap)
self.accept("l", self.incrementLightPosition, [1])
self.accept("o", base.oobe)
self.accept('a', self.adjustPushBias, [1.1])
self.accept('z', self.adjustPushBias, [0.9])
self.LCam = base.makeCamera(LBuffer)
self.LCam.node().setScene(render)
self.LCam.node().getLens().setFov(40)
self.LCam.node().getLens().setNearFar(10, 100)
# default values
self.pushBias = 0.04
self.ambient = 0.2
self.cameraSelection = 0
self.lightSelection = 0
# setting up shader
render.setShaderInput('light', self.LCam)
render.setShaderInput('Ldepthmap', Ldepthmap)
render.setShaderInput('ambient', self.ambient, 0, 0, 1.0)
render.setShaderInput('texDisable', 0, 0, 0, 0)
render.setShaderInput('scale', 1, 1, 1, 1)
# Put a shader on the Light camera.
lci = NodePath(PandaNode("Light Camera Initializer"))
lci.setShader(loader.loadShader('caster.sha'))
self.LCam.node().setInitialState(lci.getState())
# Put a shader on the Main camera.
# Some video cards have special hardware for shadow maps.
# If the card has that, use it. If not, use a different
# shader that does not require hardware support.
mci = NodePath(PandaNode("Main Camera Initializer"))
if base.win.getGsg().getSupportsShadowFilter():
mci.setShader(loader.loadShader('shadow.sha'))
else:
mci.setShader(loader.loadShader('shadow-nosupport.sha'))
base.cam.node().setInitialState(mci.getState())
self.incrementCameraPosition(0)
self.incrementLightPosition(0)
self.adjustPushBias(1.0)
def toggleInterval(self, ival):
if ival.isPlaying():
ival.pause()
else:
ival.resume()
def toggleUpdateShadowMap(self):
self.buffer.active = not self.buffer.active
def incrementCameraPosition(self, n):
self.cameraSelection = (self.cameraSelection + n) % 6
if (self.cameraSelection == 0):
base.cam.reparentTo(render)
base.cam.setPos(30, -45, 26)
base.cam.lookAt(0, 0, 0)
self.LCam.node().hideFrustum()
if (self.cameraSelection == 1):
base.cam.reparentTo(self.pandaModel)
base.cam.setPos(7, -3, 9)
base.cam.lookAt(0, 0, 0)
self.LCam.node().hideFrustum()
if (self.cameraSelection == 2):
base.cam.reparentTo(self.pandaModel)
base.cam.setPos(-7, -3, 9)
base.cam.lookAt(0, 0, 0)
self.LCam.node().hideFrustum()
if (self.cameraSelection == 3):
base.cam.reparentTo(render)
base.cam.setPos(7, -23, 12)
base.cam.lookAt(self.teapot)
self.LCam.node().hideFrustum()
if (self.cameraSelection == 4):
base.cam.reparentTo(render)
base.cam.setPos(-7, -23, 12)
base.cam.lookAt(self.teapot)
self.LCam.node().hideFrustum()
if (self.cameraSelection == 5):
base.cam.reparentTo(render)
base.cam.setPos(1000, 0, 195)
base.cam.lookAt(0, 0, 0)
self.LCam.node().showFrustum()
def incrementLightPosition(self, n):
self.lightSelection = (self.lightSelection + n) % 2
if (self.lightSelection == 0):
self.LCam.setPos(0, -40, 25)
self.LCam.lookAt(0, -10, 0)
self.LCam.node().getLens().setNearFar(10, 100)
if (self.lightSelection == 1):
self.LCam.setPos(0, -600, 200)
self.LCam.lookAt(0, -10, 0)
self.LCam.node().getLens().setNearFar(10, 1000)
def shaderSupported(self):
return base.win.getGsg().getSupportsBasicShaders() and \
base.win.getGsg().getSupportsDepthTexture() and \
base.win.getGsg().getSupportsShadowFilter()
def adjustPushBias(self, inc):
self.pushBias *= inc
self.inst_a.setText(
'A/Z: Increase/Decrease the Push-Bias [%F]' % self.pushBias)
render.setShaderInput('push', self.pushBias)
if __name__ == '__main__':
base = ShowBase()
w = World()
base.run()
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
import Cookie
import socket
import urlparse
from oslo.config import cfg
import websockify
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.huawei.openstack.common import HWExtend
import log as operationlog
LOG = logging.getLogger(__name__)
operationlog.init('nova-api')
CONF = cfg.CONF
CONF.import_opt('novncproxy_base_url', 'nova.vnc')
CONF.import_opt('html5proxy_base_url', 'nova.spice', group='spice')
CONF.import_opt('base_url', 'nova.console.serial', group='serial_console')
class NovaProxyRequestHandlerBase(object):
def verify_origin_proto(self, console_type, origin_proto):
if console_type == 'novnc':
expected_proto = \
urlparse.urlparse(CONF.novncproxy_base_url).scheme
elif console_type == 'spice-html5':
expected_proto = \
urlparse.urlparse(CONF.spice.html5proxy_base_url).scheme
elif console_type == 'serial':
expected_proto = \
urlparse.urlparse(CONF.serial_console.base_url).scheme
else:
detail = _("Invalid Console Type for WebSocketProxy: '%s'") % \
console_type
raise exception.ValidationError(detail=detail)
return origin_proto == expected_proto
def new_websocket_client(self):
"""Called after a new WebSocket connection has been established."""
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
from eventlet import hubs
hubs.use_hub()
# The nova expected behavior is to have token
# passed to the method GET of the request
query = urlparse.urlparse(self.path).query
token = urlparse.parse_qs(query).get("token", [""]).pop()
if not token:
            # noVNC uses its own convention of forwarding the token
            # from the request in a cookie header, so we should also
            # check for that behavior
hcookie = self.headers.getheader('cookie')
if hcookie:
cookie = Cookie.SimpleCookie()
cookie.load(hcookie)
if 'token' in cookie:
token = cookie['token'].value
ctxt = context.get_admin_context()
rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
connect_info = rpcapi.check_token(ctxt, token=token)
if not connect_info:
raise Exception(_("Invalid Token"))
# Verify Origin
expected_origin_hostname = self.headers.getheader('Host')
if ':' in expected_origin_hostname:
e = expected_origin_hostname
expected_origin_hostname = e.split(':')[0]
origin_url = self.headers.getheader('Origin')
# missing origin header indicates non-browser client which is OK
if origin_url is not None:
origin = urlparse.urlparse(origin_url)
origin_hostname = origin.hostname
origin_scheme = origin.scheme
if origin_hostname == '' or origin_scheme == '':
detail = _("Origin header not valid.")
raise exception.ValidationError(detail=detail)
if expected_origin_hostname != origin_hostname:
detail = _("Origin header does not match this host.")
raise exception.ValidationError(detail=detail)
if not self.verify_origin_proto(connect_info['console_type'],
origin.scheme):
detail = _("Origin header protocol does not match this host.")
raise exception.ValidationError(detail=detail)
self.msg(_('connect info: %s'), str(connect_info))
host = connect_info['host']
port = int(connect_info['port'])
# Connect to the target
self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
'port': port})
tsock = self.socket(host, port, connect=True)
# Handshake as necessary
if connect_info.get('internal_access_path'):
tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
connect_info['internal_access_path'])
while True:
data = tsock.recv(4096, socket.MSG_PEEK)
if data.find("\r\n\r\n") != -1:
if not data.split("\r\n")[0].find("200"):
raise Exception(_("Invalid Connection Info"))
tsock.recv(len(data))
break
instance_id = connect_info.get('instance_uuid', 'None')
# Start proxying
try:
operationlog.info(
"VNC: host:%s, port:%s, is connecting to vm %s, at %s" % (
host, port, instance_id, timeutils.utcnow()),
extra={"type": "operate"})
self.do_proxy(tsock)
except Exception:
if tsock:
tsock.shutdown(socket.SHUT_RDWR)
tsock.close()
operationlog.info(
"VNC: host:%s, port:%s, lost connection with vm %s, at %s"
% (host, port, instance_id, timeutils.utcnow()),
extra={"type": "operate"})
self.vmsg(_("%(host)s:%(port)s: Target closed") %
{'host': host, 'port': port})
LOG.audit("%s:%s: Target closed" % (host, port))
raise
# TODO(sross): when the websockify version is bumped to be >=0.6,
# remove the if-else statement and make the if branch
# contents the only code.
if getattr(websockify, 'ProxyRequestHandler', None) is not None:
class NovaProxyRequestHandler(NovaProxyRequestHandlerBase,
websockify.ProxyRequestHandler):
def __init__(self, *args, **kwargs):
websockify.ProxyRequestHandler.__init__(self, *args, **kwargs)
def socket(self, *args, **kwargs):
return websockify.WebSocketServer.socket(*args, **kwargs)
class NovaWebSocketProxy(websockify.WebSocketProxy):
@staticmethod
def get_logger():
return LOG
else:
import sys
class NovaWebSocketProxy(NovaProxyRequestHandlerBase,
websockify.WebSocketProxy):
def __init__(self, *args, **kwargs):
del kwargs['traffic']
del kwargs['RequestHandlerClass']
websockify.WebSocketProxy.__init__(self, *args,
target_host='ignore',
target_port='ignore',
unix_target=None,
target_cfg=None,
ssl_target=None,
**kwargs)
def new_client(self):
self.new_websocket_client()
def msg(self, *args, **kwargs):
LOG.info(*args, **kwargs)
def vmsg(self, *args, **kwargs):
LOG.debug(*args, **kwargs)
def warn(self, *args, **kwargs):
LOG.warn(*args, **kwargs)
def print_traffic(self, token="."):
if self.traffic:
sys.stdout.write(token)
sys.stdout.flush()
class NovaProxyRequestHandler(object):
pass
|
|
# sql/default_comparator.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from . import coercions
from . import operators
from . import roles
from . import type_api
from .elements import and_
from .elements import BinaryExpression
from .elements import ClauseList
from .elements import collate
from .elements import CollectionAggregate
from .elements import False_
from .elements import Null
from .elements import or_
from .elements import True_
from .elements import UnaryExpression
from .. import exc
from .. import util
def _boolean_compare(
expr,
op,
obj,
negate=None,
reverse=False,
_python_is_types=(util.NoneType, bool),
result_type=None,
**kwargs
):
if result_type is None:
result_type = type_api.BOOLEANTYPE
if isinstance(obj, _python_is_types + (Null, True_, False_)):
# allow x ==/!= True/False to be treated as a literal.
# this comes out to "== / != true/false" or "1/0" if those
# constants aren't supported and works on all platforms
if op in (operators.eq, operators.ne) and isinstance(
obj, (bool, True_, False_)
):
return BinaryExpression(
expr,
coercions.expect(roles.ConstExprRole, obj),
op,
type_=result_type,
negate=negate,
modifiers=kwargs,
)
elif op in (operators.is_distinct_from, operators.isnot_distinct_from):
return BinaryExpression(
expr,
coercions.expect(roles.ConstExprRole, obj),
op,
type_=result_type,
negate=negate,
modifiers=kwargs,
)
else:
# all other None/True/False uses IS, IS NOT
if op in (operators.eq, operators.is_):
return BinaryExpression(
expr,
coercions.expect(roles.ConstExprRole, obj),
operators.is_,
negate=operators.isnot,
type_=result_type,
)
elif op in (operators.ne, operators.isnot):
return BinaryExpression(
expr,
coercions.expect(roles.ConstExprRole, obj),
operators.isnot,
negate=operators.is_,
type_=result_type,
)
else:
raise exc.ArgumentError(
"Only '=', '!=', 'is_()', 'isnot()', "
"'is_distinct_from()', 'isnot_distinct_from()' "
"operators can be used with None/True/False"
)
else:
obj = coercions.expect(
roles.BinaryElementRole, element=obj, operator=op, expr=expr
)
if reverse:
return BinaryExpression(
obj, expr, op, type_=result_type, negate=negate, modifiers=kwargs
)
else:
return BinaryExpression(
expr, obj, op, type_=result_type, negate=negate, modifiers=kwargs
)
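# Illustrative usage sketch (assumes a SQLAlchemy 1.3.x install is importable);
# not part of SQLAlchemy.  _boolean_compare() above is what turns a comparison
# against None into IS / IS NOT rather than = / != NULL.
def _demo_null_comparison():
    from sqlalchemy import column  # public top-level helper
    assert str(column("x") == None) == "x IS NULL"  # noqa: E711
    assert str(column("x") != None) == "x IS NOT NULL"  # noqa: E711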
def _custom_op_operate(expr, op, obj, reverse=False, result_type=None, **kw):
if result_type is None:
if op.return_type:
result_type = op.return_type
elif op.is_comparison:
result_type = type_api.BOOLEANTYPE
return _binary_operate(
expr, op, obj, reverse=reverse, result_type=result_type, **kw
)
def _binary_operate(expr, op, obj, reverse=False, result_type=None, **kw):
obj = coercions.expect(
roles.BinaryElementRole, obj, expr=expr, operator=op
)
if reverse:
left, right = obj, expr
else:
left, right = expr, obj
if result_type is None:
op, result_type = left.comparator._adapt_expression(
op, right.comparator
)
return BinaryExpression(left, right, op, type_=result_type, modifiers=kw)
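# Illustrative usage sketch (assumes SQLAlchemy 1.3.x); not part of SQLAlchemy.
# _binary_operate() handles the non-boolean operators such as ``+``; the result
# type comes from the left/right comparators via _adapt_expression().
def _demo_binary_operate():
    from sqlalchemy import Integer, column
    assert str(column("x", Integer) + 5) == "x + :x_1"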
def _conjunction_operate(expr, op, other, **kw):
if op is operators.and_:
return and_(expr, other)
elif op is operators.or_:
return or_(expr, other)
else:
raise NotImplementedError()
def _scalar(expr, op, fn, **kw):
return fn(expr)
def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
seq_or_selectable = coercions.expect(
roles.InElementRole, seq_or_selectable, expr=expr, operator=op
)
if "in_ops" in seq_or_selectable._annotations:
op, negate_op = seq_or_selectable._annotations["in_ops"]
return _boolean_compare(
expr, op, seq_or_selectable, negate=negate_op, **kw
)
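# Illustrative usage sketch (assumes SQLAlchemy 1.3.x); not part of SQLAlchemy.
# _in_impl() coerces the right-hand side and then delegates to
# _boolean_compare() with notin_op recorded as the negation.
def _demo_in():
    from sqlalchemy import column
    expr = column("x").in_([1, 2])
    assert str(expr) == "x IN (:x_1, :x_2)"
    assert str(~expr) == "x NOT IN (:x_1, :x_2)"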
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
other = coercions.expect(
roles.BinaryElementRole, other, expr=expr, operator=op
)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError(
"Operator '%s' is not supported on " "this expression" % op.__name__
)
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, "negation_clause"):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr,
operators.match_op,
coercions.expect(
roles.BinaryElementRole,
other,
expr=expr,
operator=operators.match_op,
),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op
else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(
expr, operator=operators.distinct_op, type_=expr.type
)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
coercions.expect(
roles.BinaryElementRole,
cleft,
expr=expr,
operator=operators.and_,
),
coercions.expect(
roles.BinaryElementRole,
cright,
expr=expr,
operator=operators.and_,
),
operator=operators.and_,
group=False,
group_contents=False,
),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw,
)
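# Illustrative usage sketch (assumes SQLAlchemy 1.3.x); not part of SQLAlchemy.
# _between_impl() wraps the two bounds in an ungrouped AND ClauseList and
# records notbetween_op as the negation, which is what ``~`` uses.
def _demo_between():
    from sqlalchemy import column
    expr = column("x").between(1, 5)
    assert str(expr) == "x BETWEEN :x_1 AND :x_2"
    assert str(~expr) == "x NOT BETWEEN :x_1 AND :x_2"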
def _collate_impl(expr, op, other, **kw):
return collate(expr, other)
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate,),
"json_getitem_op": (_binary_operate,),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl,),
"notbetween_op": (_between_impl,),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
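# Illustrative usage sketch (assumes SQLAlchemy 1.3.x); not part of SQLAlchemy.
# operator_lookup drives ColumnOperators dispatch: each entry maps an operator
# name to its implementation and, for comparisons, its negated operator, which
# is what makes ``~`` flip LIKE into NOT LIKE.
def _demo_operator_lookup():
    from sqlalchemy import column
    assert operator_lookup["like_op"][1] is operators.notlike_op
    assert str(~column("x").like("a%")) == "x NOT LIKE :x_1"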
|
|
# Copyright (c) 2012 OpenStack Foundation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import oslo_db.exception as exc
import six
import testtools
import webob.exc
from neutron.api.v2 import attributes as attr
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import securitygroups_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_securitygroup.'
'SecurityGroupTestPlugin')
class SecurityGroupTestExtensionManager(object):
def get_resources(self):
        # Add the resources to the global attribute map. This is done here
        # because the test setup process does not initialize the main API
        # router, which is what normally extends the global attribute map.
attr.RESOURCE_ATTRIBUTE_MAP.update(
ext_sg.RESOURCE_ATTRIBUTE_MAP)
return ext_sg.Securitygroup.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _create_security_group(self, fmt, name, description, **kwargs):
data = {'security_group': {'name': name,
'tenant_id': kwargs.get('tenant_id',
'test-tenant'),
'description': description}}
security_group_req = self.new_create_request('security-groups', data,
fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return security_group_req.get_response(self.ext_api)
def _build_security_group_rule(self, security_group_id, direction, proto,
port_range_min=None, port_range_max=None,
remote_ip_prefix=None, remote_group_id=None,
tenant_id='test-tenant',
ethertype=const.IPv4):
data = {'security_group_rule': {'security_group_id': security_group_id,
'direction': direction,
'protocol': proto,
'ethertype': ethertype,
'tenant_id': tenant_id}}
if port_range_min:
data['security_group_rule']['port_range_min'] = port_range_min
if port_range_max:
data['security_group_rule']['port_range_max'] = port_range_max
if remote_ip_prefix:
data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
if remote_group_id:
data['security_group_rule']['remote_group_id'] = remote_group_id
return data
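    # Illustration only: called as in the tests below (protocol 'tcp', port
    # range 22-22), the helper above returns a request body shaped like this
    # (the UUID value is hypothetical):
    #
    #     {'security_group_rule': {'security_group_id': '<sg-uuid>',
    #                              'direction': 'ingress',
    #                              'protocol': 'tcp',
    #                              'ethertype': 'IPv4',
    #                              'tenant_id': 'test-tenant',
    #                              'port_range_min': '22',
    #                              'port_range_max': '22'}}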
def _create_security_group_rule(self, fmt, rules, **kwargs):
security_group_rule_req = self.new_create_request(
'security-group-rules', rules, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_rule_req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return security_group_rule_req.get_response(self.ext_api)
def _make_security_group(self, fmt, name, description, **kwargs):
res = self._create_security_group(fmt, name, description, **kwargs)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_security_group_rule(self, fmt, rules, **kwargs):
res = self._create_security_group_rule(self.fmt, rules)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def security_group(self, name='webservers', description='webservers',
fmt=None):
if not fmt:
fmt = self.fmt
security_group = self._make_security_group(fmt, name, description)
yield security_group
@contextlib.contextmanager
def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
'd1db38eb087',
direction='ingress', protocol=const.PROTO_NAME_TCP,
port_range_min='22', port_range_max='22',
remote_ip_prefix=None, remote_group_id=None,
fmt=None, ethertype=const.IPv4):
if not fmt:
fmt = self.fmt
rule = self._build_security_group_rule(security_group_id,
direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id,
ethertype=ethertype)
security_group_rule = self._make_security_group_rule(self.fmt, rule)
yield security_group_rule
def _delete_default_security_group_egress_rules(self, security_group_id):
"""Deletes default egress rules given a security group ID."""
res = self._list(
'security-group-rules',
query_params='security_group_id=%s' % security_group_id)
for r in res['security_group_rules']:
if (r['direction'] == 'egress' and not r['port_range_max'] and
not r['port_range_min'] and not r['protocol']
and not r['remote_ip_prefix']):
self._delete('security-group-rules', r['id'])
def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs):
"""Asserts that the sg rule has expected key/value pairs passed
in as expected_kvs dictionary
"""
for k, v in six.iteritems(expected_kvs):
self.assertEqual(security_group_rule[k], v)
class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
securitygroups_db.SecurityGroupDbMixin):
"""Test plugin that implements necessary calls on create/delete port for
associating ports with security groups.
"""
__native_pagination_support = True
__native_sorting_support = True
supported_extension_aliases = ["security-group"]
def create_port(self, context, port):
tenant_id = port['port']['tenant_id']
default_sg = self._ensure_default_security_group(context, tenant_id)
if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
session = context.session
with session.begin(subtransactions=True):
sgids = self._get_security_groups_on_port(context, port)
port = super(SecurityGroupTestPlugin, self).create_port(context,
port)
self._process_port_create_security_group(context, port,
sgids)
return port
def update_port(self, context, id, port):
session = context.session
with session.begin(subtransactions=True):
if ext_sg.SECURITYGROUPS in port['port']:
port['port'][ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
                # delete the port binding and recreate it with the new rules
self._delete_port_security_group_bindings(context, id)
port['port']['id'] = id
self._process_port_create_security_group(
context, port['port'],
port['port'].get(ext_sg.SECURITYGROUPS))
port = super(SecurityGroupTestPlugin, self).update_port(
context, id, port)
return port
def create_network(self, context, network):
self._ensure_default_security_group(context,
network['network']['tenant_id'])
return super(SecurityGroupTestPlugin, self).create_network(context,
network)
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
sorts = sorts or []
neutron_lports = super(SecurityGroupTestPlugin, self).get_ports(
context, filters, sorts=sorts, limit=limit, marker=marker,
page_reverse=page_reverse)
return neutron_lports
class SecurityGroupDBTestCase(SecurityGroupsTestCase):
def setUp(self, plugin=None, ext_mgr=None):
plugin = plugin or DB_PLUGIN_KLASS
ext_mgr = ext_mgr or SecurityGroupTestExtensionManager()
super(SecurityGroupDBTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
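# Illustrative sketch only (the class name and plugin path are hypothetical):
# the plugin/ext_mgr hooks in setUp() above are what let other plugin test
# suites reuse these helpers against their own security-group implementation.
class _ExampleVendorSecurityGroupTestCase(SecurityGroupDBTestCase):
    def setUp(self):
        super(_ExampleVendorSecurityGroupTestCase, self).setUp(
            plugin='example_vendor.plugin.ExampleSecurityGroupPlugin',
            ext_mgr=SecurityGroupTestExtensionManager())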
class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group(self):
name = 'webservers'
description = 'my webservers'
keys = [('name', name,), ('description', description)]
with self.security_group(name, description) as security_group:
for k, v, in keys:
self.assertEqual(security_group['security_group'][k], v)
# Verify that default egress rules have been created
sg_rules = security_group['security_group']['security_group_rules']
self.assertEqual(2, len(sg_rules))
v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4]
self.assertEqual(1, len(v4_rules))
v4_rule = v4_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_rule, expected)
v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6]
self.assertEqual(1, len(v6_rules))
v6_rule = v6_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_rule, expected)
def test_skip_duplicate_default_sg_error(self):
num_called = [0]
original_func = self.plugin.create_security_group
def side_effect(context, security_group, default_sg):
# can't always raise, or create_security_group will hang
self.assertTrue(default_sg)
self.assertTrue(num_called[0] < 2)
num_called[0] += 1
ret = original_func(context, security_group, default_sg)
if num_called[0] == 1:
return ret
# make another call to cause an exception.
# NOTE(yamamoto): raising the exception by ourselves
# doesn't update the session state appropriately.
self.assertRaises(exc.DBDuplicateEntry,
original_func, context, security_group,
default_sg)
with mock.patch.object(SecurityGroupTestPlugin,
'create_security_group',
side_effect=side_effect):
self.plugin.create_network(
context.get_admin_context(),
{'network': {'name': 'foo',
'admin_state_up': True,
'shared': False,
'tenant_id': 'bar'}})
def test_update_security_group(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(data['security_group']['name'],
res['security_group']['name'])
self.assertEqual(data['security_group']['description'],
res['security_group']['description'])
def test_update_security_group_name_to_default_fail(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'default',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_update_default_security_group_name_fail(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
def test_update_default_security_group_with_description(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(data['security_group']['description'],
res['security_group']['description'])
def test_check_default_security_group_description(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual('Default security group',
sg['security_groups'][0]['description'])
def test_default_security_group(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(1, len(groups['security_groups']))
def test_create_default_security_group_fail(self):
name = 'default'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_default_security_group_check_case_insensitive(self):
name = 'DEFAULT'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_list_security_groups(self):
with self.security_group(name='sg1', description='sg') as v1,\
self.security_group(name='sg2', description='sg') as v2,\
self.security_group(name='sg3', description='sg') as v3:
security_groups = (v1, v2, v3)
self._test_list_resources('security-group',
security_groups,
query_params='description=sg')
def test_list_security_groups_with_sort(self):
with self.security_group(name='sg1', description='sg') as sg1,\
self.security_group(name='sg2', description='sg') as sg2,\
self.security_group(name='sg3', description='sg') as sg3:
self._test_list_with_sort('security-group',
(sg3, sg2, sg1),
[('name', 'desc')],
query_params='description=sg')
def test_list_security_groups_with_pagination(self):
with self.security_group(name='sg1', description='sg') as sg1,\
self.security_group(name='sg2', description='sg') as sg2,\
self.security_group(name='sg3', description='sg') as sg3:
self._test_list_with_pagination('security-group',
(sg1, sg2, sg3),
('name', 'asc'), 2, 2,
query_params='description=sg')
def test_list_security_groups_with_pagination_reverse(self):
with self.security_group(name='sg1', description='sg') as sg1,\
self.security_group(name='sg2', description='sg') as sg2,\
self.security_group(name='sg3', description='sg') as sg3:
self._test_list_with_pagination_reverse(
'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
query_params='description=sg')
def test_create_security_group_rule_ethertype_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
ethertype = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', const.PROTO_NAME_TCP, '22',
'22', None, None, ethertype=ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_ethertype_invalid_for_protocol(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
rule = self._build_security_group_rule(
security_group_id, 'ingress', const.PROTO_NAME_IPV6_ICMP)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_invalid_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129", '172.30./24']:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
remote_ip_prefix = bad_prefix
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_invalid_ethertype_for_prefix(self):
name = 'webservers'
description = 'my webservers'
test_addr = {'192.168.1.1/24': 'IPv6',
'2001:db8:1234::/48': 'IPv4',
'192.168.2.1/24': 'BadEthertype'}
for remote_ip_prefix, ethertype in six.iteritems(test_addr):
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix,
None,
ethertype=ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_with_unmasked_prefix(self):
name = 'webservers'
description = 'my webservers'
addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'},
'fe80::2677:3ff:fe7d:4c': {'mask': '128', 'ethertype': 'IPv6'}}
for ip in addr:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
ethertype = addr[ip]['ethertype']
remote_ip_prefix = ip
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix,
None,
ethertype=ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, 201)
res_sg = self.deserialize(self.fmt, res)
prefix = res_sg['security_group_rule']['remote_ip_prefix']
self.assertEqual(prefix, '%s/%s' % (ip, addr[ip]['mask']))
def test_create_security_group_rule_tcp_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = const.PROTO_NUM_TCP # TCP
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol, '22', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_security_group_rule_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_security_group_rule_case_insensitive(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'TCP'
port_range_min = 22
port_range_max = 22
ethertype = 'ipV4'
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
ethertype=ethertype) as rule:
                # the lower case value will be returned
self.assertEqual(rule['security_group_rule']['protocol'],
protocol.lower())
self.assertEqual(rule['security_group_rule']['ethertype'],
const.IPv4)
def test_get_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
remote_group_id = sg['security_group']['id']
res = self.new_show_request('security-groups', remote_group_id)
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix):
group = self.deserialize(
self.fmt, res.get_response(self.ext_api))
sg_rule = group['security_group']['security_group_rules']
self.assertEqual(group['security_group']['id'],
remote_group_id)
self.assertEqual(3, len(sg_rule))
sg_rule = [r for r in sg_rule if r['direction'] == 'ingress']
for k, v, in keys:
self.assertEqual(sg_rule[0][k], v)
def test_get_security_group_on_port_from_wrong_tenant(self):
plugin = manager.NeutronManager.get_plugin()
if not hasattr(plugin, '_get_security_groups_on_port'):
self.skipTest("plugin doesn't use the mixin with this method")
neutron_context = context.get_admin_context()
res = self._create_security_group(self.fmt, 'webservers', 'webservers',
tenant_id='bad_tenant')
sg1 = self.deserialize(self.fmt, res)
with testtools.ExpectedException(ext_sg.SecurityGroupNotFound):
plugin._get_security_groups_on_port(
neutron_context,
{'port': {'security_groups': [sg1['security_group']['id']],
'tenant_id': 'tenant'}}
)
def test_delete_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
remote_group_id = sg['security_group']['id']
self._delete('security-groups', remote_group_id,
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_admin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_nonadmin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
neutron_context = context.Context('', 'test-tenant')
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPConflict.code,
neutron_context=neutron_context)
def test_security_group_list_creates_default_security_group(self):
neutron_context = context.Context('', 'test-tenant')
sg = self._list('security-groups',
neutron_context=neutron_context).get('security_groups')
self.assertEqual(1, len(sg))
def test_security_group_port_create_creates_default_security_group(self):
res = self._create_network(self.fmt, 'net1', True,
tenant_id='not_admin',
set_context=True)
net1 = self.deserialize(self.fmt, res)
res = self._create_port(self.fmt, net1['network']['id'],
tenant_id='not_admin', set_context=True)
sg = self._list('security-groups').get('security_groups')
self.assertEqual(1, len(sg))
def test_default_security_group_rules(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
security_group_id = groups['security_groups'][0]['id']
res = self.new_list_request('security-group-rules')
rules = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(rules['security_group_rules']), 4)
# Verify default rule for v4 egress
sg_rules = rules['security_group_rules']
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv4
]
self.assertEqual(1, len(rules))
v4_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_egress, expected)
# Verify default rule for v6 egress
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv6
]
self.assertEqual(1, len(rules))
v6_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_egress, expected)
# Verify default rule for v4 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv4
]
self.assertEqual(1, len(rules))
v4_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv4,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_ingress, expected)
# Verify default rule for v6 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv6
]
self.assertEqual(1, len(rules))
v6_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv6,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_ingress, expected)
def test_create_security_group_rule_remote_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
with self.security_group(name, description) as sg2:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_group_id = sg2['security_group']['id']
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_group_id', remote_group_id),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id
) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmp_with_type_and_code(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_ICMP
# port_range_min (ICMP type) is greater than port_range_max
# (ICMP code) in order to confirm min <= max port check is
# not called for ICMP.
port_range_min = 8
port_range_max = 5
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmp_with_type_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_ICMP
# ICMP type
port_range_min = 8
# ICMP code
port_range_max = None
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmpv6_with_type_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
ethertype = const.IPv6
remote_ip_prefix = "2001::f401:56ff:fefe:d3dc/128"
protocol = const.PROTO_NAME_IPV6_ICMP
# ICMPV6 type
port_range_min = const.ICMPV6_TYPE_RA
# ICMPV6 code
port_range_max = None
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('ethertype', ethertype),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
None, None,
ethertype) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_source_group_ip_and_ip_prefix(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_bad_security_group_id(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
def test_create_security_group_rule_bad_tenant(self):
with self.security_group() as sg:
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': "bad_tenant"}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
def test_create_security_group_rule_bad_tenant_remote_group_id(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
sg2 = self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg2['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant',
'remote_group_id': sg['security_group']['id']}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
def test_create_security_group_rule_bad_tenant_security_group_rule(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant'}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
def test_create_security_group_rule_bad_remote_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
def test_create_security_group_rule_duplicate_rules(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22')
self._create_security_group_rule(self.fmt, rule)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_security_group_rule_min_port_greater_max(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
for protocol in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
const.PROTO_NUM_TCP, const.PROTO_NUM_UDP]:
rule = self._build_security_group_rule(
sg['security_group']['id'],
'ingress', protocol, '50', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code,
res.status_int)
def test_create_security_group_rule_ports_but_no_protocol(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', None, '22', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_port_range_min_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', None)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_port_range_max_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, None, '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_icmp_type_too_big(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, '256', None)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_icmp_code_too_big(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, '8', '256')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_icmp_with_code_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
for code in ['2', '0']:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, None, code)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code,
res.status_int)
def test_list_ports_security_group(self):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'])
req = self.new_list_request('ports')
res = req.get_response(self.api)
ports = self.deserialize(self.fmt, res)
port = ports['ports'][0]
self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['id'])
def test_list_security_group_rules(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22) as sgr1,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23) as sgr2,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24) as sgr3:
                # Delete the default egress rules; otherwise the resource
                # list assertion at the end would fail.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_resources('security-group-rule',
[sgr1, sgr2, sgr3],
query_params=q)
def test_list_security_group_rules_with_sort(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22) as sgr1,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23) as sgr2,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24) as sgr3:
                # Delete the default egress rules; otherwise the resource
                # list assertion at the end would fail.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_with_sort('security-group-rule',
(sgr3, sgr2, sgr1),
[('port_range_max', 'desc')],
query_params=q)
def test_list_security_group_rules_with_pagination(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22) as sgr1,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23) as sgr2,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24) as sgr3:
                # Delete the default egress rules; otherwise the resource
                # list assertion at the end would fail.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_with_pagination(
'security-group-rule', (sgr3, sgr2, sgr1),
('port_range_max', 'desc'), 2, 2,
query_params=q)
def test_list_security_group_rules_with_pagination_reverse(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22) as sgr1,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23) as sgr2,\
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24) as sgr3:
self._test_list_with_pagination_reverse(
'security-group-rule', (sgr3, sgr2, sgr1),
('port_range_max', 'desc'), 2, 2,
query_params='direction=egress')
def test_create_port_with_multiple_security_groups(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg1:
with self.security_group() as sg2:
res = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1['security_group']['id'],
sg2['security_group']['id']])
port = self.deserialize(self.fmt, res)
self.assertEqual(2, len(
port['port'][ext_sg.SECURITYGROUPS]))
self._delete('ports', port['port']['id'])
def test_create_port_with_no_security_groups(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=[])
port = self.deserialize(self.fmt, res)
self.assertEqual([], port['port'][ext_sg.SECURITYGROUPS])
def test_update_port_with_security_group(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'])
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
ext_sg.SECURITYGROUPS:
[sg['security_group']['id']]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
# Test update port without security group
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name']}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
self._delete('ports', port['port']['id'])
def test_update_port_with_multiple_security_groups(self):
with self.network() as n:
with self.subnet(n) as s:
with self.port(s) as port:
with self.security_group() as sg1:
with self.security_group() as sg2:
data = {'port': {ext_sg.SECURITYGROUPS:
[sg1['security_group']['id'],
sg2['security_group']['id']]}}
req = self.new_update_request(
'ports', data, port['port']['id'])
port = self.deserialize(
self.fmt, req.get_response(self.api))
self.assertEqual(
2, len(port['port'][ext_sg.SECURITYGROUPS]))
def test_update_port_remove_security_group_empty_list(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
'security_groups': []}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual([],
res['port'].get(ext_sg.SECURITYGROUPS))
self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_none(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
'security_groups': None}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual([],
res['port'].get(ext_sg.SECURITYGROUPS))
self._delete('ports', port['port']['id'])
def test_create_port_with_bad_security_group(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['bad_id'])
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_delete_security_group_port_in_use(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
# try to delete security group that's in use
res = self._delete('security-groups',
sg['security_group']['id'],
webob.exc.HTTPConflict.code)
# delete the blocking port
self._delete('ports', port['port']['id'])
def test_create_security_group_rule_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule1 = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '23',
'23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
ret = self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
self.assertEqual(2, len(ret['security_group_rules']))
def test_create_security_group_rule_bulk_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule1 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]
}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_security_group_rule_allow_all_ipv4(self):
with self.security_group() as sg:
rule = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': const.IPv4,
'tenant_id': 'test-tenant'}
res = self._create_security_group_rule(
self.fmt, {'security_group_rule': rule})
rule = self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule_v4 = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': const.IPv4,
'tenant_id': 'test-tenant'}
rule_v6 = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': const.IPv6,
'tenant_id': 'test-tenant'}
rules = {'security_group_rules': [rule_v4, rule_v6]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_security_group_rule_duplicate_rule_in_post(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rules = {'security_group_rules': [rule['security_group_rule'],
rule['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rules = {'security_group_rules': [rule['security_group_rule'],
rule['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_security_group_rule_duplicate_rule_db(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_security_group_rule_duplicate_rule_db_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_security_group_rule_different_security_group_ids(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg1:
with self.security_group() as sg2:
rule1 = self._build_security_group_rule(
sg1['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(
sg2['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]
}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_with_invalid_ethertype(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id,
ethertype='IPv5')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_with_invalid_protocol(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'tcp/ip'
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_with_invalid_tcp_or_udp_protocol(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 0
port_range_max = 80
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_port_with_non_uuid(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['not_valid'])
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_create_security_group_rule_with_specific_id(self):
neutron_context = context.Context('', 'test-tenant')
specified_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP)
rule['security_group_rule'].update({'id': specified_id,
'port_range_min': None,
'port_range_max': None,
'remote_ip_prefix': None,
'remote_group_id': None})
result = self.plugin.create_security_group_rule(
neutron_context, rule)
self.assertEqual(specified_id, result['id'])
class TestConvertIPPrefixToCIDR(base.BaseTestCase):
def test_convert_bad_ip_prefix_to_cidr(self):
for val in ['bad_ip', 256, "2001:db8:a::123/129"]:
self.assertRaises(n_exc.InvalidCIDR,
ext_sg.convert_ip_prefix_to_cidr, val)
self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None))
def test_convert_ip_prefix_no_netmask_to_cidr(self):
addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'}
for k, v in six.iteritems(addr):
self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k),
'%s/%s' % (k, v))
def test_convert_ip_prefix_with_netmask_to_cidr(self):
addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48']
for addr in addresses:
self.assertEqual(addr, ext_sg.convert_ip_prefix_to_cidr(addr))
class TestConvertProtocol(base.BaseTestCase):
def test_convert_numeric_protocol(self):
self.assertIsInstance(ext_sg.convert_protocol('2'), str)
def test_convert_bad_protocol(self):
for val in ['bad', '256', '-1']:
self.assertRaises(ext_sg.SecurityGroupRuleInvalidProtocol,
ext_sg.convert_protocol, val)
def test_convert_numeric_protocol_to_string(self):
self.assertIsInstance(ext_sg.convert_protocol(2), str)
class TestConvertEtherType(base.BaseTestCase):
def test_convert_unsupported_ethertype(self):
for val in ['ip', 'ip4', 'ip6', '']:
self.assertRaises(ext_sg.SecurityGroupRuleInvalidEtherType,
ext_sg.convert_ethertype_to_case_insensitive,
val)
|
|
from datetime import datetime, timedelta
import re
import logging
import os
from flask import Flask
from flask_dynamo import Dynamo
from flask_ask import Ask, statement, question, session, context, request, version
import src.pledgeFunc as pledgeFunc
import src.picaFunc as picaFunc
#import boto3
# Program Start
app = Flask(__name__)
#ask = Ask(app, "/Babybot") # only for ngrok
ask = Ask(app, "/")
app.config['DYNAMO_TABLES'] = [
{
'TableName': 'alexausage',
'KeySchema': [{'AttributeName':'userID', 'KeyType':'HASH'}],
'AttributeDefinitions': [{'AttributeName':'userID', 'AttributeType':'S'
},
{'AttributeName':'usageType', 'AttributeType':'S'
},
{'AttributeName': 'requestTimestamp', 'AttributeType': 'S'
},
{'AttributeName': 'responseTimestamp', 'AttributeType': 'S'
},
{'AttributeName': 'responseStatus', 'AttributeType': 'S'
}
],
'ProvisionedThroughput': {'ReadCapacityUnits': 5, 'WriteCapacityUnits':5}
}
]
dynamo = Dynamo(app)
'''
# Logging format
logFormatter = logging.Formatter("%(asctime)s [%(levelname)s] [%(name)s] %(message)s")
def setup_logging():
logger = logging.getLogger()
for h in logger.handlers:
logger.removeHandler(h)
h = logging.StreamHandler(sys.stdout)
# use whatever format you want here
h.setFormatter(logging.Formatter(logFormatter))
logger.addHandler(h)
logger.setLevel(logging.INFO)
return logger
rootLogger = setup_logging()
'''
rootLogger = logging.getLogger()
_DATE_PATTERNS = {
# "today", "tomorrow", "november twenty-fifth": 2015-11-25
'^\d{4}-\d{2}-\d{2}$': ['%Y-%m-%d', 0],
# "this week", "next week": 2015-W48
'^\d{4}-W\d{2}$': ['%Y-W%U-%w', 7],
# "this weekend": 2015-W48-WE
'^\d{4}-W\d{2}-WE$': ['%Y-W%U-WE-%w', 2],
# "this month": 2015-11
'^\d{4}-\d{2}$': ['%Y-%m', 30],
# "next year": 2016
'^\d{4}$': ['%Y', 365],
}
def parse_time_frame(time_frame):
    # Input is the raw AMAZON.DATE slot value (a string such as '2015-11-25'
    # or '2015-W48'); callers default it to today's date before calling.
    # Returns a dict with ISO-format 'date_start' and 'date_end' keys, or
    # None if the value does not match any of the known date patterns.
    # Rewrite decade values (e.g. '201X') so they match the 'next year' regex.
    time_frame = re.sub('X$', '0', time_frame)
for re_pattern, format_pattern in list(_DATE_PATTERNS.items()):
if re.match(re_pattern, time_frame):
if '%U' in format_pattern[0]:
# http://stackoverflow.com/a/17087427/1163855
time_frame += '-0'
date_start = datetime.strptime(time_frame, format_pattern[0]).date()
date_end = date_start + timedelta(format_pattern[1])
date_range = {'date_start': date_start.isoformat(), 'date_end': date_end.isoformat()} # convert to format for PICA
# date_range = {'date_start': date_start.strftime("%Y/%m/%d"), 'date_end': date_end.strftime("%Y/%m/%d")}
return date_range
return None
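# A minimal, hypothetical sketch of how parse_time_frame maps AMAZON.DATE slot
# values to a start/end range; the slot strings below are illustrative only and
# this helper function is not part of the original skill.
def _example_parse_time_frame():
    # a single day: date_start and date_end are the same day
    print(parse_time_frame('2015-11-25'))
    # a week ("this week"): a 7-day range starting on that week's Sunday
    print(parse_time_frame('2015-W48'))
    # a month ("this month"): a 30-day range from the first of the month
    print(parse_time_frame('2015-11'))
    # unrecognised values fall through and return None
    print(parse_time_frame('not-a-date'))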
def get_msg_info():
rootLogger.info('Grabbing msg_info')
msg_info = {}
# msg_info["Request ID"] = request.requestId
# msg_info["Request Type"] = request.type
msg_info["Request Timestamp"] = request.timestamp
# msg_info["Session New"] = session.new
# msg_info["User ID"] = session.user.userId
# msg_info["Alexa Version"] = version
msg_info["Device ID"] = context.System.device.deviceId
if not msg_info["Device ID"]:
msg_info["Device ID"] = 'Service Simulator'
rootLogger.info('Msg_info retrieved')
rootLogger.debug(msg_info)
return msg_info
@app.route('/')
def homepage():
return "Welcome to Baby bot!!"
@ask.launch
def start_skill():
    welcome_message = 'Hello there! How can I help you?'
return question(welcome_message).reprompt('How can I help you today?')
@ask.intent('GetInfo')
def get_info():
    info_message = 'I am Baby Bot. I was developed by the Institute of Systems Science, National University of Singapore'
return statement(info_message)
@ask.intent('GetAppointments') #, convert={'TimeFrame': 'date'})
def get_appointment(TimeFrame):
rootLogger.info('Request for appointment received')
msg_info = get_msg_info()
if not TimeFrame:
TimeFrame= datetime.now().strftime('%Y-%m-%d')
response_msg, status = picaFunc.get_appointment_msg(msg_info, parse_time_frame(TimeFrame))
rootLogger.debug(response_msg)
rootLogger.info('Retrieved appointments msg')
usage_log(msg_info["Device ID"], 'Appointment', msg_info["Request Timestamp"].isoformat(),
datetime.now().isoformat(), status)
return statement(response_msg).simple_card(title='Get Appointment', content = response_msg)
@ask.intent('GetMedication')
def get_medication(TimeFrame):
rootLogger.info('Request for medication received')
msg_info = get_msg_info()
if not TimeFrame:
TimeFrame= datetime.now().strftime('%Y-%m-%d')
response_msg, status = picaFunc.get_medication_msg(msg_info, parse_time_frame(TimeFrame))
rootLogger.debug(response_msg)
rootLogger.info('Retrieved medications msg')
usage_log(msg_info["Device ID"], 'Medication', msg_info["Request Timestamp"].isoformat(),
datetime.now().isoformat(), status)
return statement(response_msg).simple_card(title='Get Medication', content = response_msg)
@ask.intent('GetFood')
def get_food(TimeFrame):
rootLogger.info('Request for food received')
msg_info = get_msg_info()
if not TimeFrame:
TimeFrame= datetime.now().strftime('%Y-%m-%d')
response_msg, status = picaFunc.get_food_msg(msg_info, parse_time_frame(TimeFrame))
rootLogger.debug(response_msg)
rootLogger.info('Retrieved food msg')
usage_log(msg_info["Device ID"], 'Food', msg_info["Request Timestamp"].isoformat(),
datetime.now().isoformat(), status)
return statement(response_msg).simple_card(title='Get Food', content = response_msg)
@ask.intent('GetAll')
def get_all(TimeFrame):
if not TimeFrame:
TimeFrame= datetime.now().strftime('%Y-%m-%d')
time_frame = parse_time_frame(TimeFrame)
rootLogger.info('Request for all info received')
msg_info = get_msg_info()
    all_msglist = []
    # each picaFunc getter returns a (message, status) tuple; keep only the
    # message, otherwise the join below would fail on tuples
    all_msglist.append(picaFunc.get_appointment_msg(msg_info, time_frame)[0])
    all_msglist.append(picaFunc.get_medication_msg(msg_info, time_frame)[0])
    all_msglist.append(picaFunc.get_food_msg(msg_info, time_frame)[0])
all_msg = ' '.join(all_msglist)
rootLogger.debug(all_msg)
rootLogger.info('Retrieved all info msg')
return statement(all_msg).simple_card(title='Get All', content = all_msg)
@ask.intent('GetHelp')
def get_help():
rootLogger.info('Request for help')
msg_info = get_msg_info()
response_msg, status = picaFunc.get_help_msg(msg_info)
rootLogger.debug(response_msg)
rootLogger.info('Retrieved help msg')
usage_log(msg_info["Device ID"], 'Help', msg_info["Request Timestamp"].isoformat(),
datetime.now().isoformat(), status)
return statement(response_msg).simple_card(title='Get Help', content = response_msg)
@ask.intent('GetPledge')
def get_pledge():
rootLogger.info('Request for pledge')
msg_info = get_msg_info()
response_msg, status = pledgeFunc.get_pledge_msg(msg_info)
rootLogger.debug(response_msg)
rootLogger.info('Retrieved pledge msg')
#usage_log(msg_info["Device ID"], 'pledge', msg_info["Request Timestamp"].strftime("%Y/%m/%d"),
# datetime.now().strftime("%Y/%m/%d"), 'success')
usage_log(msg_info["Device ID"], 'Pledge', msg_info["Request Timestamp"].isoformat(),
datetime.now().isoformat(), status)
return statement(response_msg).simple_card(title='Get Pledge', content=response_msg)
@ask.intent('AMAZON.StopIntent')
def stop_intent():
bye_text = 'Ok... bye'
return statement(bye_text)
'''
def db_connect():
return boto3.client('dynamodb', aws_access_key_id = os.environ.get('DB_ACCESS_KEY_ID'),
aws_secret_access_key = os.environ.get('DB_SECRET'))
def usage_log(userID, usageType, requestTimestamp, responseTimestamp, responseStatus):
rootLogger.debug('Connecting to DB')
dynamo = db_connect()
item = {
'userID': {'S':userID},
'usageType': {'S': usageType},
'requestTimestamp': {'S': requestTimestamp},
'responseTimestamp': {'S': responseTimestamp},
'responseStatus': {'S': responseStatus}
}
dynamo.put_item(TableName = 'usage',Item = item)
return rootLogger.debug('Logged usage')
'''
def usage_log(userID, usageType, requestTimestamp, responseTimestamp, responseStatus):
rootLogger.debug('Connecting to DB')
item = {
'userID': userID,
'usageType': usageType,
'requestTimestamp': requestTimestamp,
'responseTimestamp': responseTimestamp,
'responseStatus': responseStatus
}
dynamo.tables['alexausage'].put_item(Item=item)
return rootLogger.debug('Logged usage')
if __name__ == '__main__':
rootLogger.info('Started Up')
    app.run(debug=False)  # debug must stay False when pushed to production
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
"""Ansible module for retrieving and setting openshift related facts"""
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
import ConfigParser
import copy
import json
import os
import re
import urlparse
from distutils.util import strtobool
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
Args:
hostname (str): hostname to test
Returns:
bool: True if valid, otherwise False
"""
if (not hostname or
hostname.startswith('localhost') or
hostname.endswith('localdomain') or
len(hostname.split('.')) < 2):
return False
return True
def choose_hostname(hostnames=None, fallback=''):
""" Choose a hostname from the provided hostnames
Given a list of hostnames and a fallback value, choose a hostname to
use. This function will prefer fqdns if they exist (excluding any that
begin with localhost or end with localdomain) over ip addresses.
Args:
hostnames (list): list of hostnames
fallback (str): default value to set if hostnames does not contain
a valid hostname
Returns:
str: chosen hostname
"""
hostname = fallback
if hostnames is None:
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
ips = [i for i in hostnames
if (i is not None and isinstance(i, basestring)
and re.match(ip_regex, i))]
hosts = [i for i in hostnames
if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
for host in host_list:
if hostname_valid(host):
return host
return hostname
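# A small illustrative sketch (not part of the original module) of how
# choose_hostname prefers usable FQDNs over bare IP addresses; the candidate
# hostnames below are made up.
def _example_choose_hostname():
    candidates = ['10.0.0.5', 'localhost.localdomain', 'master1.example.com']
    # IPs and localhost/localdomain names lose to a real FQDN
    assert choose_hostname(candidates, fallback='10.0.0.5') == 'master1.example.com'
    # when no candidate passes hostname_valid, the fallback wins
    assert choose_hostname(['localhost', ''], fallback='10.0.0.5') == '10.0.0.5'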
def query_metadata(metadata_url, headers=None, expect_json=False):
""" Return metadata from the provided metadata_url
Args:
metadata_url (str): metadata url
headers (dict): headers to set for metadata request
expect_json (bool): does the metadata_url return json
Returns:
dict or list: metadata request result
"""
result, info = fetch_url(module, metadata_url, headers=headers)
if info['status'] != 200:
raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
return module.from_json(result.read())
else:
return [line.strip() for line in result.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
""" Walk the metadata tree and return a dictionary of the entire tree
Args:
metadata_url (str): metadata url
headers (dict): headers to set for metadata request
expect_json (bool): does the metadata_url return json
Returns:
dict: the result of walking the metadata tree
"""
metadata = dict()
for line in query_metadata(metadata_url, headers, expect_json):
if line.endswith('/') and not line == 'public-keys/':
key = line[:-1]
metadata[key] = walk_metadata(metadata_url + line,
headers, expect_json)
else:
results = query_metadata(metadata_url + line, headers,
expect_json)
if len(results) == 1:
# disable pylint maybe-no-member because overloaded use of
# the module name causes pylint to not detect that results
# is an array or hash
# pylint: disable=maybe-no-member
metadata[line] = results.pop()
else:
metadata[line] = results
return metadata
def get_provider_metadata(metadata_url, supports_recursive=False,
headers=None, expect_json=False):
""" Retrieve the provider metadata
Args:
metadata_url (str): metadata url
supports_recursive (bool): does the provider metadata api support
recursion
headers (dict): headers to set for metadata request
expect_json (bool): does the metadata_url return json
Returns:
dict: the provider metadata
"""
try:
if supports_recursive:
metadata = query_metadata(metadata_url, headers,
expect_json)
else:
metadata = walk_metadata(metadata_url, headers,
expect_json)
except OpenShiftFactsMetadataUnavailableError:
metadata = None
return metadata
def normalize_gce_facts(metadata, facts):
""" Normalize gce facts
Args:
metadata (dict): provider metadata
facts (dict): facts to update
Returns:
dict: the result of adding the normalized metadata to the provided
facts dict
"""
for interface in metadata['instance']['networkInterfaces']:
int_info = dict(ips=[interface['ip']], network_type='gce')
int_info['public_ips'] = [ac['externalIp'] for ac
in interface['accessConfigs']]
int_info['public_ips'].extend(interface['forwardedIps'])
_, _, network_id = interface['network'].rpartition('/')
int_info['network_id'] = network_id
facts['network']['interfaces'].append(int_info)
_, _, zone = metadata['instance']['zone'].rpartition('/')
facts['zone'] = zone
# Default to no sdn for GCE deployments
facts['use_openshift_sdn'] = False
# GCE currently only supports a single interface
facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
facts['network']['public_ip'] = pub_ip
facts['network']['hostname'] = metadata['instance']['hostname']
# TODO: attempt to resolve public_hostname
facts['network']['public_hostname'] = facts['network']['public_ip']
return facts
def normalize_aws_facts(metadata, facts):
""" Normalize aws facts
Args:
metadata (dict): provider metadata
facts (dict): facts to update
Returns:
dict: the result of adding the normalized metadata to the provided
facts dict
"""
for interface in sorted(
metadata['network']['interfaces']['macs'].values(),
key=lambda x: x['device-number']
):
int_info = dict()
var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
for ips_var, int_var in var_map.iteritems():
ips = interface.get(int_var)
if isinstance(ips, basestring):
int_info[ips_var] = [ips]
else:
int_info[ips_var] = ips
if 'vpc-id' in interface:
int_info['network_type'] = 'vpc'
else:
int_info['network_type'] = 'classic'
if int_info['network_type'] == 'vpc':
int_info['network_id'] = interface['subnet-id']
else:
int_info['network_id'] = None
facts['network']['interfaces'].append(int_info)
facts['zone'] = metadata['placement']['availability-zone']
# TODO: actually attempt to determine default local and public ips
# by using the ansible default ip fact and the ipv4-associations
# from the ec2 metadata
facts['network']['ip'] = metadata.get('local-ipv4')
facts['network']['public_ip'] = metadata.get('public-ipv4')
# TODO: verify that local hostname makes sense and is resolvable
facts['network']['hostname'] = metadata.get('local-hostname')
# TODO: verify that public hostname makes sense and is resolvable
facts['network']['public_hostname'] = metadata.get('public-hostname')
return facts
def normalize_openstack_facts(metadata, facts):
""" Normalize openstack facts
Args:
metadata (dict): provider metadata
facts (dict): facts to update
Returns:
dict: the result of adding the normalized metadata to the provided
facts dict
"""
# openstack ec2 compat api does not support network interfaces and
# the version tested on did not include the info in the openstack
# metadata api, should be updated if neutron exposes this.
facts['zone'] = metadata['availability_zone']
local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
facts['network']['ip'] = local_ipv4
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
# TODO: verify local hostname makes sense and is resolvable
facts['network']['hostname'] = metadata['hostname']
# TODO: verify that public hostname makes sense and is resolvable
pub_h = metadata['ec2_compat']['public-hostname']
facts['network']['public_hostname'] = pub_h
return facts
def normalize_provider_facts(provider, metadata):
""" Normalize provider facts
Args:
provider (str): host provider
metadata (dict): provider metadata
Returns:
dict: the normalized provider facts
"""
if provider is None or metadata is None:
return {}
# TODO: test for ipv6_enabled where possible (gce, aws do not support)
# and configure ipv6 facts if available
# TODO: add support for setting user_data if available
facts = dict(name=provider, metadata=metadata,
network=dict(interfaces=[], ipv6_enabled=False))
if provider == 'gce':
facts = normalize_gce_facts(metadata, facts)
elif provider == 'ec2':
facts = normalize_aws_facts(metadata, facts)
elif provider == 'openstack':
facts = normalize_openstack_facts(metadata, facts)
return facts
def set_fluentd_facts_if_unset(facts):
""" Set fluentd facts if not already present in facts dict
dict: the facts dict updated with the generated fluentd facts if
missing
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the generated fluentd
facts if they were not already present
"""
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
if 'use_fluentd' not in facts['common']:
            use_fluentd = (deployment_type == 'online')
facts['common']['use_fluentd'] = use_fluentd
return facts
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the generated schedulable
facts if they were not already present
"""
if 'node' in facts:
if 'schedulable' not in facts['node']:
if 'master' in facts:
facts['node']['schedulable'] = False
else:
facts['node']['schedulable'] = True
return facts
def set_metrics_facts_if_unset(facts):
""" Set cluster metrics facts if not already present in facts dict
dict: the facts dict updated with the generated cluster metrics facts if
missing
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the generated cluster metrics
facts if they were not already present
"""
if 'common' in facts:
if 'use_cluster_metrics' not in facts['common']:
use_cluster_metrics = False
facts['common']['use_cluster_metrics'] = use_cluster_metrics
return facts
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the generated identity providers
facts if they were not already present
"""
if 'master' in facts:
deployment_type = facts['common']['deployment_type']
if 'identity_providers' not in facts['master']:
identity_provider = dict(
name='allow_all', challenge=True, login=True,
kind='AllowAllPasswordIdentityProvider'
)
if deployment_type == 'enterprise':
identity_provider = dict(
name='deny_all', challenge=True, login=True,
kind='DenyAllPasswordIdentityProvider'
)
facts['master']['identity_providers'] = [identity_provider]
return facts
def set_url_facts_if_unset(facts):
""" Set url facts if not already present in facts dict
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the generated url facts if they
were not already present
"""
if 'master' in facts:
api_use_ssl = facts['master']['api_use_ssl']
api_port = facts['master']['api_port']
console_use_ssl = facts['master']['console_use_ssl']
console_port = facts['master']['console_port']
console_path = facts['master']['console_path']
etcd_use_ssl = facts['master']['etcd_use_ssl']
etcd_hosts = facts['master']['etcd_hosts']
etcd_port = facts['master']['etcd_port']
hostname = facts['common']['hostname']
public_hostname = facts['common']['public_hostname']
cluster_hostname = facts['master'].get('cluster_hostname')
cluster_public_hostname = facts['master'].get('cluster_public_hostname')
if 'etcd_urls' not in facts['master']:
etcd_urls = []
if etcd_hosts != '':
facts['master']['etcd_port'] = etcd_port
facts['master']['embedded_etcd'] = False
for host in etcd_hosts:
etcd_urls.append(format_url(etcd_use_ssl, host,
etcd_port))
else:
etcd_urls = [format_url(etcd_use_ssl, hostname,
etcd_port)]
facts['master']['etcd_urls'] = etcd_urls
if 'api_url' not in facts['master']:
api_hostname = cluster_hostname if cluster_hostname else hostname
facts['master']['api_url'] = format_url(api_use_ssl, api_hostname,
api_port)
if 'public_api_url' not in facts['master']:
api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
facts['master']['public_api_url'] = format_url(api_use_ssl,
api_public_hostname,
api_port)
if 'console_url' not in facts['master']:
console_hostname = cluster_hostname if cluster_hostname else hostname
facts['master']['console_url'] = format_url(console_use_ssl,
console_hostname,
console_port,
console_path)
if 'public_console_url' not in facts['master']:
console_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
facts['master']['public_console_url'] = format_url(console_use_ssl,
console_public_hostname,
console_port,
console_path)
return facts
def set_aggregate_facts(facts):
""" Set aggregate facts
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with aggregated facts
"""
all_hostnames = set()
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
if 'master' in facts:
if 'cluster_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_public_hostname'])
facts['common']['all_hostnames'] = list(all_hostnames)
return facts
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
includes common.service_type, common.config_base, master.registry_url,
node.registry_url
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the generated deployment_type
facts
"""
# Perhaps re-factor this as a map?
# pylint: disable=too-many-branches
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
if 'service_type' not in facts['common']:
service_type = 'atomic-openshift'
if deployment_type == 'origin':
service_type = 'openshift'
elif deployment_type in ['enterprise', 'online']:
service_type = 'openshift'
facts['common']['service_type'] = service_type
if 'config_base' not in facts['common']:
config_base = '/etc/origin'
if deployment_type in ['enterprise', 'online']:
config_base = '/etc/openshift'
elif deployment_type == 'origin':
config_base = '/etc/openshift'
facts['common']['config_base'] = config_base
if 'data_dir' not in facts['common']:
data_dir = '/var/lib/origin'
if deployment_type in ['enterprise', 'online']:
data_dir = '/var/lib/openshift'
elif deployment_type == 'origin':
data_dir = '/var/lib/openshift'
facts['common']['data_dir'] = data_dir
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
if 'registry_url' not in facts[role]:
registry_url = 'aos3/aos-${component}:${version}'
if deployment_type in ['enterprise', 'online']:
registry_url = 'openshift3/ose-${component}:${version}'
elif deployment_type == 'origin':
registry_url = 'openshift/origin-${component}:${version}'
facts[role]['registry_url'] = registry_url
return facts
def set_sdn_facts_if_unset(facts):
""" Set sdn facts if not already present in facts dict
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
if 'common' in facts:
use_sdn = facts['common']['use_openshift_sdn']
if not (use_sdn == '' or isinstance(use_sdn, bool)):
facts['common']['use_openshift_sdn'] = bool(strtobool(str(use_sdn)))
if 'sdn_network_plugin_name' not in facts['common']:
plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
facts['common']['sdn_network_plugin_name'] = plugin
if 'master' in facts:
if 'sdn_cluster_network_cidr' not in facts['master']:
facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
if 'sdn_host_subnet_length' not in facts['master']:
facts['master']['sdn_host_subnet_length'] = '8'
if 'node' in facts:
if 'sdn_mtu' not in facts['node']:
facts['node']['sdn_mtu'] = '1450'
return facts
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
Args:
use_ssl (bool): is ssl enabled
hostname (str): hostname
port (str): port
path (str): url path
Returns:
str: The generated url string
"""
scheme = 'https' if use_ssl else 'http'
netloc = hostname
if (use_ssl and port != '443') or (not use_ssl and port != '80'):
netloc += ":%s" % port
return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
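# A brief illustrative sketch (not part of the original module) of format_url
# behaviour: default ports are omitted from the netloc, non-default ports and
# an optional path are kept. The hostnames below are made up.
def _example_format_url():
    assert format_url(True, 'master.example.com', '443') == 'https://master.example.com'
    assert format_url(True, 'master.example.com', '8443', '/console') == \
        'https://master.example.com:8443/console'
    assert format_url(False, 'master.example.com', '80') == 'http://master.example.com'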
def get_current_config(facts):
""" Get current openshift config
Args:
facts (dict): existing facts
Returns:
dict: the facts dict updated with the current openshift config
"""
current_config = dict()
roles = [role for role in facts if role not in ['common', 'provider']]
for role in roles:
if 'roles' in current_config:
current_config['roles'].append(role)
else:
current_config['roles'] = [role]
# TODO: parse the /etc/sysconfig/openshift-{master,node} config to
# determine the location of files.
# TODO: I suspect this isn't working right now, but it doesn't prevent
# anything from working properly as far as I can tell, perhaps because
# we override the kubeconfig path everywhere we use it?
# Query kubeconfig settings
kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
if role == 'node':
kubeconfig_dir = os.path.join(
kubeconfig_dir, "node-%s" % facts['common']['hostname']
)
kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
if (os.path.isfile('/usr/bin/openshift')
and os.path.isfile(kubeconfig_path)):
try:
_, output, _ = module.run_command(
["/usr/bin/openshift", "ex", "config", "view", "-o",
"json", "--kubeconfig=%s" % kubeconfig_path],
check_rc=False
)
config = json.loads(output)
cad = 'certificate-authority-data'
try:
for cluster in config['clusters']:
config['clusters'][cluster][cad] = 'masked'
except KeyError:
pass
try:
for user in config['users']:
config['users'][user][cad] = 'masked'
config['users'][user]['client-key-data'] = 'masked'
except KeyError:
pass
current_config['kubeconfig'] = config
# override pylint broad-except warning, since we do not want
# to bubble up any exceptions if oc config view
# fails
# pylint: disable=broad-except
except Exception:
pass
return current_config
def apply_provider_facts(facts, provider_facts):
""" Apply provider facts to supplied facts dict
Args:
facts (dict): facts dict to update
provider_facts (dict): provider facts to apply
Returns:
dict: the merged facts
"""
if not provider_facts:
return facts
use_openshift_sdn = provider_facts.get('use_openshift_sdn')
if isinstance(use_openshift_sdn, bool):
facts['common']['use_openshift_sdn'] = use_openshift_sdn
common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
for h_var, ip_var in common_vars:
ip_value = provider_facts['network'].get(ip_var)
if ip_value:
facts['common'][ip_var] = ip_value
facts['common'][h_var] = choose_hostname(
[provider_facts['network'].get(h_var)],
facts['common'][ip_var]
)
facts['provider'] = provider_facts
return facts
def merge_facts(orig, new):
""" Recursively merge facts dicts
Args:
orig (dict): existing facts
new (dict): facts to update
Returns:
dict: the merged facts
"""
facts = dict()
for key, value in orig.iteritems():
if key in new:
if isinstance(value, dict):
facts[key] = merge_facts(value, new[key])
else:
facts[key] = copy.copy(new[key])
else:
facts[key] = copy.deepcopy(value)
new_keys = set(new.keys()) - set(orig.keys())
for key in new_keys:
facts[key] = copy.deepcopy(new[key])
return facts
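# An illustrative sketch (not part of the original module) of the recursive
# merge: values from 'new' override 'orig' leaf-by-leaf, while keys that only
# exist on one side are preserved. The fact values below are made up.
def _example_merge_facts():
    orig = {'common': {'hostname': 'a.example.com', 'use_openshift_sdn': True}}
    new = {'common': {'hostname': 'b.example.com'}, 'node': {'schedulable': True}}
    merged = merge_facts(orig, new)
    assert merged['common']['hostname'] == 'b.example.com'
    assert merged['common']['use_openshift_sdn'] is True
    assert merged['node'] == {'schedulable': True}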
def save_local_facts(filename, facts):
""" Save local facts
Args:
filename (str): local facts file
facts (dict): facts to set
"""
try:
fact_dir = os.path.dirname(filename)
if not os.path.exists(fact_dir):
os.makedirs(fact_dir)
with open(filename, 'w') as fact_file:
fact_file.write(module.jsonify(facts))
except (IOError, OSError) as ex:
raise OpenShiftFactsFileWriteError(
"Could not create fact file: %s, error: %s" % (filename, ex)
)
def get_local_facts_from_file(filename):
""" Retrieve local facts from fact file
Args:
filename (str): local facts file
Returns:
dict: the retrieved facts
"""
local_facts = dict()
try:
# Handle conversion of INI style facts file to json style
ini_facts = ConfigParser.SafeConfigParser()
ini_facts.read(filename)
for section in ini_facts.sections():
local_facts[section] = dict()
for key, value in ini_facts.items(section):
local_facts[section][key] = value
except (ConfigParser.MissingSectionHeaderError,
ConfigParser.ParsingError):
try:
with open(filename, 'r') as facts_file:
local_facts = json.load(facts_file)
except (ValueError, IOError):
pass
return local_facts
class OpenShiftFactsUnsupportedRoleError(Exception):
"""Origin Facts Unsupported Role Error"""
pass
class OpenShiftFactsFileWriteError(Exception):
"""Origin Facts File Write Error"""
pass
class OpenShiftFactsMetadataUnavailableError(Exception):
"""Origin Facts Metadata Unavailable Error"""
pass
class OpenShiftFacts(object):
""" Origin Facts
Attributes:
facts (dict): facts for the host
Args:
role (str): role for setting local facts
filename (str): local facts file to use
local_facts (dict): local facts to set
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
def __init__(self, role, filename, local_facts):
self.changed = False
self.filename = filename
if role not in self.known_roles:
raise OpenShiftFactsUnsupportedRoleError(
"Role %s is not supported by this module" % role
)
self.role = role
self.system_facts = ansible_facts(module)
self.facts = self.generate_facts(local_facts)
def generate_facts(self, local_facts):
""" Generate facts
Args:
local_facts (dict): local_facts for overriding generated
defaults
Returns:
dict: The generated facts
"""
local_facts = self.init_local_facts(local_facts)
roles = local_facts.keys()
defaults = self.get_defaults(roles)
provider_facts = self.init_provider_facts()
facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts, local_facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_fluentd_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_sdn_facts_if_unset(facts)
facts = set_deployment_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
return dict(openshift=facts)
def get_defaults(self, roles):
""" Get default fact values
Args:
roles (list): list of roles for this host
Returns:
dict: The generated default facts
"""
defaults = dict()
ip_addr = self.system_facts['default_ipv4']['address']
exit_code, output, _ = module.run_command(['hostname', '-f'])
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['nodename'],
self.system_facts['fqdn']]
hostname = choose_hostname(hostname_values, ip_addr)
common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr,
deployment_type='origin', hostname=hostname,
public_hostname=hostname)
common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
defaults['common'] = common
if 'master' in roles:
master = dict(api_use_ssl=True, api_port='8443',
console_use_ssl=True, console_path='/console',
console_port='8443', etcd_use_ssl=True, etcd_hosts='',
etcd_port='4001', portal_net='172.30.0.0/16',
embedded_etcd=True, embedded_kube=True,
embedded_dns=True, dns_port='53',
bind_addr='0.0.0.0', session_max_seconds=3600,
session_name='ssn', session_secrets_file='',
access_token_max_seconds=86400,
auth_token_max_seconds=500,
oauth_grant_method='auto', cluster_defer_ha=False)
defaults['master'] = master
if 'node' in roles:
node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16')
defaults['node'] = node
return defaults
def guess_host_provider(self):
""" Guess the host provider
Returns:
dict: The generated default facts for the detected provider
"""
# TODO: cloud provider facts should probably be submitted upstream
product_name = self.system_facts['product_name']
product_version = self.system_facts['product_version']
virt_type = self.system_facts['virtualization_type']
virt_role = self.system_facts['virtualization_role']
provider = None
metadata = None
# TODO: this is not exposed through module_utils/facts.py in ansible,
# need to create PR for ansible to expose it
bios_vendor = get_file_content(
'/sys/devices/virtual/dmi/id/bios_vendor'
)
if bios_vendor == 'Google':
provider = 'gce'
metadata_url = ('http://metadata.google.internal/'
'computeMetadata/v1/?recursive=true')
headers = {'Metadata-Flavor': 'Google'}
metadata = get_provider_metadata(metadata_url, True, headers,
True)
# Filter sshKeys and serviceAccounts from gce metadata
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
elif (virt_type == 'xen' and virt_role == 'guest'
and re.match(r'.*\.amazon$', product_version)):
provider = 'ec2'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
metadata = get_provider_metadata(metadata_url)
elif re.search(r'OpenStack', product_name):
provider = 'openstack'
metadata_url = ('http://169.254.169.254/openstack/latest/'
'meta_data.json')
metadata = get_provider_metadata(metadata_url, True, None,
True)
if metadata:
ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
metadata['ec2_compat'] = get_provider_metadata(
ec2_compat_url
)
# disable pylint maybe-no-member because overloaded use of
# the module name causes pylint to not detect that results
# is an array or hash
# pylint: disable=maybe-no-member
# Filter public_keys and random_seed from openstack metadata
metadata.pop('public_keys', None)
metadata.pop('random_seed', None)
if not metadata['ec2_compat']:
metadata = None
return dict(name=provider, metadata=metadata)
def init_provider_facts(self):
""" Initialize the provider facts
Returns:
dict: The normalized provider facts
"""
provider_info = self.guess_host_provider()
provider_facts = normalize_provider_facts(
provider_info.get('name'),
provider_info.get('metadata')
)
return provider_facts
def init_local_facts(self, facts=None):
""" Initialize the provider facts
Args:
facts (dict): local facts to set
Returns:
dict: The result of merging the provided facts with existing
local facts
"""
changed = False
facts_to_set = {self.role: dict()}
if facts is not None:
facts_to_set[self.role] = facts
local_facts = get_local_facts_from_file(self.filename)
for arg in ['labels', 'annotations']:
if arg in facts_to_set and isinstance(facts_to_set[arg],
basestring):
facts_to_set[arg] = module.from_json(facts_to_set[arg])
new_local_facts = merge_facts(local_facts, facts_to_set)
for facts in new_local_facts.values():
keys_to_delete = []
for fact, value in facts.iteritems():
if value == "" or value is None:
keys_to_delete.append(fact)
for key in keys_to_delete:
del facts[key]
if new_local_facts != local_facts:
changed = True
if not module.check_mode:
save_local_facts(self.filename, new_local_facts)
self.changed = changed
return new_local_facts
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name
global module
module = AnsibleModule(
argument_spec=dict(
role=dict(default='common', required=False,
choices=OpenShiftFacts.known_roles),
local_facts=dict(default=None, type='dict', required=False),
),
supports_check_mode=True,
add_file_common_args=True,
)
role = module.params['role']
local_facts = module.params['local_facts']
fact_file = '/etc/ansible/facts.d/openshift.fact'
openshift_facts = OpenShiftFacts(role, fact_file, local_facts)
file_params = module.params.copy()
file_params['path'] = fact_file
file_args = module.load_file_common_arguments(file_params)
changed = module.set_fs_attributes_if_different(file_args,
openshift_facts.changed)
return module.exit_json(changed=changed,
ansible_facts=openshift_facts.facts)
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
|
from __future__ import division, absolute_import, print_function
import functools as ft
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import regularizers
from niftynet.layer.convolution import ConvLayer
from niftynet.layer.convolution import ConvolutionalLayer
from tests.niftynet_testcase import NiftyNetTestCase
class ConvTest(NiftyNetTestCase):
def get_3d_input(self):
input_shape = (2, 16, 16, 16, 8)
x_3d = tf.ones(input_shape)
return x_3d
def get_2d_input(self):
input_shape = (2, 16, 16, 8)
x_2d = tf.ones(input_shape)
return x_2d
def _test_conv_output_shape(self,
rank,
param_dict,
output_shape):
if rank == 2:
input_data = self.get_2d_input()
elif rank == 3:
input_data = self.get_3d_input()
conv_layer = ConvLayer(**param_dict)
output_data = conv_layer(input_data)
print(conv_layer)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
output_value = sess.run(output_data)
self.assertAllClose(output_shape, output_value.shape)
def _test_conv_layer_output_shape(self,
rank,
param_dict,
output_shape,
is_training=None,
dropout_prob=None):
if rank == 2:
input_data = self.get_2d_input()
elif rank == 3:
input_data = self.get_3d_input()
conv_layer = ConvolutionalLayer(**param_dict)
output_data = conv_layer(input_data,
is_training=is_training,
keep_prob=dropout_prob)
print(conv_layer)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
output_value = sess.run(output_data)
self.assertAllClose(output_shape, output_value.shape)
def _test_extended_conv(self, orig_input, init_dict):
"""
Tests the extended padding options of ConvLayer
"""
def _w_init(shape, dtype=tf.float32, **kwargs):
data = np.arange(ft.reduce(lambda prod, x: prod*x, shape, 1))\
.astype(np.float32)
data *= 2.374/data.mean()
data -= data.mean()
return tf.constant(data.reshape(shape), dtype=dtype)
def _b_init(shape, dtype=tf.float32, **kwargs):
data = np.arange(shape[0]).astype(np.float32)
data *= 0.273/data.mean()
data -= data.mean()
return tf.constant(data.reshape(shape), dtype=dtype)
init_dict['w_initializer'] = _w_init
init_dict['b_initializer'] = _b_init
conv_layer = ConvLayer(**init_dict)
small_output = conv_layer(tf.constant(orig_input))
input_shape = orig_input.shape
multiplier = init_dict['kernel_size'] + init_dict['dilation'] \
+ init_dict['stride']
pad = [d*multiplier for d in input_shape[1:-1]]
paddings = [(0, 0)] + [(p, p) for p in pad] + [(0, 0)]
if init_dict['padding'] == 'CONSTANT':
opts = {'constant_values': init_dict.get('padding_constant', 0)}
else:
opts = {}
enlarged_input = np.pad(orig_input,
paddings,
init_dict['padding'].lower(),
**opts)
conv_layer.padding = 'SAME'
large_output = conv_layer(tf.constant(enlarged_input))
def _extract_valid_region(output_tensor, target_tensor):
output_shape = output_tensor.shape
target_shape = target_tensor.shape
extr_slices = []
for d in range(len(target_shape)):
opad = (output_shape[d] - target_shape[d])//2
extr_slices.append(slice(
opad, opad + target_shape[d]))
return output_tensor[tuple(extr_slices)]
assert np.square(
_extract_valid_region(enlarged_input, orig_input) - orig_input).sum() \
<= 1e-6*np.square(orig_input).sum()
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
small_value = sess.run(small_output)
large_value = sess.run(large_output)
extr_value = _extract_valid_region(large_value, small_value)
print(np.square(small_value - extr_value).sum()/np.square(extr_value).sum())
self.assertAllClose(small_value, extr_value, rtol=1e-3)
def _get_pad_test_input_3d(self):
data = np.arange(1024, dtype=np.float32)
return data.reshape([1, 16, 4, 4, 4])
def _get_pad_test_input_2d(self):
data = np.arange(256, dtype=np.float32)
return data.reshape([4, 8, 4, 2])
# padding tests
def _test_extended_padding(self, pad, do_2d):
batch = self._get_pad_test_input_2d() if do_2d \
else self._get_pad_test_input_3d()
const = 127.23
min_dim = min(batch.shape[1:-1]) - 1
for ks in (2, min_dim):
for ds in (1, min_dim):
name = 'pad_test_conv' + ('2' if do_2d else '3')
name += "%i_%i" % (ks, ds)
init_dict = {'n_output_chns': 4,
'kernel_size': ks,
'stride': 1,
'dilation': ds,
'padding': pad,
'name': name}
if ds%2 == 0:
init_dict['padding_constant'] = const
self._test_extended_conv(batch, init_dict)
def test_2d_const_padding(self):
self._test_extended_padding('CONSTANT', True)
def test_2d_reflect_padding(self):
self._test_extended_padding('REFLECT', True)
def test_2d_symmetric_padding(self):
self._test_extended_padding('SYMMETRIC', True)
def test_3d_const_padding(self):
self._test_extended_padding('CONSTANT', False)
def test_3d_reflect_padding(self):
self._test_extended_padding('REFLECT', False)
def test_3d_symmetric_padding(self):
self._test_extended_padding('SYMMETRIC', False)
# 3d tests
def test_3d_conv_default_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 3,
'stride': 1}
self._test_conv_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 16, 10))
def test_3d_conv_full_kernel_size(self):
input_param = {'n_output_chns': 10,
'kernel_size': [3, 3, 1],
'stride': 1}
self._test_conv_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 16, 10))
def test_3d_conv_full_strides(self):
input_param = {'n_output_chns': 10,
'kernel_size': [3, 3, 1],
'stride': [1, 1, 2]}
self._test_conv_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 8, 10))
def test_3d_anisotropic_conv(self):
input_param = {'n_output_chns': 10,
'kernel_size': [3, 2, 1],
'stride': [1, 1, 2]}
self._test_conv_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 8, 10))
def test_3d_conv_bias_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [3, 1, 3],
'stride': [1, 1, 2],
'with_bias': True}
self._test_conv_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 8, 10))
def test_conv_3d_bias_reg_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [3, 1, 3],
'stride': [2, 2, 2],
'with_bias': True,
'w_regularizer': regularizers.l2_regularizer(0.5),
'b_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 8, 8, 8, 10))
def test_3d_convlayer_default_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 3,
'stride': 1}
self._test_conv_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 16, 10))
def test_3d_convlayer_dilation_default_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 3,
'stride': 1,
'dilation': [1, 2, 1]}
self._test_conv_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 16, 10))
def test_3d_convlayer_bias_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 3,
'stride': 1,
'with_bias': True,
'feature_normalization': None}
self._test_conv_layer_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 16, 10))
def test_convlayer_3d_bias_reg_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 3,
'stride': 1,
'with_bias': True,
'feature_normalization': None,
'w_regularizer': regularizers.l2_regularizer(0.5),
'b_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 16, 10))
def test_convlayer_3d_bn_reg_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [5, 1, 2],
'stride': 1,
'with_bias': False,
'feature_normalization': 'batch',
'w_regularizer': regularizers.l2_regularizer(0.5),
'b_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 16, 10),
is_training=True)
def test_convlayer_3d_bn_reg_prelu_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [5, 1, 2],
'stride': [1, 1, 2],
'with_bias': False,
'feature_normalization': 'batch',
'acti_func': 'prelu',
'w_regularizer': regularizers.l2_regularizer(0.5),
'b_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 16, 8, 10),
is_training=True)
def test_convlayer_3d_relu_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [5, 1, 2],
'stride': [1, 2, 2],
'with_bias': False,
'feature_normalization': 'batch',
'acti_func': 'relu',
'w_regularizer': regularizers.l2_regularizer(0.5),
'b_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 8, 8, 10),
is_training=True)
def test_convlayer_3d_bn_reg_dropout_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [5, 1, 2],
'stride': [1, 2, 2],
'with_bias': False,
'feature_normalization': 'batch',
'acti_func': 'prelu'}
self._test_conv_layer_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 16, 8, 8, 10),
is_training=True,
dropout_prob=0.4)
def test_convlayer_3d_bn_reg_dropout_valid_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [5, 3, 2],
'stride': [2, 2, 3],
'with_bias': False,
'feature_normalization': 'batch',
'w_regularizer': regularizers.l2_regularizer(0.5),
'acti_func': 'prelu',
'padding': 'VALID'}
self._test_conv_layer_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 6, 7, 5, 10),
is_training=True,
dropout_prob=0.4)
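    # A small sketch (not part of the original tests) of the arithmetic behind
    # the expected 'VALID' output shapes above: with no padding each spatial
    # dimension shrinks to ceil((input - kernel + 1) / stride); for the
    # 16x16x16 input with kernel [5, 3, 2] and stride [2, 2, 3] this gives
    # (6, 7, 5), matching the assertion in the previous test.
    @staticmethod
    def _expected_valid_size(input_size, kernel_size, stride):
        import math
        return int(math.ceil((input_size - kernel_size + 1) / float(stride)))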
def test_convlayer_3d_group_reg_dropout_valid_shape(self):
input_param = {'n_output_chns': 8,
'kernel_size': [5, 3, 2],
'stride': [2, 2, 3],
'with_bias': False,
'feature_normalization': 'group',
'group_size': 4,
'w_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 8, 8, 6, 8),
is_training=True,
dropout_prob=0.4)
# 2d tests
def test_2d_conv_default_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [5, 3],
'stride': [2, 2]}
self._test_conv_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 8, 8, 10))
def test_2d_conv_dilation_default_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [5, 3],
'stride': [1, 1],
'dilation': [2, 1]}
self._test_conv_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 16, 16, 10))
def test_2d_conv_bias_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [5, 3],
'stride': [1, 2],
'with_bias': True}
self._test_conv_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 16, 8, 10))
def test_conv_2d_bias_reg_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 5,
'stride': 1,
'with_bias': True,
'w_regularizer': regularizers.l2_regularizer(0.5),
'b_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 16, 16, 10))
def test_2d_convlayer_default_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 2,
'stride': 1,
'with_bias': True}
self._test_conv_layer_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 16, 16, 10),
is_training=True)
def test_2d_convlayer_bias_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 2,
'stride': [2, 1],
'with_bias': True,
'feature_normalization': None}
self._test_conv_layer_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 8, 16, 10))
def test_convlayer_2d_bias_reg_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [3, 5],
'stride': [2, 1],
'with_bias': True,
'feature_normalization': None,
'w_regularizer': regularizers.l2_regularizer(0.5),
'b_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 8, 16, 10))
def test_convlayer_2d_bn_reg_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [3, 5],
'stride': [2, 1],
'with_bias': False,
'feature_normalization': 'batch',
'w_regularizer': regularizers.l2_regularizer(0.5),
'b_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 8, 16, 10),
is_training=True)
def test_convlayer_2d_bn_reg_prelu_2_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 3,
'stride': [2, 1],
'with_bias': False,
'feature_normalization': 'batch',
'acti_func': 'prelu'}
self._test_conv_layer_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 8, 16, 10),
is_training=True)
def test_convlayer_2d_relu_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 3,
'stride': [3, 1],
'with_bias': False,
'feature_normalization': 'batch',
'acti_func': 'relu'}
self._test_conv_layer_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 6, 16, 10),
is_training=True)
def test_convlayer_2d_bn_reg_prelu_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': 3,
'stride': 1,
'with_bias': False,
'feature_normalization': 'batch',
'acti_func': 'prelu',
'w_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 16, 16, 10),
is_training=True)
def test_convlayer_2d_bn_reg_valid_shape(self):
input_param = {'n_output_chns': 10,
'kernel_size': [3, 2],
'stride': [2, 3],
'with_bias': False,
'feature_normalization': 'batch',
'acti_func': 'prelu',
'padding': 'VALID',
'w_regularizer': regularizers.l2_regularizer(0.5)}
self._test_conv_layer_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 7, 5, 10),
is_training=True)
if __name__ == "__main__":
tf.test.main()
|
|
#!/usr/bin/python
"""
Code taken from SmartMeshSDK. The original code was modified.
Copyright (c) 2012, Dust Networks
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dust Networks nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DUST NETWORKS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import struct
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('HrParser')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
HR_ID_DEVICE = 0x80
HR_ID_NEIGHBORS = 0x81
HR_ID_DISCOVERED = 0x82
HR_ID_EXTENDED = 0x91
HR_ID_ALL = [
HR_ID_DEVICE,
HR_ID_NEIGHBORS,
HR_ID_DISCOVERED,
HR_ID_EXTENDED,
]
HR_ID_EXTENDED_RSSI = 1
HR_DESC_DEVICE = [
('charge', 'I'),
('queueOcc', 'B'),
('temperature', 'b'),
('batteryVoltage', 'H'),
('numTxOk', 'H'),
('numTxFail', 'H'),
('numRxOk', 'H'),
('numRxLost', 'H'),
('numMacDropped', 'B'),
('numTxBad', 'B'),
('badLinkFrameId', 'B'),
('badLinkSlot', 'I'),
('badLinkOffset', 'B'),
('numNetMicErr', 'B'),
('numMacMicErr', 'B'),
('numMacCrcErr', 'B'),
]
HR_DESC_NEIGHBORS = [
('numItems', 'B'),
]
HR_DESC_NEIGHBOR_DATA = [
('neighborId', 'H'),
('neighborFlag', 'B'),
('rssi', 'b'),
('numTxPackets', 'H'),
('numTxFailures', 'H'),
('numRxPackets', 'H'),
]
HR_DESC_DISCOVERED = [
('numJoinParents', 'B'),
('numItems', 'B'),
]
HR_DESC_DISCOVERED_DATA = [
('neighborId', 'H'),
('rssi', 'b'),
('numRx', 'B'),
]
HR_DESC_EXTENDED = [
('extType', 'B'),
('extLength', 'B'),
]
HR_DESC_EXTENDED_RSSI_DATA = [
('idleRssi', 'b'),
('txUnicastAttempts', 'H'),
('txUnicastFailures', 'H'),
]
# ======================= public ==============================================
def parseHr(hr):
"""
Parse a byte list representing a received HR.
:returns: The parsed HR, of the following format:
{
'Device': {
<fieldName>: <fieldVal>,
...
}
'Neighbors': {
<fieldName>: <fieldVal>,
...,
'neighbors': [
{
<fieldName>: <fieldVal>,
...
}
]
}
'Discovered': {
<fieldName>: <fieldVal>,
...,
'discoveredNeighbors': [
{
<fieldName>: <fieldVal>,
...
}
]
}
}
"""
returnVal = {}
while hr:
if len(hr) < 2:
raise ValueError("Less than 2 bytes in HR")
hr_id = hr[0]
length = hr[1]
payload = hr[2:2+length]
# parse current HR
if hr_id == HR_ID_DEVICE:
returnVal['Device'] = _parseDevice(payload)
elif hr_id == HR_ID_NEIGHBORS:
returnVal['Neighbors'] = _parseNeighbors(payload)
elif hr_id == HR_ID_DISCOVERED:
returnVal['Discovered'] = _parseDiscovered(payload)
elif hr_id == HR_ID_EXTENDED:
returnVal['Extended'] = _parseExtended(payload)
else:
raise ValueError("unknown HR id {0}".format(hr_id))
# remove current HR
hr = hr[2+length:]
return returnVal
def formatHr(hr):
return _formatHr_recursive(hr, 0)
# ======================= private =============================================
def _formatHr_recursive(e, lvl):
output = []
indent = ' '*(4*lvl)
if type(e) in [str, int]:
output += [str(e)]
elif type(e) == dict:
for k in sorted(e.keys()):
if type(e[k]) in [dict, list]:
formatString = '{0}- {1}:\n{2}'
else:
formatString = '{0}- {1:<20}: {2}'
output += [formatString.format(indent, k, _formatHr_recursive(e[k], lvl+1))]
elif type(e) == list:
for idx, v in enumerate(e):
if type(v) in [dict, list]:
output += ['{0}-item {1}\n{2}'.format(
indent,
idx,
_formatHr_recursive(v, lvl+1)
)
]
else:
output += ['{0}- {1}'.format(
indent,
_formatHr_recursive(v, lvl+1)
)
]
else:
raise SystemError("unexpected type {0}".format(type(e)))
output = '\n'.join(output)
return output
def _parseDevice(payload):
(remainder, fields) = _parseAs(
desc = HR_DESC_DEVICE,
payload = payload,
)
assert not remainder
return fields
def _parseNeighbors(payload):
# parse the header
(payload, fields) = _parseAs(
desc = HR_DESC_NEIGHBORS,
payload = payload,
)
# parse the neighbors
fields['neighbors'] = []
for _ in range(fields['numItems']):
(payload, newItem) = _parseAs(
desc = HR_DESC_NEIGHBOR_DATA,
payload = payload,
)
fields['neighbors'] += [newItem]
return fields
def _parseDiscovered(payload):
# parse the header
(payload, fields) = _parseAs(
desc = HR_DESC_DISCOVERED,
payload = payload,
)
# parse the discoveredNeighbors
fields['discoveredNeighbors'] = []
for _ in range(fields['numItems']):
(payload, newItem) = _parseAs(
desc = HR_DESC_DISCOVERED_DATA,
payload = payload,
)
fields['discoveredNeighbors'] += [newItem]
return fields
def _parseExtended(payload):
# parse the header
(payload, fields) = _parseAs(
desc = HR_DESC_EXTENDED,
payload = payload,
)
if fields['extLength'] != len(payload):
raise ValueError("extLength={0} while len(extended HR payload)={1}"
.format(fields['extLength'], len(payload)))
returnVal = {}
if fields['extType'] == HR_ID_EXTENDED_RSSI:
returnVal['RSSI'] = _parseExtendedRSSI(payload)
else:
raise ValueError("unknown extended HR extType {0}".format(fields['extType']))
return returnVal
def _parseExtendedRSSI(payload):
if len(payload) != 75:
raise ValueError("RSSI HR should be of length 75, not {0}".format(len(payload)))
returnVal = []
while payload:
(payload, fields) = _parseAs(
desc = HR_DESC_EXTENDED_RSSI_DATA,
payload = payload,
)
returnVal += [fields]
return returnVal
# ======================= helpers =============================================
def _parseAs(desc, payload):
returnVal = {}
# assemble the format string
fmt = '>'
numFields = 0
while True:
fmt += desc[numFields][1]
numBytes = struct.calcsize(fmt)
if numBytes == len(payload):
break
numFields += 1
if len(desc) == numFields:
break
# verify enough bytes
if len(payload) < numBytes:
raise ValueError("not enough bytes for HR")
# separate string to parse from remainder
hrstring = ''.join([chr(b) for b in payload[:numBytes]])
remainder = payload[numBytes:]
# apply the format string
fields = struct.unpack(fmt, hrstring)
for (d, v) in zip(desc, fields):
returnVal[d[0]] = v
return remainder, returnVal
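# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original parser): one way this module
# might be driven, assuming a Device health report whose 27-byte payload is all
# zeros. The byte values are illustrative only, and the sketch targets
# Python 2, which _parseAs already assumes by feeding struct.unpack() a str
# built with chr().
if __name__ == '__main__':
    _sample_hr = [HR_ID_DEVICE, 27] + [0] * 27   # id byte, length byte, payload
    _parsed = parseHr(_sample_hr)                # {'Device': {<field>: 0, ...}}
    print(formatHr(_parsed))                     # indented, human-readable dump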
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import maya
import mock
from gcdt_testtools.helpers import logcapture
from gcdt_lookups.lookups import _resolve_lookups, _identify_stacks_recurse, \
lookup, _find_matching_certificate, _acm_lookup
from gcdt_lookups.credstash_utils import ItemNotFound
from gcdt.gcdt_defaults import CONFIG_READER_CONFIG
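# Hedged note (summarizing what the tests below exercise, not taken from gcdt
# docs): config values of the form 'lookup:<kind>:...' are resolved in place by
# _resolve_lookups, e.g. 'lookup:stack:<stack>[:<output>][:optional]',
# 'lookup:secret:<name>[:CONTINUE_IF_NOT_FOUND]', 'lookup:ssl:<domain>' and
# 'lookup:baseami'.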
def test_identify_stacks_recurse():
# make sure result is unique
# sample from data-platform, ingest
config = {
'PolicyLambdaDefaultVar' : "lookup:stack:dp-dev-operations-common:PolicyLambdaDefault",
'PolicyLambdaDefaultVar2': "lookup:stack:dp-dev-operations-common:PolicyLambdaDefault"
}
assert _identify_stacks_recurse(config, ['stack']) == \
set([('dp-dev-operations-common', None)])
# sample from pnb-ftp, ftpbackend
config = {
'VpcId': "lookup:stack:pnb-dev:DefaultVPCId"
}
assert _identify_stacks_recurse(config, ['stack']) == set([('pnb-dev', None)])
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
@mock.patch('gcdt_lookups.lookups.stack_exists', return_value=True)
def test_stack_lookup_value(mock_stack_exists, mock_get_outputs_for_stack):
mock_get_outputs_for_stack.return_value = {
'EC2BasicsLambdaArn':
'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12',
}
# sample from data-platform, operations
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'LambdaLookupARN': 'lookup:stack:dp-preprod:EC2BasicsLambdaArn'
}
_resolve_lookups(context, config, ['stack'])
mock_get_outputs_for_stack.assert_called_once_with(
'my_awsclient', 'dp-preprod', None)
assert config.get('LambdaLookupARN') == \
'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12'
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
@mock.patch('gcdt_lookups.lookups.stack_exists', return_value=True)
def test_stack_lookup_optional_value_no_output(mock_stack_exists, mock_get_outputs_for_stack):
mock_get_outputs_for_stack.return_value = {
'AnotherValue':
'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12',
}
# sample from data-platform, operations
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'LambdaLookupARN': 'lookup:stack:dp-preprod:EC2BasicsLambdaArn:optional'
}
_resolve_lookups(context, config, ['stack'])
mock_get_outputs_for_stack.assert_called_once_with(
'my_awsclient', 'dp-preprod', None)
assert config.get('LambdaLookupARN') == ''
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
@mock.patch('gcdt_lookups.lookups.stack_exists', return_value=False)
def test_stack_lookup_optional_value_no_stack(mock_stack_exists, mock_get_outputs_for_stack):
# sample from data-platform, operations
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'LambdaLookupARN': 'lookup:stack:non-existing-stack:NonExistingValue:optional'
}
_resolve_lookups(context, config, ['stack'])
assert config.get('LambdaLookupARN') == ''
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
@mock.patch('gcdt_lookups.lookups.stack_exists', return_value=True)
def test_stack_lookup_stack_output(mock_stack_exists,
mock_get_outputs_for_stack):
# lookup:stack:<stack_name> w/o value gets us the whole stack_output
stack_output = {
'EC2BasicsLambdaArn':
'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12',
}
mock_get_outputs_for_stack.return_value = stack_output
# sample from data-platform, operations
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'stack_output': 'lookup:stack:dp-preprod'
}
_resolve_lookups(context, config, ['stack'])
mock_get_outputs_for_stack.assert_called_once_with(
'my_awsclient', 'dp-preprod', None)
assert config.get('stack_output') == stack_output
@mock.patch('gcdt_lookups.lookups.get_base_ami',
return_value='img-123456')
def test_baseami_lookup(mock_get_base_ami):
# sample from mes-ftp, ftpbackend
context = {
'_awsclient': 'my_awsclient',
'tool': 'kumo'
}
config = {
'BaseAMIID': 'lookup:baseami'
}
_resolve_lookups(context, config, ['baseami'])
mock_get_base_ami.assert_called_once_with(
'my_awsclient', ['569909643510'])
assert config.get('BaseAMIID') == 'img-123456'
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
@mock.patch('gcdt_lookups.lookups.get_ssl_certificate')
def test_read_config_mock_service_discovery_ssl(
mock_get_ssl_certificate, mock_get_outputs_for_stack):
mock_get_outputs_for_stack.return_value = {
'DefaultInstancePolicyARN':
'arn:aws:bla:blub',
}
# Mock Output (List SSL Certs)
mock_get_ssl_certificate.return_value = 'arn:aws:iam::11:server-certificate/cloudfront/2016/wildcard.dp.glomex.cloud-2016-03'
# sample from mes-proxy
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'DefaultInstancePolicyARN': 'lookup:stack:portal-dev:DefaultInstancePolicyARN',
'SSLCert': 'lookup:ssl:wildcard.glomex.com'
}
_resolve_lookups(context, config, ['ssl', 'stack'])
mock_get_outputs_for_stack.assert_called_once_with(
'my_awsclient', 'portal-dev', None)
mock_get_ssl_certificate.assert_called_once_with(
'my_awsclient', 'wildcard.glomex.com')
assert config.get('SSLCert') == \
'arn:aws:iam::11:server-certificate/cloudfront/2016/wildcard.dp.glomex.cloud-2016-03'
# tests taken and modified from former config_reader tests
@mock.patch('gcdt_lookups.lookups.get_ssl_certificate')
@mock.patch('gcdt_lookups.lookups.get_secret')
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
@mock.patch('gcdt_lookups.lookups.stack_exists', return_value=True)
def test_lookup_selective_stack_lookup_all_lookups(
mock_stack_exists,
mock_get_outputs_for_stack, mock_get_secret,
mock_get_ssl_certificate):
# Mock Output (Credstash result)
mock_get_secret.return_value = 'secretPassword'
# Mock Output (SSL Cert)
mock_get_ssl_certificate.return_value = 'arn:aws:iam::11:server-certificate/cloudfront/2016/wildcard.dp.glomex.cloud-2016-03'
# Mock Output (Desc Stack)
mock_get_outputs_for_stack.return_value = {
'EC2BasicsLambdaArn': 'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12',
}
context = {
'_awsclient': 'my_awsclient',
'tool': 'kumo'
}
config = {
'secret': 'lookup:secret:nameOfSecretPassword',
'sslCert': 'lookup:ssl:wildcard.dp.glomex.cloud-2016-03',
'stack': 'lookup:stack:dp-preprod:EC2BasicsLambdaArn'
}
_resolve_lookups(context, config, ['ssl', 'stack', 'secret'])
assert config.get('secret') == 'secretPassword'
assert config.get('sslCert') == \
'arn:aws:iam::11:server-certificate/cloudfront/2016/wildcard.dp.glomex.cloud-2016-03'
assert config.get('stack') == \
'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12'
# I split the combined testcases into separate instances
# sorry if this is a little c&p
@mock.patch('gcdt_lookups.lookups.get_ssl_certificate')
@mock.patch('gcdt_lookups.lookups.get_secret')
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
@mock.patch('gcdt_lookups.lookups.stack_exists', return_value=True)
def test_lookup_selective_stack_lookup_limit_to_stack_lookup(
mock_stack_exists,
mock_get_outputs_for_stack, mock_get_secret,
mock_get_ssl_certificate):
# Mock Output (Credstash result)
mock_get_secret.return_value = 'secretPassword'
# Mock Output (SSL Cert)
mock_get_ssl_certificate.return_value = 'arn:aws:iam::11:server-certificate/cloudfront/2016/wildcard.dp.glomex.cloud-2016-03'
# Mock Output (Desc Stack)
mock_get_outputs_for_stack.return_value = {
'EC2BasicsLambdaArn': 'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12',
}
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'secret': 'lookup:secret:nameOfSecretPassword',
'sslCert': 'lookup:ssl:wildcard.dp.glomex.cloud-2016-03',
'stack': 'lookup:stack:dp-preprod:EC2BasicsLambdaArn'
}
_resolve_lookups(context, config, ['stack'])
assert config.get('secret') == 'lookup:secret:nameOfSecretPassword'
assert config.get('sslCert') == \
'lookup:ssl:wildcard.dp.glomex.cloud-2016-03'
assert config.get('stack') == \
'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12'
@mock.patch('gcdt_lookups.lookups.get_ssl_certificate')
@mock.patch('gcdt_lookups.lookups.get_secret')
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
def test_lookup_selective_stack_lookup_limit_to_secret_lookup(
mock_get_outputs_for_stack, mock_get_secret,
mock_get_ssl_certificate):
# Mock Output (Credstash result)
mock_get_secret.return_value = 'secretPassword'
# Mock Output (SSL Cert)
mock_get_ssl_certificate.return_value = 'arn:aws:iam::11:server-certificate/cloudfront/2016/wildcard.dp.glomex.cloud-2016-03'
# Mock Output (Desc Stack)
mock_get_outputs_for_stack.return_value = {
'EC2BasicsLambdaArn': 'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12',
}
context = {
'_awsclient': 'my_awsclient',
'tool': 'kumo'
}
config = {
'secret': 'lookup:secret:nameOfSecretPassword',
'sslCert': 'lookup:ssl:wildcard.dp.glomex.cloud-2016-03',
'stack': 'lookup:stack:dp-preprod:EC2BasicsLambdaArn'
}
_resolve_lookups(context, config, ['secret'])
assert config.get('secret') == 'secretPassword'
assert config.get('sslCert') == \
'lookup:ssl:wildcard.dp.glomex.cloud-2016-03'
assert config.get('stack') == \
'lookup:stack:dp-preprod:EC2BasicsLambdaArn'
@mock.patch('gcdt_lookups.lookups.get_ssl_certificate')
@mock.patch('gcdt_lookups.lookups.get_secret')
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
def test_lookup_selective_stack_lookup_limit_to_ssl_lookup(
mock_get_outputs_for_stack, mock_get_secret,
mock_get_ssl_certificate):
# Mock Output (Credstash result)
mock_get_secret.return_value = 'secretPassword'
# Mock Output (SSL Cert)
mock_get_ssl_certificate.return_value = 'arn:aws:iam::11:server-certificate/cloudfront/2016/wildcard.dp.glomex.cloud-2016-03'
# Mock Output (Desc Stack)
mock_get_outputs_for_stack.return_value = {
'EC2BasicsLambdaArn': 'arn:aws:lambda:eu-west-1:1122233:function:dp-preprod-lambdaEC2Basics-12',
}
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'secret': 'lookup:secret:nameOfSecretPassword',
'sslCert': 'lookup:ssl:wildcard.dp.glomex.cloud-2016-03',
'stack': 'lookup:stack:dp-preprod:EC2BasicsLambdaArn'
}
_resolve_lookups(context, config, ['ssl'])
assert config.get('secret') == 'lookup:secret:nameOfSecretPassword'
assert config.get('sslCert') == \
'arn:aws:iam::11:server-certificate/cloudfront/2016/wildcard.dp.glomex.cloud-2016-03'
assert config.get('stack') == \
'lookup:stack:dp-preprod:EC2BasicsLambdaArn'
@mock.patch('gcdt_lookups.lookups.get_base_ami')
@mock.patch('gcdt_lookups.lookups.get_outputs_for_stack')
@mock.patch('gcdt_lookups.lookups.stack_exists', return_value=True)
def test_lookup_kumo_sample(
mock_stack_exists,
mock_get_outputs_for_stack,
mock_get_base_ami):
mock_get_base_ami.return_value = 'ami-91307fe2'
mock_get_outputs_for_stack.return_value = {
'DefaultInstancePolicyARN': 'arn:aws:iam::420189626185:policy/7f-managed/infra-dev-Defaultmanagedinstancepolicy-9G6XX1YXZI5O',
'DefaultVPCId': 'vpc-88d2a7ec',
}
context = {
'_awsclient': 'my_awsclient',
'tool': 'kumo'
}
config = {
'kumo': {
'stack': {
'StackName': 'gcdt-sample-stack',
},
'parameters': {
'VPCId': 'vpc-88d2a7ec',
'ScaleMinCapacity': '1',
'ScaleMaxCapacity': '1',
'InstanceType': 't2.micro',
'ELBDNSName': 'supercars',
'BaseStackName': 'infra-dev',
'DefaultInstancePolicyARN': 'lookup:stack:infra-dev:DefaultInstancePolicyARN',
'AMI': 'lookup:baseami'
}
}
}
_resolve_lookups(context, config, ['ssl', 'stack', 'secret', 'baseami'])
assert config['kumo'] == {
'stack': {
'StackName': 'gcdt-sample-stack',
},
'parameters': {
'VPCId': 'vpc-88d2a7ec',
'ScaleMinCapacity': '1',
'ScaleMaxCapacity': '1',
'InstanceType': 't2.micro',
'ELBDNSName': 'supercars',
'BaseStackName': 'infra-dev',
'DefaultInstancePolicyARN': 'arn:aws:iam::420189626185:policy/7f-managed/infra-dev-Defaultmanagedinstancepolicy-9G6XX1YXZI5O',
'AMI': 'ami-91307fe2'
}
}
@mock.patch('gcdt_lookups.lookups.get_secret',
return_value='foobar1234')
def test_secret_lookup(mock_get_secret):
# sample from ops-captaincrunch-slack
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'bot_token': 'lookup:secret:captaincrunch.bot_token'
}
_resolve_lookups(context, config, ['secret'])
mock_get_secret.assert_called_once_with(
'my_awsclient', 'captaincrunch.bot_token', region_name=None)
assert config.get('bot_token') == 'foobar1234'
@mock.patch('gcdt_lookups.lookups.get_secret',
return_value='foobar1234')
def test_secret_lookup_continue_if_not_found(mock_get_secret, logcapture):
logcapture.level = logging.INFO
mock_get_secret.side_effect = ItemNotFound('not found, sorry')
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'bazz_value': 'lookup:secret:foo.bar.bazz:CONTINUE_IF_NOT_FOUND'
}
_resolve_lookups(context, config, ['secret'])
mock_get_secret.assert_called_once_with(
'my_awsclient', 'foo.bar.bazz', region_name=None)
assert config.get('bazz_value') == \
'lookup:secret:foo.bar.bazz:CONTINUE_IF_NOT_FOUND'
records = list(logcapture.actual())
assert records[0][1] == 'WARNING'
assert records[0][2] == \
'lookup:secret \'foo.bar.bazz\' not found in credstash!'
@mock.patch('gcdt_lookups.lookups.get_secret',
return_value='foobar1234')
def test_secret_lookup_error_case(mock_get_secret, logcapture):
logcapture.level = logging.INFO
mock_get_secret.side_effect = ItemNotFound('not found, sorry')
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'lookups': ['secret'],
'bazz_value': 'lookup:secret:foo.bar.bazz'
}
lookup((context, config))
mock_get_secret.assert_called_once_with(
'my_awsclient', 'foo.bar.bazz', region_name=None)
assert context['error'] == \
'lookup for \'bazz_value\' failed: "lookup:secret:foo.bar.bazz"'
assert config.get('bazz_value') == \
'lookup:secret:foo.bar.bazz'
assert context['error'] == \
'lookup for \'bazz_value\' failed: "lookup:secret:foo.bar.bazz"'
records = list(logcapture.actual())
assert records[0][1] == 'ERROR'
assert records[0][2] == 'not found, sorry'
def test_ami_accountid_config():
ami_accountid = CONFIG_READER_CONFIG['plugins']['gcdt_lookups'][
'ami_accountid']
assert ami_accountid == '569909643510'
def test_find_matching_certificate():
names = ['infra.glomex.cloud', '*.infra.glomex.cloud']
expected = 'arn:aws:acm:eu-west-1:123456789012:certificate/klmno-123'
certs = [
{
'CertificateArn': 'arn:aws:acm:eu-west-1:123456789012:certificate/abcde-123',
'Names': ['*.infra.glomex.cloud', 'infra.glomex.cloud',
'*.infra.glomex.cloud'],
'NotAfter': maya.now().add(months=1).datetime()
},
{
'CertificateArn': 'arn:aws:acm:eu-west-1:123456789012:certificate/fghij-123',
'Names': ['*.abc-vvs-test.glomex.com',
'*.abc-vvs.glomex.com', '*.dev.ds.glomex.cloud',
'*.dev.mds.glomex.cloud', '*.dev.mep.glomex.cloud',
'*.dev.mes.glomex.cloud', '*.dev.pnb.glomex.cloud',
'*.dev.vvs.glomex.cloud', '*.ds.glomex.cloud'],
'NotAfter': maya.now().add(months=10).datetime()
},
{
'CertificateArn': 'arn:aws:acm:eu-west-1:123456789012:certificate/klmno-123',
'Names': ['*.infra.glomex.cloud', 'infra.glomex.cloud',
'*.infra.glomex.cloud'],
'NotAfter': maya.now().add(months=20).datetime()
},
]
assert _find_matching_certificate(certs, names) == expected
def test_find_matching_certificate_not_found():
names = ['unknown.glomex.cloud', '*.infra.glomex.cloud']
certs = [
{
'CertificateArn': 'arn:aws:acm:eu-west-1:123456789012:certificate/abcde-123',
'Names': ['*.infra.glomex.cloud', 'infra.glomex.cloud',
'*.infra.glomex.cloud'],
'NotAfter': maya.now().add(months=1).datetime()
},
{
'CertificateArn': 'arn:aws:acm:eu-west-1:123456789012:certificate/fghij-123',
'Names': ['*.abc-vvs-test.glomex.com',
'*.abc-vvs.glomex.com', '*.dev.ds.glomex.cloud',
'*.dev.mds.glomex.cloud', '*.dev.mep.glomex.cloud',
'*.dev.mes.glomex.cloud', '*.dev.pnb.glomex.cloud',
'*.dev.vvs.glomex.cloud', '*.ds.glomex.cloud'],
'NotAfter': maya.now().add(months=10).datetime()
},
{
'CertificateArn': 'arn:aws:acm:eu-west-1:123456789012:certificate/klmno-123',
'Names': ['*.infra.glomex.cloud', 'infra.glomex.cloud',
'*.infra.glomex.cloud'],
'NotAfter': maya.now().add(months=20).datetime()
},
]
assert _find_matching_certificate(certs, names) is None
@mock.patch('gcdt_lookups.lookups.get_secret',
return_value='foobar1234')
def test_region_secret_lookup(mock_get_secret):
# sample from ops-captaincrunch-slack
context = {
'_awsclient': 'my_awsclient',
'tool': 'ramuda'
}
config = {
'bot_token': 'lookup:secret:captaincrunch.bot_token'
}
_resolve_lookups(context, config, ['secret'])
mock_get_secret.assert_called_once_with(
'my_awsclient', 'captaincrunch.bot_token', region_name=None)
assert config.get('bot_token') == 'foobar1234'
|
|
import django_filters
from rest_framework import filters
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from spiderhandler.models import QueryJob, QQInfo, WeiboInfo
from spiderhandler.queryrecords.serializers import QueryRecordSerializer, QQInfoSerializer, WeiboInfoSerializer
class QQInfoFilter(filters.FilterSet):
min_age = django_filters.NumberFilter(name="age", lookup_expr='gte')
max_age = django_filters.NumberFilter(name="age", lookup_expr='lte')
class Meta:
model = QQInfo
fields = ['qq', 'min_age', 'max_age']
class QQInfoList(APIView):
"""
List all QQInfo, or create a new QQInfo.
"""
serializer_class = QQInfoSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = QQInfoFilter
def get(self, request):
param = {
'qq': request.query_params.get('qq', None),
'min_age': request.query_params.get('min_age', None),
'max_age': request.query_params.get('max_age', None),
}
total = QQInfo.objects.count()
offset = int(request.query_params.get('offset', 0))
limit = int(request.query_params.get('limit', total))
queryset = QQInfo.objects.all()
if param['qq'] is not None:
queryset = queryset.filter(qq=param['qq'])
if param['min_age'] is not None:
queryset = queryset.filter(age__gte=param['min_age'])
if param['max_age'] is not None:
queryset = queryset.filter(age__lte=param['max_age'])
queryset = queryset[offset:offset + limit]
serializer = QQInfoSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = QQInfoSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
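# Hedged usage note (illustrative, not taken from the project's routing): with
# the manual filtering and slicing in QQInfoList.get above, a request such as
#   GET /qqinfo/?qq=12345&min_age=18&max_age=30&offset=0&limit=20
# narrows the queryset by qq and age range, then returns at most 20 rows
# starting at offset 0; omitting limit returns everything from offset onwards.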
class QQInfoDetail(generics.RetrieveUpdateDestroyAPIView):
"""
QQInfoDetail
"""
queryset = QQInfo.objects.all()
serializer_class = QQInfoSerializer
class WeiboInfoFilter(filters.FilterSet):
min_age = django_filters.NumberFilter(name="age", lookup_expr='gte')
max_age = django_filters.NumberFilter(name="age", lookup_expr='lte')
class Meta:
model = WeiboInfo
fields = ['weiboId', 'min_age', 'max_age']
class WeiboInfoList(APIView):
"""
    List all WeiboInfo, or create a new WeiboInfo.
"""
serializer_class = WeiboInfoSerializer
filter_backends = (filters.DjangoFilterBackend,)
    filter_class = WeiboInfoFilter
def get(self, request):
param = {
'weiboId': request.query_params.get('weiboId', None),
'min_age': request.query_params.get('min_age', None),
'max_age': request.query_params.get('max_age', None),
}
total = WeiboInfo.objects.count()
offset = int(request.query_params.get('offset', 0))
limit = int(request.query_params.get('limit', total))
queryset = WeiboInfo.objects.all()
if param['weiboId'] is not None:
            queryset = queryset.filter(weiboId=param['weiboId'])
if param['min_age'] is not None:
queryset = queryset.filter(age__gte=param['min_age'])
if param['max_age'] is not None:
queryset = queryset.filter(age__lte=param['max_age'])
queryset = queryset[offset:offset + limit]
serializer = WeiboInfoSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = WeiboInfoSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class WeiboInfoDetail(generics.RetrieveUpdateDestroyAPIView):
"""
    WeiboInfoDetail
"""
queryset = WeiboInfo.objects.all()
serializer_class = WeiboInfoSerializer
class QueryRecordFilter(filters.FilterSet):
class Meta:
model = QueryJob
fields = ['username', 'target_qq_id', 'target_zhihu_id', 'target_weibo_id']
class QueryRecordList(APIView):
serializer_class = QueryRecordSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = QueryRecordFilter
"""
List QueryRecord, or create a new QueryRecord.
"""
def get(self, request):
total = QueryJob.objects.count()
queryset = QueryJob.objects.all()
username = request.query_params.get('username')
forUserQQId = request.query_params.get('forUserQQId')
forUserZhihuId = request.query_params.get('forUserZhihuId')
forUserWeiboId = request.query_params.get('forUserWeiboId')
if username is not None:
queryset = queryset.filter(username=username)
if forUserQQId is not None:
queryset = queryset.filter(forUserQQId=forUserQQId)
if forUserZhihuId is not None:
queryset = queryset.filter(forUserZhihuId=forUserZhihuId)
if forUserWeiboId is not None:
queryset = queryset.filter(forUserWeiboId=forUserWeiboId)
offset = int(request.query_params.get('offset', 0))
limit = int(request.query_params.get('limit', total))
queryset = queryset[offset:(limit + offset)]
serializer = QueryRecordSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = QueryRecordSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class QueryRecordDetail(generics.RetrieveUpdateDestroyAPIView):
"""
QueryRecordDetail
"""
queryset = QueryJob.objects.all()
serializer_class = QueryRecordSerializer
# class QueryRecordViewSet(viewsets.ModelViewSet):
# """
# API endpoint that allows users to be viewed or edited.
# """
# queryset = QueryRecord.objects.all()
# serializer_class = QueryRecordSerializer
# class SnippetList(generics.ListCreateAPIView):
# print()
# queryset = QueryRecord.objects.all()
# serializer_class = QueryRecordSerializer
#
#
# class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = QueryRecord.objects.all()
# serializer_class = QueryRecordSerializer
# class QueryRecordDetail(mixins.RetrieveModelMixin,
# mixins.UpdateModelMixin,
# mixins.DestroyModelMixin,
# generics.GenericAPIView):
# queryset = QueryRecord.objects.all()
# serializer_class = QueryRecordSerializer
#
# def get(self, request, *args, **kwargs):
# return self.retrieve(request, *args, **kwargs)
#
# def put(self, request, *args, **kwargs):
# return self.update(request, *args, **kwargs)
#
# def delete(self, request, *args, **kwargs):
# return self.destroy(request, *args, **kwargs)
|
|
from django.contrib.admin.options import flatten_fieldsets, HORIZONTAL, VERTICAL
from django.contrib.admin.util import get_fields_from_path, NotRelationField
from django.contrib.admin.validation import validate as django_validate
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.forms.models import BaseModelForm, _get_foreign_key
from django_mongoengine.forms.documents import (
DocumentFormMetaclass, fields_for_document, BaseDocumentFormSet
)
from django_mongoengine.forms.document_options import DocumentMetaWrapper
from mongoengine.fields import ListField, EmbeddedDocumentField, ReferenceField
from mongoengine.base import BaseDocument
from options import BaseDocumentAdmin, EmbeddedDocumentAdmin
__all__ = ['validate']
def validate(cls, model):
if issubclass(model, models.Model):
django_validate(cls, model)
else:
_validate(cls, model)
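# Hedged usage sketch (illustrative only): admin registration code would call
# the dispatcher above roughly as
#
#   validate(AuthorAdmin, Author)
#
# where Author is a mongoengine Document and AuthorAdmin its admin class; a
# plain Django model/admin pair takes the django_validate branch instead. Both
# names are hypothetical.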
def _validate(cls, model):
"""
Does basic ModelAdmin option validation. Calls custom validation
classmethod in the end if it is provided in cls. The signature of the
custom validation classmethod should be: def validate(cls, model).
"""
# Before we can introspect models, they need to be fully loaded so that
# inter-relations are set up correctly. We force that here.
#models.get_apps()
opts = model._meta
validate_base(cls, model)
# list_display
if hasattr(cls, 'list_display'):
check_isseq(cls, 'list_display', cls.list_display)
for idx, field in enumerate(cls.list_display):
if not callable(field):
if not hasattr(cls, field):
if not hasattr(model, field):
try:
opts.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("%s.list_display[%d], %r is not a callable or an attribute of %r or found in the model %r."
% (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
else:
# getattr(model, field) could be an X_RelatedObjectsDescriptor
f = fetch_attr(cls, model, opts, "list_display[%d]" % idx, field)
if isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.list_display[%d]', '%s' is a ManyToManyField which is not supported."
% (cls.__name__, idx, field))
# list_display_links
if hasattr(cls, 'list_display_links'):
check_isseq(cls, 'list_display_links', cls.list_display_links)
for idx, field in enumerate(cls.list_display_links):
if field not in cls.list_display:
raise ImproperlyConfigured("'%s.list_display_links[%d]' "
"refers to '%s' which is not defined in 'list_display'."
% (cls.__name__, idx, field))
# list_filter
if hasattr(cls, 'list_filter'):
check_isseq(cls, 'list_filter', cls.list_filter)
#for idx, fpath in enumerate(cls.list_filter):
# print idx
# print fpath
# try:
# get_fields_from_path(model, fpath)
# except (NotRelationField, FieldDoesNotExist), e:
# raise ImproperlyConfigured(
# "'%s.list_filter[%d]' refers to '%s' which does not refer to a Field." % (
## cls.__name__, idx, fpath
# )
# )
# list_per_page = 100
if hasattr(cls, 'list_per_page') and not isinstance(cls.list_per_page, int):
raise ImproperlyConfigured("'%s.list_per_page' should be a integer."
% cls.__name__)
# list_editable
if hasattr(cls, 'list_editable') and cls.list_editable:
check_isseq(cls, 'list_editable', cls.list_editable)
for idx, field_name in enumerate(cls.list_editable):
try:
field = opts.get_field_by_name(field_name)[0]
except models.FieldDoesNotExist:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
"field, '%s', not defined on %s."
% (cls.__name__, idx, field_name, model.__name__))
if field_name not in cls.list_display:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to "
"'%s' which is not defined in 'list_display'."
% (cls.__name__, idx, field_name))
if field_name in cls.list_display_links:
raise ImproperlyConfigured("'%s' cannot be in both '%s.list_editable'"
" and '%s.list_display_links'"
% (field_name, cls.__name__, cls.__name__))
if not cls.list_display_links and cls.list_display[0] in cls.list_editable:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to"
" the first field in list_display, '%s', which can't be"
" used unless list_display_links is set."
% (cls.__name__, idx, cls.list_display[0]))
if not field.editable:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
"field, '%s', which isn't editable through the admin."
% (cls.__name__, idx, field_name))
# search_fields = ()
if hasattr(cls, 'search_fields'):
check_isseq(cls, 'search_fields', cls.search_fields)
# date_hierarchy = None
if cls.date_hierarchy:
f = get_field(cls, model, opts, 'date_hierarchy', cls.date_hierarchy)
if not isinstance(f, (models.DateField, models.DateTimeField)):
raise ImproperlyConfigured("'%s.date_hierarchy is "
"neither an instance of DateField nor DateTimeField."
% cls.__name__)
# ordering = None
if cls.ordering:
check_isseq(cls, 'ordering', cls.ordering)
for idx, field in enumerate(cls.ordering):
if field == '?' and len(cls.ordering) != 1:
raise ImproperlyConfigured("'%s.ordering' has the random "
"ordering marker '?', but contains other fields as "
"well. Please either remove '?' or the other fields."
% cls.__name__)
if field == '?':
continue
if field.startswith('-'):
field = field[1:]
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field:
continue
get_field(cls, model, opts, 'ordering[%d]' % idx, field)
if hasattr(cls, "readonly_fields"):
check_readonly_fields(cls, model, opts)
# list_select_related = False
# save_as = False
# save_on_top = False
for attr in ('list_select_related', 'save_as', 'save_on_top'):
if not isinstance(getattr(cls, attr), bool):
raise ImproperlyConfigured("'%s.%s' should be a boolean."
% (cls.__name__, attr))
# inlines = []
if hasattr(cls, 'inlines'):
check_isseq(cls, 'inlines', cls.inlines)
for idx, inline in enumerate(cls.inlines):
if not issubclass(inline, BaseDocumentAdmin):
raise ImproperlyConfigured("'%s.inlines[%d]' does not inherit "
"from BaseModelAdmin." % (cls.__name__, idx))
if not inline.document:
raise ImproperlyConfigured("'document' is a required attribute "
"of '%s.inlines[%d]'." % (cls.__name__, idx))
if not issubclass(inline.document, BaseDocument):
raise ImproperlyConfigured("'%s.inlines[%d].model' does not "
"inherit from models.Model." % (cls.__name__, idx))
validate_base(inline, inline.document)
validate_inline(inline, cls, model)
def validate_inline(cls, parent, parent_model):
# model is already verified to exist and be a Model
if cls.fk_name: # default value is None
f = get_field(cls, cls.model, cls.model._meta, 'fk_name', cls.fk_name)
if not isinstance(f, models.ForeignKey):
raise ImproperlyConfigured("'%s.fk_name is not an instance of "
"models.ForeignKey." % cls.__name__)
if not issubclass(cls, EmbeddedDocumentAdmin):
fk = _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name, can_fail=True)
else:
fk = None
# extra = 3
if not isinstance(cls.extra, int):
raise ImproperlyConfigured("'%s.extra' should be a integer."
% cls.__name__)
# max_num = None
max_num = getattr(cls, 'max_num', None)
if max_num is not None and not isinstance(max_num, int):
raise ImproperlyConfigured("'%s.max_num' should be an integer or None (default)."
% cls.__name__)
# formset
if hasattr(cls, 'formset') and not issubclass(cls.formset, BaseDocumentFormSet):
raise ImproperlyConfigured("'%s.formset' does not inherit from "
"BaseDocumentFormSet." % cls.__name__)
# exclude
if hasattr(cls, 'exclude') and cls.exclude:
if fk and fk.name in cls.exclude:
raise ImproperlyConfigured("%s cannot exclude the field "
"'%s' - this is the foreign key to the parent model "
"%s." % (cls.__name__, fk.name, parent_model.__name__))
if hasattr(cls, "readonly_fields"):
check_readonly_fields(cls, cls.document, cls.document._meta)
def validate_base(cls, model):
opts = model._meta
if isinstance(opts, dict):
opts = DocumentMetaWrapper(model)
# raw_id_fields
if hasattr(cls, 'raw_id_fields'):
check_isseq(cls, 'raw_id_fields', cls.raw_id_fields)
for idx, field in enumerate(cls.raw_id_fields):
f = get_field(cls, model, opts, 'raw_id_fields', field)
if not isinstance(f, (models.ForeignKey, models.ManyToManyField)):
raise ImproperlyConfigured("'%s.raw_id_fields[%d]', '%s' must "
"be either a ForeignKey or ManyToManyField."
% (cls.__name__, idx, field))
# fields
if cls.fields: # default value is None
check_isseq(cls, 'fields', cls.fields)
for field in cls.fields:
if field in cls.readonly_fields:
# Stuff can be put in fields that isn't actually a model field
# if it's in readonly_fields, readonly_fields will handle the
# validation of such things.
continue
check_formfield(cls, model, opts, 'fields', field)
try:
f = opts.get_field(field)
except models.FieldDoesNotExist:
# If we can't find a field on the model that matches,
# it could be an extra field on the form.
continue
if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created:
raise ImproperlyConfigured("'%s.fields' can't include the ManyToManyField "
"field '%s' because '%s' manually specifies "
"a 'through' model." % (cls.__name__, field, field))
if cls.fieldsets:
raise ImproperlyConfigured('Both fieldsets and fields are specified in %s.' % cls.__name__)
if len(cls.fields) > len(set(cls.fields)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.fields' % cls.__name__)
# fieldsets
if cls.fieldsets: # default value is None
check_isseq(cls, 'fieldsets', cls.fieldsets)
for idx, fieldset in enumerate(cls.fieldsets):
check_isseq(cls, 'fieldsets[%d]' % idx, fieldset)
if len(fieldset) != 2:
raise ImproperlyConfigured("'%s.fieldsets[%d]' does not "
"have exactly two elements." % (cls.__name__, idx))
check_isdict(cls, 'fieldsets[%d][1]' % idx, fieldset[1])
if 'fields' not in fieldset[1]:
raise ImproperlyConfigured("'fields' key is required in "
"%s.fieldsets[%d][1] field options dict."
% (cls.__name__, idx))
for fields in fieldset[1]['fields']:
# The entry in fields might be a tuple. If it is a standalone
# field, make it into a tuple to make processing easier.
if type(fields) != tuple:
fields = (fields,)
for field in fields:
if field in cls.readonly_fields:
# Stuff can be put in fields that isn't actually a
# model field if it's in readonly_fields,
# readonly_fields will handle the validation of such
# things.
continue
check_formfield(cls, model, opts, "fieldsets[%d][1]['fields']" % idx, field)
try:
f = opts.get_field(field)
if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created:
raise ImproperlyConfigured("'%s.fieldsets[%d][1]['fields']' "
"can't include the ManyToManyField field '%s' because "
"'%s' manually specifies a 'through' model." % (
cls.__name__, idx, field, field))
except models.FieldDoesNotExist:
# If we can't find a field on the model that matches,
# it could be an extra field on the form.
pass
flattened_fieldsets = flatten_fieldsets(cls.fieldsets)
if len(flattened_fieldsets) > len(set(flattened_fieldsets)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.fieldsets' % cls.__name__)
# exclude
if cls.exclude: # default value is None
check_isseq(cls, 'exclude', cls.exclude)
for field in cls.exclude:
check_formfield(cls, model, opts, 'exclude', field)
try:
f = opts.get_field(field)
except models.FieldDoesNotExist:
# If we can't find a field on the model that matches,
# it could be an extra field on the form.
continue
if len(cls.exclude) > len(set(cls.exclude)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.exclude' % cls.__name__)
# form
    # TODO: Find out why issubclass doesn't work!
if hasattr(cls, 'form') and not (issubclass(cls.form, BaseModelForm) or
cls.form.__class__.__name__ == 'DocumentFormMetaclass'):
raise ImproperlyConfigured("%s.form does not inherit from "
"BaseModelForm." % cls.__name__)
# filter_vertical
if hasattr(cls, 'filter_vertical'):
check_isseq(cls, 'filter_vertical', cls.filter_vertical)
for idx, field in enumerate(cls.filter_vertical):
f = get_field(cls, model, opts, 'filter_vertical', field)
if not isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.filter_vertical[%d]' must be "
"a ManyToManyField." % (cls.__name__, idx))
# filter_horizontal
if hasattr(cls, 'filter_horizontal'):
check_isseq(cls, 'filter_horizontal', cls.filter_horizontal)
for idx, field in enumerate(cls.filter_horizontal):
f = get_field(cls, model, opts, 'filter_horizontal', field)
            if not (isinstance(f, ListField) and isinstance(f.field, ReferenceField)):
                raise ImproperlyConfigured("'%s.filter_horizontal[%d]' must be "
                    "a ListField of ReferenceFields." % (cls.__name__, idx))
# radio_fields
if hasattr(cls, 'radio_fields'):
check_isdict(cls, 'radio_fields', cls.radio_fields)
for field, val in cls.radio_fields.items():
f = get_field(cls, model, opts, 'radio_fields', field)
if not (isinstance(f, models.ForeignKey) or f.choices):
raise ImproperlyConfigured("'%s.radio_fields['%s']' "
"is neither an instance of ForeignKey nor does "
"have choices set." % (cls.__name__, field))
if not val in (HORIZONTAL, VERTICAL):
raise ImproperlyConfigured("'%s.radio_fields['%s']' "
"is neither admin.HORIZONTAL nor admin.VERTICAL."
% (cls.__name__, field))
# prepopulated_fields
if hasattr(cls, 'prepopulated_fields'):
check_isdict(cls, 'prepopulated_fields', cls.prepopulated_fields)
for field, val in cls.prepopulated_fields.items():
f = get_field(cls, model, opts, 'prepopulated_fields', field)
if isinstance(f, (models.DateTimeField, models.ForeignKey,
models.ManyToManyField)):
raise ImproperlyConfigured("'%s.prepopulated_fields['%s']' "
"is either a DateTimeField, ForeignKey or "
"ManyToManyField. This isn't allowed."
% (cls.__name__, field))
check_isseq(cls, "prepopulated_fields['%s']" % field, val)
for idx, f in enumerate(val):
get_field(cls, model, opts, "prepopulated_fields['%s'][%d]" % (field, idx), f)
def check_isseq(cls, label, obj):
if not isinstance(obj, (list, tuple)):
raise ImproperlyConfigured("'%s.%s' must be a list or tuple." % (cls.__name__, label))
def check_isdict(cls, label, obj):
if not isinstance(obj, dict):
raise ImproperlyConfigured("'%s.%s' must be a dictionary." % (cls.__name__, label))
def get_field(cls, model, opts, label, field):
try:
return opts.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("'%s.%s' refers to field '%s' that is missing from model '%s'."
% (cls.__name__, label, field, model.__name__))
def check_formfield(cls, model, opts, label, field):
if getattr(cls.form, 'base_fields', None):
try:
cls.form.base_fields[field]
except KeyError:
raise ImproperlyConfigured("'%s.%s' refers to field '%s' that "
"is missing from the form." % (cls.__name__, label, field))
else:
fields = fields_for_document(model)
try:
fields[field]
except KeyError:
if hasattr(model, field) and isinstance(getattr(model, field), ListField):
if isinstance(model._fields[field].field, EmbeddedDocumentField):
return
if hasattr(model, field) and isinstance(getattr(model, field), EmbeddedDocumentField):
return
raise ImproperlyConfigured("'%s.%s' refers to field '%s' that "
"is missing from the form." % (cls.__name__, label, field))
def fetch_attr(cls, model, opts, label, field):
try:
return getattr(model, field)
except AttributeError:
raise ImproperlyConfigured("'%s.%s' refers to '%s' that is neither a field, method or property of model '%s'."
% (cls.__name__, label, field, model.__name__))
def check_readonly_fields(cls, model, opts):
check_isseq(cls, "readonly_fields", cls.readonly_fields)
for idx, field in enumerate(cls.readonly_fields):
if not callable(field):
if not hasattr(cls, field):
if not hasattr(model, field):
try:
opts.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("%s.readonly_fields[%d], %r is not a callable or an attribute of %r or found in the model %r."
% (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
|
|
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Closure compiler on JavaScript files to check for errors and produce
minified output."""
import argparse
import os
import re
import subprocess
import sys
import tempfile
import processor
_CURRENT_DIR = os.path.join(os.path.dirname(__file__))
_JAVA_BIN = "java"
_JDK_PATH = os.path.join(_CURRENT_DIR, "..", "jdk", "current", "bin", _JAVA_BIN)
_JAVA_PATH = _JDK_PATH if os.path.isfile(_JDK_PATH) else _JAVA_BIN
class Compiler(object):
"""Runs the Closure compiler on given source files to typecheck them
and produce minified output."""
_JAR_COMMAND = [
_JAVA_PATH,
"-jar",
"-Xms1024m",
"-client",
"-XX:+TieredCompilation",
]
_POLYMER_EXTERNS = os.path.join(_CURRENT_DIR, "externs", "polymer-1.0.js")
def __init__(self, verbose=False):
"""
Args:
verbose: Whether this class should output diagnostic messages.
"""
self._compiler_jar = os.path.join(_CURRENT_DIR, "compiler", "compiler.jar")
self._target = None
self._temp_files = []
self._verbose = verbose
def _nuke_temp_files(self):
"""Deletes any temp files this class knows about."""
if not self._temp_files:
return
self._log_debug("Deleting temp files: %s" % ", ".join(self._temp_files))
for f in self._temp_files:
os.remove(f)
self._temp_files = []
def _log_debug(self, msg, error=False):
"""Logs |msg| to stdout if --verbose/-v is passed when invoking this script.
Args:
msg: A debug message to log.
"""
if self._verbose:
print "(INFO) %s" % msg
def _log_error(self, msg):
"""Logs |msg| to stderr regardless of --flags.
Args:
msg: An error message to log.
"""
print >> sys.stderr, "(ERROR) %s" % msg
def run_jar(self, jar, args):
"""Runs a .jar from the command line with arguments.
Args:
jar: A file path to a .jar file
args: A list of command line arguments to be passed when running the .jar.
Return:
(exit_code, stderr) The exit code of the command (e.g. 0 for success) and
the stderr collected while running |jar| (as a string).
"""
shell_command = " ".join(self._JAR_COMMAND + [jar] + args)
self._log_debug("Running jar: %s" % shell_command)
devnull = open(os.devnull, "w")
kwargs = {"stdout": devnull, "stderr": subprocess.PIPE, "shell": True}
process = subprocess.Popen(shell_command, **kwargs)
_, stderr = process.communicate()
return process.returncode, stderr
def _get_line_number(self, match):
"""When chrome is built, it preprocesses its JavaScript from:
<include src="blah.js">
alert(1);
to:
/* contents of blah.js inlined */
alert(1);
Because Closure Compiler requires this inlining already be done (as
<include> isn't valid JavaScript), this script creates temporary files to
expand all the <include>s.
When type errors are hit in temporary files, a developer doesn't know the
original source location to fix. This method maps from /tmp/file:300 back to
/original/source/file:100 so fixing errors is faster for developers.
Args:
match: A re.MatchObject from matching against a line number regex.
Returns:
The fixed up /file and :line number.
"""
real_file = self._processor.get_file_from_line(match.group(1))
return "%s:%d" % (os.path.abspath(real_file.file), real_file.line_number)
def _clean_up_error(self, error):
"""Reverse the effects that funky <include> preprocessing steps have on
errors messages.
Args:
error: A Closure compiler error (2 line string with error and source).
Return:
The fixed up error string.
"""
assert self._target
assert self._expanded_file
expanded_file = self._expanded_file
fixed = re.sub("%s:(\d+)" % expanded_file, self._get_line_number, error)
return fixed.replace(expanded_file, os.path.abspath(self._target))
def _format_errors(self, errors):
"""Formats Closure compiler errors to easily spot compiler output.
Args:
errors: A list of strings extracted from the Closure compiler's output.
Returns:
A formatted output string.
"""
contents = "\n## ".join("\n\n".join(errors).splitlines())
return "## %s" % contents if contents else ""
def _create_temp_file(self, contents):
"""Creates an owned temporary file with |contents|.
Args:
      contents: A string of the file contents to write to a temporary file.
Return:
The filepath of the newly created, written, and closed temporary file.
"""
with tempfile.NamedTemporaryFile(mode="wt", delete=False) as tmp_file:
self._temp_files.append(tmp_file.name)
tmp_file.write(contents)
return tmp_file.name
def run(self, sources, out_file, closure_args=None,
custom_sources=False, custom_includes=False):
"""Closure compile |sources| while checking for errors.
Args:
sources: Files to compile. sources[0] is the typically the target file.
sources[1:] are externs and dependencies in topological order. Order
is not guaranteed if custom_sources is True.
out_file: A file where the compiled output is written to.
closure_args: Arguments passed directly to the Closure compiler.
custom_sources: Whether |sources| was customized by the target (e.g. not
in GYP dependency order).
custom_includes: Whether <include>s are processed when |custom_sources|
is True.
Returns:
(found_errors, stderr) A boolean indicating whether errors were found and
the raw Closure compiler stderr (as a string).
"""
is_extern = lambda f: 'externs' in f
externs_and_deps = [self._POLYMER_EXTERNS]
if custom_sources:
if custom_includes:
# TODO(dbeam): this is fairly hacky. Can we just remove custom_sources
# soon when all the things kept on life support using it die?
self._target = sources.pop()
externs_and_deps += sources
else:
self._target = sources[0]
externs_and_deps += sources[1:]
externs = filter(is_extern, externs_and_deps)
deps = filter(lambda f: not is_extern(f), externs_and_deps)
assert externs or deps or self._target
self._log_debug("Externs: %s" % externs)
self._log_debug("Dependencies: %s" % deps)
self._log_debug("Target: %s" % self._target)
js_args = deps + ([self._target] if self._target else [])
process_includes = custom_includes or not custom_sources
if process_includes:
# TODO(dbeam): compiler.jar automatically detects "@externs" in a --js arg
# and moves these files to a different AST tree. However, because we use
# one big funky <include> meta-file, it thinks all the code is one big
# externs. Just use --js when <include> dies.
cwd, tmp_dir = os.getcwd(), tempfile.gettempdir()
rel_path = lambda f: os.path.join(os.path.relpath(cwd, tmp_dir), f)
contents = ['<include src="%s">' % rel_path(f) for f in js_args]
meta_file = self._create_temp_file("\n".join(contents))
self._log_debug("Meta file: %s" % meta_file)
self._processor = processor.Processor(meta_file)
self._expanded_file = self._create_temp_file(self._processor.contents)
self._log_debug("Expanded file: %s" % self._expanded_file)
js_args = [self._expanded_file]
closure_args = closure_args or []
closure_args += ["summary_detail_level=3", "continue_after_errors"]
args = ["--externs=%s" % e for e in externs] + \
["--js=%s" % s for s in js_args] + \
["--%s" % arg for arg in closure_args]
assert out_file
out_dir = os.path.dirname(out_file)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
checks_only = 'checks_only' in closure_args
if not checks_only:
args += ["--js_output_file=%s" % out_file]
self._log_debug("Args: %s" % " ".join(args))
return_code, stderr = self.run_jar(self._compiler_jar, args)
errors = stderr.strip().split("\n\n")
maybe_summary = errors.pop()
summary = re.search("(?P<error_count>\d+).*error.*warning", maybe_summary)
if summary:
self._log_debug("Summary: %s" % maybe_summary)
else:
# Not a summary. Running the jar failed. Bail.
self._log_error(stderr)
self._nuke_temp_files()
sys.exit(1)
if summary.group('error_count') != "0":
if os.path.exists(out_file):
os.remove(out_file)
elif checks_only and return_code == 0:
# Compile succeeded but --checks_only disables --js_output_file from
# actually writing a file. Write a file ourselves so incremental builds
# still work.
with open(out_file, 'w') as f:
f.write('')
if process_includes:
errors = map(self._clean_up_error, errors)
output = self._format_errors(errors)
if errors:
prefix = "\n" if output else ""
self._log_error("Error in: %s%s%s" % (self._target, prefix, output))
elif output:
self._log_debug("Output: %s" % output)
self._nuke_temp_files()
return bool(errors) or return_code > 0, stderr
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Typecheck JavaScript using Closure compiler")
parser.add_argument("sources", nargs=argparse.ONE_OR_MORE,
help="Path to a source file to typecheck")
parser.add_argument("--custom_sources", action="store_true",
help="Whether this rules has custom sources.")
parser.add_argument("--custom_includes", action="store_true",
help="If present, <include>s are processed when "
"using --custom_sources.")
parser.add_argument("-o", "--out_file", required=True,
help="A file where the compiled output is written to")
parser.add_argument("-c", "--closure_args", nargs=argparse.ZERO_OR_MORE,
help="Arguments passed directly to the Closure compiler")
parser.add_argument("-v", "--verbose", action="store_true",
help="Show more information as this script runs")
opts = parser.parse_args()
compiler = Compiler(verbose=opts.verbose)
found_errors, stderr = compiler.run(opts.sources, out_file=opts.out_file,
closure_args=opts.closure_args,
custom_sources=opts.custom_sources,
custom_includes=opts.custom_includes)
if found_errors:
if opts.custom_sources:
print stderr
sys.exit(1)
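# Hedged usage sketch (not part of the original Chromium script): driving the
# Compiler class programmatically instead of through the CLI above. The file
# paths are illustrative assumptions; sources[0] is the target and the rest are
# externs/dependencies, mirroring what the argument parser feeds compiler.run().
#
#   checker = Compiler(verbose=True)
#   found_errors, stderr = checker.run(
#       ["my_app.js", "externs/chrome_extensions.js", "deps/util.js"],
#       out_file="out/my_app.min.js",
#       closure_args=["jscomp_error=checkTypes"])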
|
|
# Copyright 2018 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from __future__ import unicode_literals
from contextlib import contextmanager
from math import isnan, isinf
from hy._compat import PY3, str_type, bytes_type, long_type, string_types
from fractions import Fraction
from clint.textui import colored
PRETTY = True
@contextmanager
def pretty(pretty=True):
"""
Context manager to temporarily enable
or disable pretty-printing of Hy model reprs.
"""
global PRETTY
old, PRETTY = PRETTY, pretty
try:
yield
finally:
PRETTY = old
class HyObject(object):
"""
Generic Hy Object model. This is helpful to inject things into all the
Hy lexing Objects at once.
"""
def replace(self, other):
if isinstance(other, HyObject):
for attr in ["start_line", "end_line",
"start_column", "end_column"]:
if not hasattr(self, attr) and hasattr(other, attr):
setattr(self, attr, getattr(other, attr))
else:
raise TypeError("Can't replace a non Hy object with a Hy object")
return self
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, super(HyObject, self).__repr__())
_wrappers = {}
def wrap_value(x):
"""Wrap `x` into the corresponding Hy type.
This allows replace_hy_obj to convert a non Hy object to a Hy object.
This also allows a macro to return an unquoted expression transparently.
"""
wrapper = _wrappers.get(type(x))
if wrapper is None:
return x
else:
return wrapper(x)
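# Hedged illustration (not from the original source): once the _wrappers table
# below is populated at import time, wrap_value(1) returns a HyInteger and
# wrap_value(1.5) a HyFloat, while a value whose type has no registered wrapper
# is returned unchanged.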
def replace_hy_obj(obj, other):
if isinstance(obj, HyObject):
return obj.replace(other)
wrapped_obj = wrap_value(obj)
if isinstance(wrapped_obj, HyObject):
return wrapped_obj.replace(other)
else:
raise TypeError("Don't know how to wrap a %s object to a HyObject"
% type(obj))
def repr_indent(obj):
return repr(obj).replace("\n", "\n ")
class HyString(HyObject, str_type):
"""
Generic Hy String object. Helpful to store string literals from Hy
scripts. It's either a ``str`` or a ``unicode``, depending on the
Python version.
"""
def __new__(cls, s=None, brackets=None):
value = super(HyString, cls).__new__(cls, s)
value.brackets = brackets
return value
_wrappers[str_type] = HyString
class HyBytes(HyObject, bytes_type):
"""
Generic Hy Bytes object. It's either a ``bytes`` or a ``str``, depending
on the Python version.
"""
pass
_wrappers[bytes_type] = HyBytes
class HySymbol(HyString):
"""
Hy Symbol. Basically a String.
"""
def __init__(self, string):
self += string
_wrappers[bool] = lambda x: HySymbol("True") if x else HySymbol("False")
_wrappers[type(None)] = lambda foo: HySymbol("None")
class HyKeyword(HyObject, str_type):
"""Generic Hy Keyword object. It's either a ``str`` or a ``unicode``,
depending on the Python version.
"""
PREFIX = "\uFDD0"
def __new__(cls, value):
if not value.startswith(cls.PREFIX):
value = cls.PREFIX + value
obj = str_type.__new__(cls, value)
return obj
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self[1:]))
def strip_digit_separators(number):
# Don't strip a _ or , if it's the first character, as _42 and
# ,42 aren't valid numbers
return (number[0] + number[1:].replace("_", "").replace(",", "")
if isinstance(number, string_types) and len(number) > 1
else number)
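# Hedged illustration: strip_digit_separators("1_000,000") returns "1000000",
# while a leading "_" or "," is left alone so that strings like "_42" stay
# invalid when later passed to int()/long().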
class HyInteger(HyObject, long_type):
"""
Internal representation of a Hy Integer. May raise a ValueError as if
    int(foo) was called, given HyInteger(foo). On Python 2.x, ``long`` is
    used instead.
"""
def __new__(cls, number, *args, **kwargs):
if isinstance(number, string_types):
number = strip_digit_separators(number)
bases = {"0x": 16, "0o": 8, "0b": 2}
for leader, base in bases.items():
if number.startswith(leader):
# We've got a string, known leader, set base.
number = long_type(number, base=base)
break
else:
# We've got a string, no known leader; base 10.
number = long_type(number, base=10)
else:
# We've got a non-string; convert straight.
number = long_type(number)
return super(HyInteger, cls).__new__(cls, number)
_wrappers[int] = HyInteger
if not PY3: # do not add long on python3
_wrappers[long_type] = HyInteger
def check_inf_nan_cap(arg, value):
if isinstance(arg, string_types):
if isinf(value) and "i" in arg.lower() and "Inf" not in arg:
raise ValueError('Inf must be capitalized as "Inf"')
if isnan(value) and "NaN" not in arg:
raise ValueError('NaN must be capitalized as "NaN"')
class HyFloat(HyObject, float):
"""
Internal representation of a Hy Float. May raise a ValueError as if
float(foo) was called, given HyFloat(foo).
"""
def __new__(cls, num, *args, **kwargs):
value = super(HyFloat, cls).__new__(cls, strip_digit_separators(num))
check_inf_nan_cap(num, value)
return value
_wrappers[float] = HyFloat
class HyComplex(HyObject, complex):
"""
Internal representation of a Hy Complex. May raise a ValueError as if
complex(foo) was called, given HyComplex(foo).
"""
def __new__(cls, real, imag=0, *args, **kwargs):
if isinstance(real, string_types):
value = super(HyComplex, cls).__new__(
cls, strip_digit_separators(real)
)
p1, _, p2 = real.lstrip("+-").replace("-", "+").partition("+")
check_inf_nan_cap(p1, value.imag if "j" in p1 else value.real)
if p2:
check_inf_nan_cap(p2, value.imag)
return value
return super(HyComplex, cls).__new__(cls, real, imag)
_wrappers[complex] = HyComplex
class HyList(HyObject, list):
"""
Hy List. Basically just a list.
"""
def replace(self, other):
for x in self:
replace_hy_obj(x, other)
HyObject.replace(self, other)
return self
def __add__(self, other):
return self.__class__(super(HyList, self).__add__(other))
def __getslice__(self, start, end):
return self.__class__(super(HyList, self).__getslice__(start, end))
def __getitem__(self, item):
ret = super(HyList, self).__getitem__(item)
if isinstance(item, slice):
return self.__class__(ret)
return ret
color = staticmethod(colored.cyan)
def __repr__(self):
return str(self) if PRETTY else super(HyList, self).__repr__()
def __str__(self):
with pretty():
c = self.color
if self:
return ("{}{}\n {}{}").format(
c(self.__class__.__name__),
c("(["),
(c(",") + "\n ").join([repr_indent(e) for e in self]),
c("])"))
else:
return '' + c(self.__class__.__name__ + "()")
_wrappers[list] = lambda l: HyList(wrap_value(x) for x in l)
_wrappers[tuple] = lambda t: HyList(wrap_value(x) for x in t)
class HyDict(HyList):
"""
HyDict (just a representation of a dict)
"""
def __str__(self):
with pretty():
g = colored.green
if self:
pairs = []
for k, v in zip(self[::2],self[1::2]):
k, v = repr_indent(k), repr_indent(v)
pairs.append(
("{0}{c}\n {1}\n "
if '\n' in k+v
else "{0}{c} {1}").format(k, v, c=g(',')))
if len(self) % 2 == 1:
pairs.append("{} {}\n".format(
repr_indent(self[-1]), g("# odd")))
return "{}\n {}{}".format(
g("HyDict(["), ("{c}\n ".format(c=g(',')).join(pairs)), g("])"))
else:
return '' + g("HyDict()")
def keys(self):
return self[0::2]
def values(self):
return self[1::2]
def items(self):
return list(zip(self.keys(), self.values()))
_wrappers[dict] = lambda d: HyDict(wrap_value(x) for x in sum(d.items(), ()))
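# A hedged sketch of the flat key/value layout used by HyDict above (values
# are illustrative):
# >>> d = HyDict(["a", 1, "b", 2])
# >>> d.items()
# [('a', 1), ('b', 2)]
# >>> list(d.keys()), list(d.values())
# (['a', 'b'], [1, 2])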
class HyExpression(HyList):
"""
Hy S-Expression. Basically just a list.
"""
color = staticmethod(colored.yellow)
_wrappers[HyExpression] = lambda e: HyExpression(wrap_value(x) for x in e)
_wrappers[Fraction] = lambda e: HyExpression(
[HySymbol("fraction"), wrap_value(e.numerator), wrap_value(e.denominator)])
class HySet(HyList):
"""
Hy set (just a representation of a set)
"""
color = staticmethod(colored.red)
_wrappers[set] = lambda s: HySet(wrap_value(x) for x in s)
class HyCons(HyObject):
"""
HyCons: a cons object.
Building a HyCons of something and a HyList really builds a HyList
"""
__slots__ = ["car", "cdr"]
def __new__(cls, car, cdr):
if isinstance(cdr, list):
# Keep unquotes in the cdr of conses
if type(cdr) == HyExpression:
if len(cdr) > 0 and type(cdr[0]) == HySymbol:
if cdr[0] in ("unquote", "unquote-splice"):
return super(HyCons, cls).__new__(cls)
return cdr.__class__([wrap_value(car)] + cdr)
elif cdr is None:
return HyExpression([wrap_value(car)])
else:
return super(HyCons, cls).__new__(cls)
def __init__(self, car, cdr):
self.car = wrap_value(car)
self.cdr = wrap_value(cdr)
def __getitem__(self, n):
if n == 0:
return self.car
if n == slice(1, None):
return self.cdr
raise IndexError(
"Can only get the car ([0]) or the cdr ([1:]) of a HyCons")
def __setitem__(self, n, new):
if n == 0:
self.car = new
return
if n == slice(1, None):
self.cdr = new
return
raise IndexError(
"Can only set the car ([0]) or the cdr ([1:]) of a HyCons")
def __iter__(self):
yield self.car
try:
iterator = (i for i in self.cdr)
except TypeError:
if self.cdr is not None:
yield self.cdr
raise TypeError("Iteration on malformed cons")
else:
for i in iterator:
yield i
def replace(self, other):
if self.car is not None:
replace_hy_obj(self.car, other)
if self.cdr is not None:
replace_hy_obj(self.cdr, other)
HyObject.replace(self, other)
def __repr__(self):
if PRETTY:
return str(self)
else:
return "HyCons({}, {})".format(
repr(self.car), repr(self.cdr))
def __str__(self):
with pretty():
c = colored.yellow
lines = ['' + c("<HyCons (")]
while True:
lines.append(" " + repr_indent(self.car))
if not isinstance(self.cdr, HyCons):
break
self = self.cdr
lines.append("{} {}{}".format(
c("."), repr_indent(self.cdr), c(")>")))
return '\n'.join(lines)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.car == other.car and
self.cdr == other.cdr
)
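# A hedged sketch of HyCons indexing and the list special case above
# (illustrative values; equality is used instead of repr for portability):
# >>> pair = HyCons("a", "b")
# >>> pair[0] == "a" and pair[1:] == "b"
# True
# >>> HyCons("a", None) == ["a"]   # consing onto None builds a HyExpression
# True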
|
|
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Reporter
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(
Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
)
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
            # The atomic block shouldn't roll back on its own, so force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(
connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit."
)
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
        # The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
Reporter.objects.create(id=1)
Reporter.objects.create(id=2)
main_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.select_for_update().get(id=1)
main_thread_ready.wait()
# 1) This line locks... (see below for 2)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
Reporter.objects.select_for_update().get(id=2)
main_thread_ready.set()
# The two threads can't be synchronized with an event here
# because the other thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see above for 1)
Reporter.objects.exclude(id=2).update(id=1)
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
"""#20028 -- Atomic must support wrapping callable instances."""
class Callable:
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
@skipUnlessDBFeature('can_release_savepoints')
def test_atomic_does_not_leak_savepoints_on_failure(self):
"""#23074 -- Savepoints must be released after rollback."""
# Expect an error when rolling back a savepoint that doesn't exist.
# Done outside of the transaction block to ensure proper recovery.
with self.assertRaises(Error):
# Start a plain transaction.
with transaction.atomic():
# Swallow the intentional error raised in the sub-transaction.
with self.assertRaisesMessage(Exception, "Oops"):
# Start a sub-transaction with a savepoint.
with transaction.atomic():
sid = connection.savepoint_ids[-1]
raise Exception("Oops")
# This is expected to fail because the savepoint no longer exists.
connection.savepoint_rollback(sid)
@skipIf(
connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit."
)
class NonAutocommitTests(TransactionTestCase):
available_apps = []
def test_orm_query_after_error_and_rollback(self):
"""
ORM queries are allowed after an error and a rollback in non-autocommit
mode (#27504).
"""
transaction.set_autocommit(False)
r1 = Reporter.objects.create(first_name='Archibald', last_name='Haddock')
r2 = Reporter(first_name='Cuthbert', last_name='Calculus', id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
transaction.rollback()
Reporter.objects.last()
def test_orm_query_without_autocommit(self):
"""#24921 -- ORM queries must be possible after set_autocommit(False)."""
transaction.set_autocommit(False)
try:
Reporter.objects.create(first_name="Tintin")
finally:
transaction.rollback()
transaction.set_autocommit(True)
|
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rank-1 BNN ResNet-50 on ImageNet.
A Rank-1 Bayesian neural net (Rank-1 BNN) [1] is an efficient and scalable
approach to variational BNNs that posits prior distributions on rank-1 factors
of the weights and optimizes global mixture variational posterior distributions.
References:
[1]: Michael W. Dusenberry*, Ghassen Jerfel*, Yeming Wen, Yian Ma, Jasper
Snoek, Katherine Heller, Balaji Lakshminarayanan, Dustin Tran. Efficient
and Scalable Bayesian Neural Nets with Rank-1 Factors. In Proc. of
International Conference on Machine Learning (ICML) 2020.
https://arxiv.org/abs/2005.07186
"""
import os
import time
from absl import app
from absl import flags
from absl import logging
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import utils # local file import from baselines.imagenet
from tensorboard.plugins.hparams import api as hp
flags.DEFINE_integer('kl_annealing_epochs', 90,
'Number of epochs over which to anneal the KL term to 1.')
flags.DEFINE_string('alpha_initializer', 'trainable_normal',
'Initializer name for the alpha parameters.')
flags.DEFINE_string('gamma_initializer', 'trainable_normal',
'Initializer name for the gamma parameters.')
flags.DEFINE_string('alpha_regularizer', 'normal_kl_divergence',
'Regularizer name for the alpha parameters.')
flags.DEFINE_string('gamma_regularizer', 'normal_kl_divergence',
'Regularizer name for the gamma parameters.')
flags.DEFINE_boolean('use_additive_perturbation', False,
'Use additive perturbations instead of multiplicative.')
# General model flags
flags.DEFINE_integer('ensemble_size', 4, 'Size of ensemble.')
flags.DEFINE_integer('per_core_batch_size', 128, 'Batch size per TPU core/GPU.')
flags.DEFINE_float('random_sign_init', 0.75,
'Use random sign init for fast weights.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_float('base_learning_rate', 0.1,
'Base learning rate when train batch size is 256.')
flags.DEFINE_float('one_minus_momentum', 0.1, 'Optimizer momentum.')
flags.DEFINE_float('dropout_rate', 1e-3,
'Dropout rate. Only used if alpha/gamma initializers are, '
'e.g., trainable normal with a fixed stddev.')
flags.DEFINE_float('prior_stddev', 0.05,
'Prior stddev. Sort of like a prior on dropout rate, where '
'it encourages defaulting/shrinking to this value.')
flags.DEFINE_float('l2', 1e-4, 'L2 coefficient.')
flags.DEFINE_float('fast_weight_lr_multiplier', 1.0,
'fast weights lr multiplier.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
'The directory where the model weights and '
'training/evaluation summaries are stored.')
flags.DEFINE_integer('train_epochs', 135, 'Number of training epochs.')
flags.DEFINE_integer('corruptions_interval', 135,
'Number of epochs between evaluating on the corrupted '
'test data. Use -1 to never evaluate.')
flags.DEFINE_integer('checkpoint_interval', 27,
'Number of epochs between saving checkpoints. Use -1 to '
'never save checkpoints.')
flags.DEFINE_string('alexnet_errors_path', None,
'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
flags.DEFINE_bool('use_ensemble_bn', False, 'Whether to use ensemble bn.')
flags.DEFINE_integer('num_eval_samples', 1,
'Number of model predictions to sample per example at '
'eval time.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 32, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
# Number of images in ImageNet-1k train dataset.
APPROX_IMAGENET_TRAIN_IMAGES = 1281167
# Number of images in eval dataset.
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
def main(argv):
del argv # unused arg
tf.random.set_seed(FLAGS.seed)
per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
batch_size = per_core_batch_size * FLAGS.num_cores
steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
logging.info('Saving checkpoints at %s', FLAGS.output_dir)
data_dir = FLAGS.data_dir
if FLAGS.use_gpu:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
else:
logging.info('Use TPU at %s',
FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
train_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TRAIN,
use_bfloat16=FLAGS.use_bfloat16,
data_dir=data_dir)
train_dataset = train_builder.load(batch_size=batch_size, strategy=strategy)
test_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TEST, use_bfloat16=FLAGS.use_bfloat16, data_dir=data_dir)
clean_test_dataset = test_builder.load(
batch_size=batch_size, strategy=strategy)
test_datasets = {
'clean': clean_test_dataset
}
if FLAGS.corruptions_interval > 0:
corruption_types, max_intensity = utils.load_corrupted_test_info()
for name in corruption_types:
for intensity in range(1, max_intensity + 1):
dataset_name = '{0}_{1}'.format(name, intensity)
dataset = utils.load_corrupted_test_dataset(
batch_size=batch_size,
corruption_name=name,
corruption_intensity=intensity,
use_bfloat16=FLAGS.use_bfloat16)
test_datasets[dataset_name] = (
strategy.experimental_distribute_dataset(dataset))
if FLAGS.use_bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.output_dir, 'summaries'))
with strategy.scope():
logging.info('Building Keras ResNet-50 model')
model = ub.models.resnet50_rank1(
input_shape=(224, 224, 3),
num_classes=NUM_CLASSES,
alpha_initializer=FLAGS.alpha_initializer,
gamma_initializer=FLAGS.gamma_initializer,
alpha_regularizer=FLAGS.alpha_regularizer,
gamma_regularizer=FLAGS.gamma_regularizer,
use_additive_perturbation=FLAGS.use_additive_perturbation,
ensemble_size=FLAGS.ensemble_size,
random_sign_init=FLAGS.random_sign_init,
dropout_rate=FLAGS.dropout_rate,
prior_stddev=FLAGS.prior_stddev,
use_tpu=not FLAGS.use_gpu,
use_ensemble_bn=FLAGS.use_ensemble_bn)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
# Scale learning rate and decay epochs by vanilla settings.
base_lr = FLAGS.base_learning_rate * batch_size / 256
decay_epochs = [
(FLAGS.train_epochs * 30) // 90,
(FLAGS.train_epochs * 60) // 90,
(FLAGS.train_epochs * 80) // 90,
]
learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
steps_per_epoch=steps_per_epoch,
base_learning_rate=base_lr,
decay_ratio=0.1,
decay_epochs=decay_epochs,
warmup_epochs=5)
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
momentum=1.0 - FLAGS.one_minus_momentum,
nesterov=True)
metrics = {
'train/negative_log_likelihood': tf.keras.metrics.Mean(),
'train/kl': tf.keras.metrics.Mean(),
'train/kl_scale': tf.keras.metrics.Mean(),
'train/elbo': tf.keras.metrics.Mean(),
'train/loss': tf.keras.metrics.Mean(),
'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'train/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'train/diversity': rm.metrics.AveragePairwiseDiversity(),
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/kl': tf.keras.metrics.Mean(),
'test/elbo': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'test/diversity': rm.metrics.AveragePairwiseDiversity(),
'test/member_accuracy_mean': (
tf.keras.metrics.SparseCategoricalAccuracy()),
'test/member_ece_mean': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
}
if FLAGS.corruptions_interval > 0:
corrupt_metrics = {}
for intensity in range(1, max_intensity + 1):
for corruption in corruption_types:
dataset_name = '{0}_{1}'.format(corruption, intensity)
corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/kl_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/elbo_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
if FLAGS.ensemble_size > 1:
for i in range(FLAGS.ensemble_size):
metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
metrics['test/accuracy_member_{}'.format(i)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
logging.info('Finished building Keras ResNet-50 model')
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
initial_epoch = 0
if latest_checkpoint:
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
def compute_l2_loss(model):
filtered_variables = []
for var in model.trainable_variables:
      # Apply L2 to the kernel, batch norm, and bias variables. This excludes
      # the rank-1 fast-weight posterior/prior parameters, but be careful
      # with their naming scheme.
if ('kernel' in var.name or
'batch_norm' in var.name or
'bias' in var.name):
filtered_variables.append(tf.reshape(var, (-1,)))
l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
return l2_loss
@tf.function
def train_step(iterator):
"""Training StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
images = inputs['features']
labels = inputs['labels']
if FLAGS.ensemble_size > 1:
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
labels = tf.tile(labels, [FLAGS.ensemble_size])
with tf.GradientTape() as tape:
logits = model(images, training=True)
if FLAGS.use_bfloat16:
logits = tf.cast(logits, tf.float32)
probs = tf.nn.softmax(logits)
if FLAGS.ensemble_size > 1:
per_probs = tf.reshape(
probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
metrics['train/diversity'].add_batch(per_probs)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(labels,
logits,
from_logits=True))
l2_loss = compute_l2_loss(model)
kl = sum(model.losses) / APPROX_IMAGENET_TRAIN_IMAGES
kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
kl_scale = tf.minimum(1., kl_scale)
kl_loss = kl_scale * kl
        # Scale the loss, since the TPUStrategy will reduce-sum the gradients
        # across replicas.
loss = negative_log_likelihood + l2_loss + kl_loss
scaled_loss = loss / strategy.num_replicas_in_sync
elbo = -(negative_log_likelihood + l2_loss + kl)
grads = tape.gradient(scaled_loss, model.trainable_variables)
# Separate learning rate implementation.
if FLAGS.fast_weight_lr_multiplier != 1.0:
grads_and_vars = []
for grad, var in zip(grads, model.trainable_variables):
          # Apply a different learning rate to the fast weights. This excludes
          # BN parameters and the slow weights, but be careful with the naming
          # scheme.
if ('batch_norm' not in var.name and 'kernel' not in var.name):
grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier,
var))
else:
grads_and_vars.append((grad, var))
optimizer.apply_gradients(grads_and_vars)
else:
optimizer.apply_gradients(zip(grads, model.trainable_variables))
metrics['train/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['train/kl'].update_state(kl)
metrics['train/kl_scale'].update_state(kl_scale)
metrics['train/elbo'].update_state(elbo)
metrics['train/loss'].update_state(loss)
metrics['train/accuracy'].update_state(labels, logits)
metrics['train/ece'].add_batch(probs, label=labels)
for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
strategy.run(step_fn, args=(next(iterator),))
@tf.function
def test_step(iterator, dataset_name):
"""Evaluation StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
images = inputs['features']
labels = inputs['labels']
if FLAGS.ensemble_size > 1:
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
logits = tf.reshape(
[model(images, training=False)
for _ in range(FLAGS.num_eval_samples)],
[FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, NUM_CLASSES])
if FLAGS.use_bfloat16:
logits = tf.cast(logits, tf.float32)
all_probs = tf.nn.softmax(logits)
probs = tf.math.reduce_mean(all_probs, axis=[0, 1]) # marginalize
# Negative log marginal likelihood computed in a numerically-stable way.
labels_broadcasted = tf.broadcast_to(
labels,
[FLAGS.num_eval_samples, FLAGS.ensemble_size, tf.shape(labels)[0]])
log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
labels_broadcasted, logits, from_logits=True)
negative_log_likelihood = tf.reduce_mean(
-tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) +
tf.math.log(float(FLAGS.num_eval_samples * FLAGS.ensemble_size)))
l2_loss = compute_l2_loss(model)
kl = sum(model.losses) / IMAGENET_VALIDATION_IMAGES
elbo = -(negative_log_likelihood + l2_loss + kl)
if dataset_name == 'clean':
if FLAGS.ensemble_size > 1:
per_probs = tf.reduce_mean(all_probs, axis=0) # marginalize samples
metrics['test/diversity'].add_batch(per_probs)
for i in range(FLAGS.ensemble_size):
member_probs = per_probs[i]
member_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, member_probs)
metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
metrics['test/accuracy_member_{}'.format(i)].update_state(
labels, member_probs)
metrics['test/member_accuracy_mean'].update_state(
labels, member_probs)
metrics['test/member_ece_mean'].add_batch(
member_probs, label=labels)
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/kl'].update_state(kl)
metrics['test/elbo'].update_state(elbo)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/ece'].add_batch(probs, label=labels)
else:
corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
negative_log_likelihood)
corrupt_metrics['test/kl_{}'.format(dataset_name)].update_state(kl)
corrupt_metrics['test/elbo_{}'.format(dataset_name)].update_state(elbo)
corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
labels, probs)
corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
probs, label=labels)
for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
strategy.run(step_fn, args=(next(iterator),))
train_iterator = iter(train_dataset)
start_time = time.time()
for epoch in range(initial_epoch, FLAGS.train_epochs):
logging.info('Starting to run epoch: %s', epoch)
train_step(train_iterator)
current_step = (epoch + 1) * steps_per_epoch
max_steps = steps_per_epoch * FLAGS.train_epochs
time_elapsed = time.time() - start_time
steps_per_sec = float(current_step) / time_elapsed
eta_seconds = (max_steps - current_step) / steps_per_sec
message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
current_step / max_steps,
epoch + 1,
FLAGS.train_epochs,
steps_per_sec,
eta_seconds / 60,
time_elapsed / 60))
logging.info(message)
datasets_to_evaluate = {'clean': test_datasets['clean']}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
datasets_to_evaluate = test_datasets
for dataset_name, test_dataset in datasets_to_evaluate.items():
logging.info('Testing on dataset %s', dataset_name)
test_iterator = iter(test_dataset)
logging.info('Starting to run eval at epoch: %s', epoch)
test_step(test_iterator, dataset_name)
logging.info('Done with testing on %s', dataset_name)
corrupt_results = {}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
corrupt_results = utils.aggregate_corrupt_metrics(
corrupt_metrics, corruption_types, max_intensity,
FLAGS.alexnet_errors_path)
logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
metrics['train/loss'].result(),
metrics['train/accuracy'].result() * 100)
logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
metrics['test/negative_log_likelihood'].result(),
metrics['test/accuracy'].result() * 100)
for i in range(FLAGS.ensemble_size):
logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
i, metrics['test/nll_member_{}'.format(i)].result(),
metrics['test/accuracy_member_{}'.format(i)].result() * 100)
total_results = {name: metric.result() for name, metric in metrics.items()}
total_results.update(corrupt_results)
# Results from Robustness Metrics themselves return a dict, so flatten them.
total_results = utils.flatten_dictionary(total_results)
with summary_writer.as_default():
for name, result in total_results.items():
tf.summary.scalar(name, result, step=epoch + 1)
for metric in metrics.values():
metric.reset_states()
if (FLAGS.checkpoint_interval > 0 and
(epoch + 1) % FLAGS.checkpoint_interval == 0):
checkpoint_name = checkpoint.save(os.path.join(
FLAGS.output_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
final_checkpoint_name = checkpoint.save(
os.path.join(FLAGS.output_dir, 'checkpoint'))
logging.info('Saved last checkpoint to %s', final_checkpoint_name)
with summary_writer.as_default():
hp.hparams({
'base_learning_rate': FLAGS.base_learning_rate,
'one_minus_momentum': FLAGS.one_minus_momentum,
'l2': FLAGS.l2,
'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
'num_eval_samples': FLAGS.num_eval_samples,
})
if __name__ == '__main__':
app.run(main)
|
|
# -*- coding: utf-8 -*-
"""
Evaluate model performance after training.
This is for comparing supervised accuracy on different datasets, especially
for the broken-data comparison plots.
The usual setup is that the simulations are broken (so that the AE does not
have to be trained again), so 3 tests are necessary:
Trained on broken --> Test on broken (seeming performance)
Trained on broken --> Test on real (actual performance)
Trained on real --> Test on real (best case)
"""
import argparse
import numpy as np
import matplotlib.pyplot as plt
from util.evaluation_utilities import make_or_load_files, make_binned_data_plot, make_energy_mae_plot_mean_only, make_energy_mae_plot_mean_only_single
from util.saved_setups_for_plot_statistics import get_path_best_epoch
from energy_evaluation import make_or_load_hist_data
def parse_input():
parser = argparse.ArgumentParser(description='Evaluate model performance after training. This is for comparison of supervised accuracy on different datasets. Especially for the plots for the broken data comparison.')
parser.add_argument('info_tags', nargs="+", type=str, help='Names of identifiers for a saved setup. All for making all available ones.')
args = parser.parse_args()
params = vars(args)
return params
#Standard, plot acc vs energy plots of these saved setups (taken from parser now)
#which_ones=("4_64_enc",)
#extra string to be included in file names
extra_name=""
#number of bins of the histogram plot; the default used to be 97 and is 32 now; 98 bins are supported for backward compatibility
bins=32
#If not None: Change the y range of all plots to this one (for a uniform look)
y_lims_override = None
#Instead of plotting acc vs. energy, one can also make a comparison plot,
#which shows the difference between "on simulations" and "on measured data"
#then, the number of the broken mode has to be given
#can be True, False or "both"
#TODO Rework, dysfunctional
make_difference_plot=False
which_broken_study=4
def get_procedure(broken_model, real_model, brokendata_tag, realdata_tag):
#For when the "Simulation"-dataset is manipulated simulations:
modelidents = (broken_model, broken_model, real_model)
dataset_array = (brokendata_tag, realdata_tag, realdata_tag)
return modelidents, dataset_array
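# A hedged doctest-style illustration of get_procedure (paths are placeholders,
# not real model files):
# >>> get_procedure("broken.h5", "real.h5", "xzt_broken", "xzt")
# (('broken.h5', 'broken.h5', 'real.h5'), ('xzt_broken', 'xzt', 'xzt'))
# i.e. broken-on-broken (seeming), broken-on-real (actual), real-on-real (best case).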
def get_info(which_one, extra_name="", y_lims_override=None):
"""
Saved setups of plots.
Returns all relevant infos to exactly produce (or reproduce) these plots.
"""
#DEFAULT VALUES (overwritten when necessary)
#This will be added before all modelidents
modelpath = "/home/woody/capn/mppi013h/Km3-Autoencoder/models/"
#Default class type the evaluation is done for. None for autoencoders.
class_type = (2, 'up_down')
#mse, acc, mre
plot_type = "acc"
#Default location of legend ("best")
legend_loc="best"
#ylims of plot ( only for acc )
y_lims=(0.5,1.0)
#Where to save the plots
plot_path = "/home/woody/capn/mppi013h/Km3-Autoencoder/results/plots/"
folder_in_the_plots_path = "broken_study/"
#Labels for the plot are defined below now!
#label_array=["On 'simulations'", "On 'measured' data", "Upper limit on 'measured' data"]
title_of_plot=""
#Overwrite default color palette. Leave empty for auto
color_array=["orange", "blue", "navy"]
#Add the number of bins to the name of the plot file (usually 32)
extra_name="_"+ str(bins)+"_bins" + extra_name
    try: which_one = int(which_one)
    except ValueError: pass
# ----------------------------- Up down -----------------------------
if which_one=="1_unf" or which_one==0:
#vgg_3_broken1_unf
modelidents = ("vgg_3-broken1/trained_vgg_3-broken1_supervised_up_down_epoch6.h5",
"vgg_3-broken1/trained_vgg_3-broken1_supervised_up_down_epoch6.h5",
"vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5")
#Which dataset each to use
dataset_array = ("xzt_broken", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken1_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.4,1.05)
elif which_one=="1_enc" or which_one==1:
#vgg_3_broken1_enc
modelidents = ("vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken1_epoch14.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken1_epoch14.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_accdeg_epoch24.h5")
#Which dataset each to use
dataset_array = ("xzt_broken", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken1_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.4,1.05)
legend_loc="lower right"
elif which_one=="2_unf" or which_one==2:
#vgg_3_broken2_unf
modelidents = ("vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5",
"vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5",
"vgg_3-noise10/trained_vgg_3-noise10_supervised_up_down_epoch6.h5")
#Which dataset each to use
dataset_array = ("xzt", "xzt_broken2", "xzt_broken2")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with noisy data'
#in the results/plots folder:
plot_file_name = "vgg_3_broken2_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.68,0.96)
legend_loc="lower right"
elif which_one=="2_enc" or which_one==3:
#vgg_3_broken2_enc
modelidents = ("vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch10_supervised_up_down_epoch9.h5",
"vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch10_supervised_up_down_epoch9.h5",
"vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch10_supervised_up_down_noise_epoch14.h5")
#Which dataset each to use
dataset_array = ("xzt", "xzt_broken2", "xzt_broken2")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with noisy data'
#in the results/plots folder:
plot_file_name = "vgg_3_broken2_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.68,0.96)
legend_loc="lower right"
elif which_one=="4_unf" or which_one==4:
modelidents = ("vgg_3-broken4/trained_vgg_3-broken4_supervised_up_down_epoch4.h5",
"vgg_3-broken4/trained_vgg_3-broken4_supervised_up_down_epoch4.h5",
"vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken4_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.5,1.0)
elif which_one=="4_enc" or which_one==5:
modelidents = ("vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken4_epoch52.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken4_epoch52.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_accdeg_epoch24.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.5,1.0)
elif which_one=="4_pic_enc" or which_one==6:
modelidents = ("vgg_5_picture/trained_vgg_5_picture_autoencoder_epoch48_supervised_up_down_broken4_epoch53.h5",
"vgg_5_picture/trained_vgg_5_picture_autoencoder_epoch48_supervised_up_down_broken4_epoch53.h5",
"vgg_5_picture/trained_vgg_5_picture_autoencoder_epoch48_supervised_up_down_epoch74.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='600 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_picture_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
elif which_one=="4_200_enc" or which_one==7:
modelidents = ("vgg_5_200/trained_vgg_5_200_autoencoder_epoch94_supervised_up_down_broken4_epoch59.h5",
"vgg_5_200/trained_vgg_5_200_autoencoder_epoch94_supervised_up_down_broken4_epoch59.h5",
"vgg_5_200/trained_vgg_5_200_autoencoder_epoch94_supervised_up_down_epoch45.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='200 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_200_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
elif which_one=="4_64_enc" or which_one==8:
modelidents = ("vgg_5_64/trained_vgg_5_64_autoencoder_epoch64_supervised_up_down_broken4_epoch57.h5",
"vgg_5_64/trained_vgg_5_64_autoencoder_epoch64_supervised_up_down_broken4_epoch57.h5",
"vgg_5_64/trained_vgg_5_64_autoencoder_epoch64_supervised_up_down_epoch26.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='64 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_64_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
elif which_one=="4_64_enc_nodrop" or which_one==26:
modelidents = ("vgg_5_64/trained_vgg_5_64_autoencoder_epoch82_supervised_up_down_broken4_nodrop_epoch52.h5",
"vgg_5_64/trained_vgg_5_64_autoencoder_epoch82_supervised_up_down_broken4_nodrop_epoch52.h5",
"vgg_5_64/trained_vgg_5_64_autoencoder_epoch64_supervised_up_down_nodrop_epoch69.h5")
#Which dataset each to use
dataset_array = ("xzt_broken4", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='64 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_64_broken4_enc_nodrop"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
elif which_one=="4_32_enc" or which_one==9:
modelidents = ("vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch31_supervised_up_down_broken4_epoch1.h5",
"vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch31_supervised_up_down_broken4_epoch1.h5",
"vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch31_supervised_up_down_epoch48.h5")
dataset_array = ("xzt_broken4", "xzt", "xzt")
title_of_plot='32 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_32_broken4_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
legend_loc="lower right"
elif which_one=="4_32_enc_nodrop" or which_one==23:
modelidents = ("vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch22_supervised_up_down_broken4_nodrop_epoch47.h5",
"vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch22_supervised_up_down_broken4_nodrop_epoch47.h5",
"vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch31_supervised_up_down_nodrop_epoch79.h5")
dataset_array = ("xzt_broken4", "xzt", "xzt")
title_of_plot='32 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_32_broken4_enc_nodrop"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,0.95)
legend_loc="lower right"
elif which_one=="4flip_unf" or which_one==10:
modelidents = ("vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5",
"vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5",
"vgg_3-broken4/trained_vgg_3-broken4_supervised_up_down_epoch4.h5")
#Which dataset each to use
dataset_array = ("xzt", "xzt_broken4", "xzt_broken4")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with manipulated data'
#in the results/plots folder:
plot_file_name = "vgg_3_broken4_flip_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.75,1.0)
elif which_one=="4flip_enc" or which_one==11:
modelidents = ("vgg_3-broken4/trained_vgg_3-broken4_autoencoder_epoch12_supervised_up_down_xzt_epoch62.h5",
"vgg_3-broken4/trained_vgg_3-broken4_autoencoder_epoch12_supervised_up_down_xzt_epoch62.h5",
"vgg_3-broken4/trained_vgg_3-broken4_autoencoder_epoch10_supervised_up_down_broken4_epoch59.h5")
#Which dataset each to use
dataset_array = ("xzt", "xzt_broken4", "xzt_broken4")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with manipulated data'
#in the results/plots folder:
plot_file_name = "vgg_3_broken4_flip_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.75,1)
elif which_one=="5_enc" or which_one==12:
modelidents = ("vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken5_epoch58.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_broken5_epoch58.h5",
"vgg_3/trained_vgg_3_autoencoder_epoch10_supervised_up_down_accdeg_epoch24.h5")
#Which dataset each to use
dataset_array = ("xzt_broken5", "xzt", "xzt")
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Autoencoder-encoder network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken5_enc"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,1.0)
legend_loc="lower right"
elif which_one=="5_unf" or which_one==13:
broken_model = "vgg_3-broken5/trained_vgg_3-broken5_supervised_up_down_epoch6.h5"
real_model = "vgg_3/trained_vgg_3_supervised_up_down_new_epoch5.h5"
brokendata_tag = "xzt_broken5"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
#Plot properties: All in the array are plotted in one figure, with own label each
title_of_plot='Unfrozen network performance with manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_3_broken5_unf"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,1.0)
legend_loc="lower right"
elif which_one=="4_200_large_enc" or which_one==14:
broken_model = "vgg_5_200_large/trained_vgg_5_200_large_autoencoder_epoch39_supervised_up_down_broken4_epoch34.h5"
real_model = get_path_best_epoch("vgg_5_200_large", full_path=False)
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
title_of_plot='Large 200 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_200_large_broken4_enc"+extra_name+".pdf"
y_lims=(0.7,0.95)
elif which_one=="4_200_small_enc" or which_one==15:
broken_model = "vgg_5_200_small/trained_vgg_5_200_small_autoencoder_epoch77_supervised_up_down_broken4_epoch57.h5"
real_model = get_path_best_epoch("vgg_5_200_small", full_path=False)
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
title_of_plot='Small 200 neuron Autoencoder-encoder network performance\nwith manipulated simulations'
#in the results/plots folder:
plot_file_name = "vgg_5_200_small_broken4_enc"+extra_name+".pdf"
y_lims=(0.7,0.95)
# ----------------------------- Energy regression -----------------------------
elif which_one=="energy_12_enc" or which_one==16:
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_small_broken12_enc"+extra_name+".pdf"
plot_type = "mre"
#y_lims=(0.7,0.95)
broken_model = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken12_epoch48.h5"
real_model = get_path_best_epoch("vgg_3_2000_E", full_path=False)
brokendata_tag = "xzt_broken12"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
elif which_one=="energy_12_unf" or which_one==17:
brokendata_tag = "xzt_broken12"
realdata_tag = "xzt"
broken_model = "vgg_3-broken12/trained_vgg_3-broken12_supervised_energy_epoch11.h5"
real_model = get_path_best_epoch("2000_unf_E", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_small_broken12_unf"+extra_name+".pdf"
plot_type = "mre"
#y_lims=(0.7,0.95)
elif which_one=="energy_4_2000_unf" or which_one==19:
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
broken_model = "vgg_3-broken4/trained_vgg_3-broken4_supervised_energy_epoch10.h5"
real_model = get_path_best_epoch("2000_unf_E", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken4_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.2,0.6)
elif which_one=="energy_4_2000_enc" or which_one==20:
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
broken_model = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken4_nodrop_epoch5.h5"
real_model = get_path_best_epoch("vgg_3_2000_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken4_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.2,0.6)
elif which_one=="energy_13_2000_unf" or which_one==21:
brokendata_tag = "xzt_broken13"
realdata_tag = "xzt"
broken_model = "vgg_3-broken13/trained_vgg_3-broken13_supervised_energy_epoch19.h5"
real_model = get_path_best_epoch("2000_unf_E", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken13_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.02,0.78)
elif which_one=="energy_13_2000_enc" or which_one==22:
brokendata_tag = "xzt_broken13"
realdata_tag = "xzt"
broken_model = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken13_nodrop_epoch9.h5"
real_model = get_path_best_epoch("vgg_3_2000_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken13_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.02,0.78)
    #Broken 14 (noise proportional to E, up to 2 kHz extra)
#Bottleneck scan
elif which_one=="energy_14_2000_unf" or which_one==24:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_3-broken14/trained_vgg_3-broken14_supervised_energy_epoch15.h5"
real_model = get_path_best_epoch("2000_unf_E", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken14_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_2000_enc" or which_one==25:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_3/trained_vgg_3_autoencoder_epoch8_supervised_energy_broken14_nodrop_epoch7.h5"
real_model = get_path_best_epoch("vgg_3_2000_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_600_pic_enc" or which_one==27:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_picture/trained_vgg_5_picture_autoencoder_epoch44_supervised_energy_broken14_nodrop_epoch12.h5"
real_model = get_path_best_epoch("vgg_5_600_picture_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_picture_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_200_dense_enc" or which_one==28:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_200_dense-new/trained_vgg_5_200_dense-new_autoencoder_epoch101_supervised_energy_broken14_nodrop_epoch45.h5"
real_model = get_path_best_epoch("vgg_5_200_dense_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_dense_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_64_enc" or which_one==29:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_64/trained_vgg_5_64_autoencoder_epoch78_supervised_energy_broken14_nodrop_epoch49.h5"
real_model = get_path_best_epoch("vgg_5_64_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_64_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_32_enc" or which_one==30:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_32-eps01/trained_vgg_5_32-eps01_autoencoder_epoch44_supervised_energy_broken14_nodrop_epoch59.h5"
real_model = get_path_best_epoch("vgg_5_32_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_32_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_200_enc" or which_one==31:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_200/trained_vgg_5_200_autoencoder_epoch94_supervised_energy_broken14_nodrop_epoch11.h5"
real_model = get_path_best_epoch("vgg_5_200_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_200_large_enc" or which_one==36:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_200_large/trained_vgg_5_200_large_autoencoder_epoch45_supervised_energy_broken14_drop035_epoch14.h5"
real_model = get_path_best_epoch("vgg_5_200_large_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_large_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
elif which_one=="energy_14_200_small_enc" or which_one==37:
brokendata_tag = "xzt_broken14"
realdata_tag = "xzt"
broken_model = "vgg_5_200_small/trained_vgg_5_200_small_autoencoder_epoch89_supervised_energy_broken14_nodrop_epoch11.h5"
real_model = get_path_best_epoch("vgg_5_200_small_E_nodrop", full_path=False)
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_200_small_broken14_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.08,0.68)
# ----------------------------- Other tests -----------------------------
elif which_one=="energy_2_2000_unf" or which_one==32:
brokendata_tag = "xzt"
realdata_tag = "xzt_broken2"
broken_model = get_path_best_epoch("2000_unf_E", full_path=False)
real_model = "vgg_3-noise10/trained_vgg_3-noise10_supervised_energy_epoch12.h5"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken2_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.21,0.81)
elif which_one=="energy_2_2000_enc" or which_one==33:
brokendata_tag = "xzt"
realdata_tag = "xzt_broken2"
broken_model = "vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch5_supervised_energy_nodrop_epoch3.h5"
real_model = "vgg_3-noise10/trained_vgg_3-noise10_autoencoder_epoch7_supervised_energy_nodrop_epoch5.h5" #_broken2
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken2_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.21,0.81)
elif which_one=="energy_15_2000_unf" or which_one==34:
brokendata_tag = "xzt"
realdata_tag = "xzt_broken15"
broken_model = get_path_best_epoch("2000_unf_E", full_path=False)
real_model = "vgg_5_2000-broken15/trained_vgg_5_2000-broken15_supervised_energy_epoch12.h5"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken15_unf"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.18,0.55)
elif which_one=="energy_15_2000_enc" or which_one==35:
brokendata_tag = "xzt"
realdata_tag = "xzt_broken15"
broken_model = "vgg_5_64-broken15/trained_vgg_5_64-broken15_autoencoder_epoch83_supervised_energynodrop_epoch67.h5"
real_model = "vgg_5_64-broken15/trained_vgg_5_64-broken15_autoencoder_epoch83_supervised_energy_broken15_nodrop_epoch22.h5"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
folder_in_the_plots_path = "broken_study_energy/"
plot_file_name = "vgg_5_2000_broken15_enc"+extra_name+".pdf"
plot_type = "mre"
y_lims=(0.18,0.55)
# ----------------------------- Unfreeze stuff -----------------------------
elif which_one=="unfreeze_comp" or which_one==18:
broken_model = "vgg_5_200-unfreeze/trained_vgg_5_200-unfreeze_autoencoder_epoch1_supervised_up_down_contE20_broken4_epoch30.h5"
real_model = "vgg_5_200-unfreeze/trained_vgg_5_200-unfreeze_autoencoder_epoch1_supervised_up_down_contE20_epoch30.h5"
brokendata_tag = "xzt_broken4"
realdata_tag = "xzt"
modelidents, dataset_array = get_procedure(broken_model, real_model,
brokendata_tag, realdata_tag)
        #Plot properties: all models in the array are plotted in one figure, each with its own label
title_of_plot='Continuation of partially unfrozen network training'
#in the results/plots folder:
folder_in_the_plots_path="unfreeze/"
plot_file_name = "broken4_vgg5_200_contE20"+extra_name+".pdf"
#y limits of plot:
y_lims=(0.7,1.0)
legend_loc="lower right"
else:
raise NameError(str(which_one) + " is not known!")
title_of_plot=""
if plot_type=="mre":
#energy plot
label_array=["On 'simulations'", "On 'measured' data", "Lower limit on 'measured' data"]
else:
label_array=["On 'simulations'", "On 'measured' data", "Upper limit on 'measured' data"]
if y_lims_override != None:
y_lims = y_lims_override
modelidents = [modelpath + modelident for modelident in modelidents]
save_plot_as = plot_path + folder_in_the_plots_path + plot_file_name
return modelidents, dataset_array ,title_of_plot, save_plot_as, y_lims, class_type, plot_type, legend_loc, label_array, color_array
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
def make_evaluation(info_tag, extra_name, y_lims_override, show_the_plot=True):
"""
Main function:
Make an evaluation based on the info_tag (Generate+Save or load evaluation data, save plot).
A plot that shows acc or loss over the mc energy in a histogram, evaluated on different
datasets.
Often, there will be three models plotted:
0: On 'simulations'
1: On 'measured' data
2: Upper lim
"""
modelidents, dataset_array, title_of_plot, save_plot_as, y_lims, class_type, plot_type, legend_loc, label_array, color_array = get_info(info_tag, extra_name=extra_name, y_lims_override=y_lims_override)
#make plot of multiple data:
if plot_type == "acc":
#For up-down networks:
#generate or load data automatically:
#this will be a list of binned evaluations, one for every model
hist_data_array = make_or_load_files(modelidents, dataset_array, class_type=class_type, bins=bins)
print_statistics_in_numbers(hist_data_array, plot_type)
y_label_of_plot="Accuracy"
fig = make_binned_data_plot(hist_data_array, label_array, title_of_plot, y_label=y_label_of_plot, y_lims=y_lims, color_array=color_array, legend_loc=legend_loc)
elif plot_type == "mre":
        #Median relative error for energy regression, separated for track and shower
        #Data is loaded by the energy evaluation function, which is not fully
        #compatible with this one, so some additional info is copied from there manually
hist_data_array=[]
hist_data_single=[]
for model_no,model_path in enumerate(modelidents):
dataset_tag = dataset_array[model_no]
print("Working on", model_path.split("trained_")[1][:-3], "using dataset", dataset_tag)
zero_center=True
energy_bins_2d=np.arange(3,101,1)
energy_bins_1d=20
hist_data_2d, energy_mae_plot_data = make_or_load_hist_data(model_path,
dataset_tag, zero_center, energy_bins_2d, energy_bins_1d, samples=None,
include_mae_single=True)
#only interested in the mae plot data
hist_data_array.append(energy_mae_plot_data[:2])
hist_data_single.append(energy_mae_plot_data[2])
print_statistics_in_numbers(hist_data_array, plot_type, hist_data_single=hist_data_single)
y_label_of_plot='Median fractional energy resolution'
#Make the single plot and save without displaying
fig_single = make_energy_mae_plot_mean_only_single(hist_data_single, label_list=label_array, color_list=color_array, y_lims=y_lims)
fig_single_save_as=save_plot_as[:-4]+"_single.pdf"
fig_single.savefig(fig_single_save_as)
print("Single plot saved to", fig_single_save_as)
plt.close(fig_single)
fig = make_energy_mae_plot_mean_only(hist_data_array, label_list=label_array, color_list=color_array, y_lims=y_lims)
elif plot_type == "mse":
#Intended for Autoencoders, not been used in a long time...
y_label_of_plot="Loss"
fig = make_binned_data_plot(hist_data_array, label_array, title_of_plot, y_label=y_label_of_plot, y_lims=y_lims, color_array=color_array, legend_loc=legend_loc)
else:
print("Plot type", plot_type, "not supported. Not generating plots, but hist_data is still saved.")
fig.savefig(save_plot_as)
print("Plot saved to", save_plot_as)
if show_the_plot == True:
plt.show(fig)
else:
plt.close(fig)
return
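#Illustrative call (sketch, not part of the original script): evaluate the broken-4
#energy study and save the plot without displaying it. The tag string is one of the
#which_one keys defined in get_info above; "" and None stand in for the values that
#parse_input would normally provide.
#   make_evaluation("energy_4_2000_unf", extra_name="", y_lims_override=None,
#                   show_the_plot=False)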
def print_statistics_in_numbers(hist_data_array, plot_type, return_line=False, hist_data_single=None):
"""
Prints the average overall loss of performance,
averaged over all bins (not all events).
For this, three hist_datas are necessary:
hist_data_array
[0]: On simulations (broken on broken)
[1]: On measured (broken on real)
[2]: Upper limit (real on real)
"""
print("\n----------Statistics of this evaluation-----------------")
print("\tAveraged over energy bins, not events!")
if plot_type == "acc":
#hist_data contains [energy, binned_acc] for every model
on_simulations_data = hist_data_array[0][1]
on_measured_data = hist_data_array[1][1]
upper_limit_data = hist_data_array[2][1]
dropoff_sim_measured = ( (on_simulations_data - on_measured_data)/on_measured_data ).mean()
dropoff_upper_limit_measured = ((upper_limit_data - on_measured_data)/on_measured_data ).mean()
print("Acc on Sims:\tOn measured\tUpper lim")
print(np.mean(on_simulations_data),"\t", np.mean(on_measured_data),"\t", np.mean(upper_limit_data))
print("\nAverage relative %-acc reduction across all bins: 100 * (x - measured) / measured")
print("From simulation to measured\tFrom upper lim to measured:")
print(dropoff_sim_measured*100,"\t",dropoff_upper_limit_measured*100)
print("--------------------------------------------------------\n")
header = ("(Sim-Meas)/Meas","(Upperlim-Meas)/Meas")
line=(dropoff_sim_measured*100, dropoff_upper_limit_measured*100)
elif plot_type=="mre":
#hist_data_array is for every model the tuple:
#[energy_mae_plot_data_track, energy_mae_plot_data_shower]
#each containing [energy, binned mre]
        #hist_data_single contains for every model the unseparated data tuple: [energy, binned mre]
on_simulations_data_track = np.array(hist_data_array[0][0][1])
on_measured_data_track = np.array(hist_data_array[1][0][1])
upper_limit_data_track = np.array(hist_data_array[2][0][1])
on_simulations_data_shower = np.array(hist_data_array[0][1][1])
on_measured_data_shower = np.array(hist_data_array[1][1][1])
upper_limit_data_shower = np.array(hist_data_array[2][1][1])
on_simulations_data_single = np.array(hist_data_single[0][1])
on_measured_data_single = np.array(hist_data_single[1][1])
upper_limit_data_single = np.array(hist_data_single[2][1])
print("First three are MRE, last two are average relative % increase across all bins: -1 * 100 * (x - measured) / measured")
def print_one_table (on_simulations_data, on_measured_data, upper_limit_data, printig_header="Track like events:"):
dropoff_sim_measured = (-1*(on_simulations_data - on_measured_data)/on_measured_data).mean()
dropoff_upper_limit = (-1*(upper_limit_data - on_measured_data )/on_measured_data ).mean()
print(printig_header)
print("On Sims:\tOn measured\tUpper lim\tFrom simulation to measured\tFrom upper lim to measured:")
print(np.mean(on_simulations_data),"\t", np.mean(on_measured_data),"\t", np.mean(upper_limit_data),"\t", dropoff_sim_measured*100,"\t",dropoff_upper_limit*100)
print("--------------------------------------------------------\n")
print_one_table(on_simulations_data_track, on_measured_data_track, upper_limit_data_track, "Track like events:")
print_one_table(on_simulations_data_shower, on_measured_data_shower, upper_limit_data_shower, "Shower like events:")
print_one_table(on_simulations_data_single, on_measured_data_single, upper_limit_data_single, "All events:")
header = None
line=None
else:
raise NameError("Unknown plottype"+plot_type)
if return_line:
return header, line
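#--- Illustrative helper (added sketch, not used by the original script) ---------
#The statistics above boil down to a bin-averaged relative difference between two
#binned performance curves; assuming numpy is imported as np (as used above):
def _relative_dropoff_percent(reference, measured):
    """Mean relative difference (in %) between two binned curves."""
    reference = np.asarray(reference, dtype=float)
    measured = np.asarray(measured, dtype=float)
    return 100.0 * ((reference - measured) / measured).mean()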
if __name__ == "__main__":
params = parse_input()
which_ones = params["info_tags"]
if "all" in which_ones:
show_the_plot = False
current_tag=0
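        #get_info raises a NameError for tags it does not know, so counting up
        #through the integer tags until that happens visits every defined plot.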
while True:
try:
make_evaluation(current_tag, extra_name, y_lims_override, show_the_plot)
current_tag+=1
except NameError:
print("Done. Made a total of", current_tag, "plots.")
break
else:
show_the_plot = True
for info_tag in which_ones:
make_evaluation(info_tag, extra_name, y_lims_override, show_the_plot)
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#not supported anymore...
if make_difference_plot == True or make_difference_plot == "both":
    raise NotImplementedError("The difference plots below are not supported anymore.")
#which plots to make diff of; (first - second) / first
make_diff_of_list=((0,1),(2,1))
title_list=("Relative loss of accuracy: 'simulations' to 'measured' data",
"Realtive difference in accuracy: Upper limit to 'measured' data")
if which_broken_study==2:
which_ones = ("2_unf", "2_enc")
save_as_list=(plot_path + "vgg_3_broken2_sim_real"+extra_name+".pdf",
plot_path + "vgg_3_broken2_upper_real"+extra_name+".pdf")
y_lims_list=((-0.02,0.1),(-0.02,0.1))
elif which_broken_study==4:
which_ones = ("4_unf", "4_enc")
save_as_list=(plot_path + "vgg_3_broken4_sim_real"+extra_name+".pdf",
plot_path + "vgg_3_broken4_upper_real"+extra_name+".pdf")
y_lims_list=((-0.02,0.1),(-0.02,0.1))
else:
        raise NameError("Unknown which_broken_study: " + str(which_broken_study))
for i in range(len(make_diff_of_list)):
#label_array=["On 'simulations'", "On 'measured' data", "Upper limit on 'measured' data"]
modelidents,dataset_array,title_of_plot,plot_file_name,y_lims = get_info(which_ones[0], y_lims_override=y_lims_override)
modelnames=[] # a tuple of eg "vgg_1_xzt_supervised_up_down_epoch6"
# (created from "trained_vgg_1_xzt_supervised_up_down_epoch6.h5" )
for modelident in modelidents:
modelnames.append(modelident.split("trained_")[1][:-3])
hist_data_array_unf = make_or_load_files(modelnames, dataset_array, modelidents=modelidents, class_type=class_type, bins=bins)
modelidents,dataset_array,title_of_plot,plot_file_name,y_lims = get_info(which_ones[1], y_lims_override=y_lims_override)
modelnames=[] # a tuple of eg "vgg_1_xzt_supervised_up_down_epoch6"
# (created from "trained_vgg_1_xzt_supervised_up_down_epoch6.h5" )
for modelident in modelidents:
modelnames.append(modelident.split("trained_")[1][:-3])
hist_data_array_enc = make_or_load_files(modelnames, dataset_array, modelidents=modelidents, class_type=class_type, bins=bins)
label_array=["Unfrozen", "Autoencoder-encoder"]
#Overwrite default color palette. Leave empty for auto
color_array=[]
#loss, acc, None
plot_type = "acc"
#Info about model
class_type = (2, 'up_down')
modelpath = "/home/woody/capn/mppi013h/Km3-Autoencoder/models/"
plot_path = "/home/woody/capn/mppi013h/Km3-Autoencoder/results/plots/"
title_of_plot=title_list[i]
save_plot_as = save_as_list[i]
y_lims=y_lims_list[i]
make_diff_of=make_diff_of_list[i]
hist_data_array_diff=[]
hist_1=np.array(hist_data_array_unf[make_diff_of[0]])
hist_2=np.array(hist_data_array_unf[make_diff_of[1]])
diff_hist=[hist_1[0], (hist_1[1]-hist_2[1])/hist_1[1]]
hist_data_array_diff.append(diff_hist)
hist_1=np.array(hist_data_array_enc[make_diff_of[0]])
hist_2=np.array(hist_data_array_enc[make_diff_of[1]])
diff_hist=[hist_1[0], (hist_1[1]-hist_2[1])/hist_1[1]]
hist_data_array_diff.append(diff_hist)
#make plot of multiple data:
if plot_type == "acc":
y_label_of_plot="Difference in accuracy"
make_energy_to_accuracy_plot_comp_data(hist_data_array_diff, label_array, title_of_plot, filepath=save_plot_as, y_label=y_label_of_plot, y_lims=y_lims, color_array=color_array)
elif plot_type == "loss":
y_label_of_plot="Loss"
make_energy_to_loss_plot_comp_data(hist_data_array_diff, label_array, title_of_plot, filepath=save_plot_as, y_label=y_label_of_plot, color_array=color_array)
elif plot_type == None:
print("plot_type==None: Not generating plots")
else:
print("Plot type", plot_type, "not supported. Not generating plots, but hist_data is still saved.")
print("Plot saved to", save_plot_as)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Valuation using Reinforcement Learning (DVRL)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import numpy as np
from sklearn import metrics
import tensorflow.compat.v1 as tf
import tqdm
from dvrl import dvrl_metrics
from tensorflow.contrib import layers as contrib_layers
class Dvrl(object):
"""Data Valuation using Reinforcement Learning (DVRL) class.
Attributes:
x_train: training feature
y_train: training labels
x_valid: validation features
y_valid: validation labels
problem: 'regression' or 'classification'
pred_model: predictive model (object)
parameters: network parameters such as hidden_dim, iterations,
activation function, layer_number, learning rate
checkpoint_file_name: File name for saving and loading the trained model
flags: flag for training with stochastic gradient descent (flag_sgd)
and flag for using pre-trained model (flag_pretrain)
"""
def __init__(self, x_train, y_train, x_valid, y_valid,
problem, pred_model, parameters, checkpoint_file_name, flags):
"""Initializes DVRL."""
# Inputs
self.x_train = x_train
self.y_train = y_train
self.x_valid = x_valid
self.y_valid = y_valid
self.problem = problem
# One-hot encoded labels
if self.problem == 'classification':
self.y_train_onehot = \
np.eye(len(np.unique(y_train)))[y_train.astype(int)]
self.y_valid_onehot = \
np.eye(len(np.unique(y_train)))[y_valid.astype(int)]
elif self.problem == 'regression':
self.y_train_onehot = np.reshape(y_train, [len(y_train), 1])
self.y_valid_onehot = np.reshape(y_valid, [len(y_valid), 1])
# Network parameters
self.hidden_dim = parameters['hidden_dim']
self.comb_dim = parameters['comb_dim']
self.outer_iterations = parameters['iterations']
self.act_fn = parameters['activation']
self.layer_number = parameters['layer_number']
self.batch_size = np.min([parameters['batch_size'], len(x_train[:, 0])])
self.learning_rate = parameters['learning_rate']
# Basic parameters
self.epsilon = 1e-8 # Adds to the log to avoid overflow
self.threshold = 0.9 # Encourages exploration
# Flags
self.flag_sgd = flags['sgd']
self.flag_pretrain = flags['pretrain']
# If the pred_model uses stochastic gradient descent (SGD) for training
if self.flag_sgd:
self.inner_iterations = parameters['inner_iterations']
self.batch_size_predictor = np.min([parameters['batch_size_predictor'],
len(x_valid[:, 0])])
# Checkpoint file name
self.checkpoint_file_name = checkpoint_file_name
# Basic parameters
self.data_dim = len(x_train[0, :])
self.label_dim = len(self.y_train_onehot[0, :])
# Training Inputs
# x_input can be raw input or its encoded representation, e.g. using a
# pre-trained neural network. Using encoded representation can be beneficial
# to reduce computational cost for high dimensional inputs, like images.
self.x_input = tf.placeholder(tf.float32, [None, self.data_dim])
self.y_input = tf.placeholder(tf.float32, [None, self.label_dim])
# Prediction difference
# y_hat_input is the prediction difference between predictive models
# trained on the training set and validation set.
# (adding y_hat_input into data value estimator as the additional input
# is observed to improve data value estimation quality in some cases)
self.y_hat_input = tf.placeholder(tf.float32, [None, self.label_dim])
# Selection vector
self.s_input = tf.placeholder(tf.float32, [None, 1])
# Rewards (Reinforcement signal)
self.reward_input = tf.placeholder(tf.float32)
    # Pred model (Note that any model architecture can be used as the predictor
    # model, either randomly initialized or pre-trained with the training data.
    # The only requirement is that the predictor model provides fit (e.g. using
    # a certain number of back-propagation iterations) and predict functions as
    # its methods.)
self.pred_model = pred_model
# Final model
self.final_model = pred_model
# With randomly initialized predictor
if (not self.flag_pretrain) & self.flag_sgd:
if not os.path.exists('tmp'):
os.makedirs('tmp')
pred_model.fit(self.x_train, self.y_train_onehot,
batch_size=len(self.x_train), epochs=0)
# Saves initial randomization
pred_model.save_weights('tmp/pred_model.h5')
# With pre-trained model, pre-trained model should be saved as
# 'tmp/pred_model.h5'
# Baseline model
if self.flag_sgd:
self.ori_model = copy.copy(self.pred_model)
self.ori_model.load_weights('tmp/pred_model.h5')
# Trains the model
self.ori_model.fit(x_train, self.y_train_onehot,
batch_size=self.batch_size_predictor,
epochs=self.inner_iterations, verbose=False)
else:
self.ori_model = copy.copy(self.pred_model)
self.ori_model.fit(x_train, y_train)
# Valid baseline model
if 'summary' in dir(self.pred_model):
self.val_model = copy.copy(self.pred_model)
self.val_model.load_weights('tmp/pred_model.h5')
# Trains the model
self.val_model.fit(x_valid, self.y_valid_onehot,
batch_size=self.batch_size_predictor,
epochs=self.inner_iterations, verbose=False)
else:
self.val_model = copy.copy(self.pred_model)
self.val_model.fit(x_valid, y_valid)
def data_value_evaluator(self):
"""Returns data value evaluator model.
Here, we assume a simple multi-layer perceptron architecture for the data
value evaluator model. For data types like tabular, multi-layer perceptron
is already efficient at extracting the relevant information.
For high-dimensional data types like images or text,
it is important to introduce inductive biases to the architecture to
extract information efficiently. In such cases, there are two options:
(i) Input the encoded representations (e.g. the last layer activations of
ResNet for images, or the last layer activations of BERT for text) and use
the multi-layer perceptron on top of it. The encoded representations can
simply come from a pre-trained predictor model using the entire dataset.
(ii) Modify the data value evaluator model definition below to have the
appropriate inductive bias (e.g. using convolutional layers for images,
    or attention layers for text).
Returns:
dve: data value estimations
"""
with tf.variable_scope('data_value_estimator', reuse=tf.AUTO_REUSE):
inputs = tf.concat((self.x_input, self.y_input), axis=1)
# Stacks multi-layered perceptron
inter_layer = contrib_layers.fully_connected(
inputs, self.hidden_dim, activation_fn=self.act_fn)
for _ in range(int(self.layer_number - 3)):
inter_layer = contrib_layers.fully_connected(
inter_layer, self.hidden_dim, activation_fn=self.act_fn)
inter_layer = contrib_layers.fully_connected(
inter_layer, self.comb_dim, activation_fn=self.act_fn)
# Combines with y_hat
comb_layer = tf.concat((inter_layer, self.y_hat_input), axis=1)
comb_layer = contrib_layers.fully_connected(
comb_layer, self.comb_dim, activation_fn=self.act_fn)
dve = contrib_layers.fully_connected(
comb_layer, 1, activation_fn=tf.nn.sigmoid)
return dve
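  # Shape summary of the estimator defined above (for reference):
  #   concat(x, y) -> (layer_number - 2) hidden layers of size hidden_dim
  #   -> comb_dim -> concat with y_hat -> comb_dim -> 1 (sigmoid) = data value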
def train_dvrl(self, perf_metric):
"""Trains DVRL based on the specified objective function.
Args:
perf_metric: 'auc', 'accuracy', 'log-loss' for classification
'mae', 'mse', 'rmspe' for regression
"""
# Generates selected probability
est_data_value = self.data_value_evaluator()
# Generator loss (REINFORCE algorithm)
prob = tf.reduce_sum(self.s_input * tf.log(est_data_value + self.epsilon) +\
(1-self.s_input) * \
tf.log(1 - est_data_value + self.epsilon))
dve_loss = (-self.reward_input * prob) + \
1e3 * (tf.maximum(tf.reduce_mean(est_data_value) \
- self.threshold, 0) + \
tf.maximum((1-self.threshold) - \
tf.reduce_mean(est_data_value), 0))
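    # In other words: prob is the log-likelihood of the sampled selection vector
    # s under Bernoulli(est_data_value); the first term is the REINFORCE
    # objective -reward * log-prob, and the penalty term keeps the mean
    # estimated data value inside [1 - threshold, threshold] to encourage
    # exploration.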
# Variable
dve_vars = [v for v in tf.trainable_variables() \
if v.name.startswith('data_value_estimator')]
# Solver
dve_solver = tf.train.AdamOptimizer(self.learning_rate).minimize(
dve_loss, var_list=dve_vars)
# Baseline performance
if self.flag_sgd:
y_valid_hat = self.ori_model.predict(self.x_valid)
else:
if self.problem == 'classification':
y_valid_hat = self.ori_model.predict_proba(self.x_valid)
elif self.problem == 'regression':
y_valid_hat = self.ori_model.predict(self.x_valid)
if perf_metric == 'auc':
valid_perf = metrics.roc_auc_score(self.y_valid, y_valid_hat[:, 1])
elif perf_metric == 'accuracy':
valid_perf = metrics.accuracy_score(self.y_valid, np.argmax(y_valid_hat,
axis=1))
elif perf_metric == 'log_loss':
valid_perf = -metrics.log_loss(self.y_valid, y_valid_hat)
elif perf_metric == 'rmspe':
valid_perf = dvrl_metrics.rmspe(self.y_valid, y_valid_hat)
elif perf_metric == 'mae':
valid_perf = metrics.mean_absolute_error(self.y_valid, y_valid_hat)
elif perf_metric == 'mse':
valid_perf = metrics.mean_squared_error(self.y_valid, y_valid_hat)
# Prediction differences
if self.flag_sgd:
y_train_valid_pred = self.val_model.predict(self.x_train)
else:
if self.problem == 'classification':
y_train_valid_pred = self.val_model.predict_proba(self.x_train)
elif self.problem == 'regression':
y_train_valid_pred = self.val_model.predict(self.x_train)
y_train_valid_pred = np.reshape(y_train_valid_pred, [-1, 1])
if self.problem == 'classification':
y_pred_diff = np.abs(self.y_train_onehot - y_train_valid_pred)
elif self.problem == 'regression':
y_pred_diff = \
np.abs(self.y_train_onehot - y_train_valid_pred)/self.y_train_onehot
# Main session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Model save at the end
saver = tf.train.Saver(dve_vars)
for _ in tqdm.tqdm(range(self.outer_iterations)):
# Batch selection
batch_idx = \
np.random.permutation(len(self.x_train[:, 0]))[:self.batch_size]
x_batch = self.x_train[batch_idx, :]
y_batch_onehot = self.y_train_onehot[batch_idx]
y_batch = self.y_train[batch_idx]
y_hat_batch = y_pred_diff[batch_idx]
# Generates selection probability
est_dv_curr = sess.run(
est_data_value,
feed_dict={
self.x_input: x_batch,
self.y_input: y_batch_onehot,
self.y_hat_input: y_hat_batch
})
# Samples the selection probability
sel_prob_curr = np.random.binomial(1, est_dv_curr, est_dv_curr.shape)
# Exception (When selection probability is 0)
if np.sum(sel_prob_curr) == 0:
est_dv_curr = 0.5 * np.ones(np.shape(est_dv_curr))
sel_prob_curr = np.random.binomial(1, est_dv_curr, est_dv_curr.shape)
# Trains predictor
# If the predictor is neural network
if 'summary' in dir(self.pred_model):
new_model = self.pred_model
new_model.load_weights('tmp/pred_model.h5')
# Train the model
new_model.fit(x_batch, y_batch_onehot,
sample_weight=sel_prob_curr[:, 0],
batch_size=self.batch_size_predictor,
epochs=self.inner_iterations, verbose=False)
y_valid_hat = new_model.predict(self.x_valid)
else:
new_model = self.pred_model
new_model.fit(x_batch, y_batch, sel_prob_curr[:, 0])
# Prediction
if 'summary' in dir(new_model):
y_valid_hat = new_model.predict(self.x_valid)
else:
if self.problem == 'classification':
y_valid_hat = new_model.predict_proba(self.x_valid)
elif self.problem == 'regression':
y_valid_hat = new_model.predict(self.x_valid)
# Reward computation
if perf_metric == 'auc':
dvrl_perf = metrics.roc_auc_score(self.y_valid, y_valid_hat[:, 1])
elif perf_metric == 'accuracy':
dvrl_perf = metrics.accuracy_score(self.y_valid, np.argmax(y_valid_hat,
axis=1))
elif perf_metric == 'log_loss':
dvrl_perf = -metrics.log_loss(self.y_valid, y_valid_hat)
elif perf_metric == 'rmspe':
dvrl_perf = dvrl_metrics.rmspe(self.y_valid, y_valid_hat)
elif perf_metric == 'mae':
dvrl_perf = metrics.mean_absolute_error(self.y_valid, y_valid_hat)
elif perf_metric == 'mse':
dvrl_perf = metrics.mean_squared_error(self.y_valid, y_valid_hat)
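      # The reward is the marginal performance of the model trained on the
      # sampled batch versus the baseline valid_perf; for regression the
      # metrics are errors (lower is better), hence the flipped sign below.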
if self.problem == 'classification':
reward_curr = dvrl_perf - valid_perf
elif self.problem == 'regression':
reward_curr = valid_perf - dvrl_perf
# Trains the generator
_, _ = sess.run(
[dve_solver, dve_loss],
feed_dict={
self.x_input: x_batch,
self.y_input: y_batch_onehot,
self.y_hat_input: y_hat_batch,
self.s_input: sel_prob_curr,
self.reward_input: reward_curr
})
# Saves trained model
saver.save(sess, self.checkpoint_file_name)
# Trains DVRL predictor
# Generate data values
final_data_value = sess.run(
est_data_value, feed_dict={
self.x_input: self.x_train,
self.y_input: self.y_train_onehot,
self.y_hat_input: y_pred_diff})[:, 0]
# Trains final model
# If the final model is neural network
if 'summary' in dir(self.pred_model):
self.final_model.load_weights('tmp/pred_model.h5')
# Train the model
self.final_model.fit(self.x_train, self.y_train_onehot,
sample_weight=final_data_value,
batch_size=self.batch_size_predictor,
epochs=self.inner_iterations, verbose=False)
else:
self.final_model.fit(self.x_train, self.y_train, final_data_value)
def data_valuator(self, x_train, y_train):
"""Returns data values using the data valuator model.
Args:
x_train: training features
y_train: training labels
Returns:
      final_data_value: final data values of the training samples
"""
# One-hot encoded labels
if self.problem == 'classification':
y_train_onehot = np.eye(len(np.unique(y_train)))[y_train.astype(int)]
y_train_valid_pred = self.val_model.predict_proba(x_train)
elif self.problem == 'regression':
y_train_onehot = np.reshape(y_train, [len(y_train), 1])
y_train_valid_pred = np.reshape(self.val_model.predict(x_train),
[-1, 1])
# Generates y_train_hat
if self.problem == 'classification':
y_train_hat = np.abs(y_train_onehot - y_train_valid_pred)
elif self.problem == 'regression':
y_train_hat = np.abs(y_train_onehot - y_train_valid_pred)/y_train_onehot
# Restores the saved model
imported_graph = \
tf.train.import_meta_graph(self.checkpoint_file_name + '.meta')
sess = tf.Session()
imported_graph.restore(sess, self.checkpoint_file_name)
# Estimates data value
est_data_value = self.data_value_evaluator()
final_data_value = sess.run(
est_data_value, feed_dict={
self.x_input: x_train,
self.y_input: y_train_onehot,
self.y_hat_input: y_train_hat})[:, 0]
return final_data_value
def dvrl_predictor(self, x_test):
"""Returns predictions using the predictor model.
Args:
x_test: testing features
Returns:
y_test_hat: predictions of the predictive model with DVRL
"""
if self.flag_sgd:
y_test_hat = self.final_model.predict(x_test)
else:
if self.problem == 'classification':
y_test_hat = self.final_model.predict_proba(x_test)
elif self.problem == 'regression':
y_test_hat = self.final_model.predict(x_test)
return y_test_hat
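# ------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# scikit-learn style predictor and the parameter/flag keys read by Dvrl above;
# x_train, y_train, x_valid, y_valid, x_test are user-provided numpy arrays.
#
#   from sklearn import linear_model
#   parameters = {'hidden_dim': 100, 'comb_dim': 10, 'iterations': 1000,
#                 'activation': tf.nn.relu, 'layer_number': 5,
#                 'batch_size': 2000, 'learning_rate': 0.01}
#   flags = {'sgd': False, 'pretrain': False}
#   pred_model = linear_model.LogisticRegression(solver='lbfgs')
#   dvrl = Dvrl(x_train, y_train, x_valid, y_valid, 'classification',
#               pred_model, parameters, 'tmp/dvrl_model', flags)
#   dvrl.train_dvrl('accuracy')
#   data_values = dvrl.data_valuator(x_train, y_train)
#   y_test_hat = dvrl.dvrl_predictor(x_test)
# ------------------------------------------------------------------------------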
|
|
#!/usr/bin/env python
"""GRR specific AFF4 objects."""
import re
import StringIO
import time
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.aff4_objects import standard
from grr.proto import flows_pb2
class SpaceSeparatedStringArray(rdfvalue.RDFString):
"""A special string which stores strings as space separated."""
def __iter__(self):
for value in self._value.split():
yield value
class VersionString(rdfvalue.RDFString):
@property
def versions(self):
version = str(self)
result = []
for x in version.split("."):
try:
result.append(int(x))
except ValueError:
break
return result
class VFSGRRClient(standard.VFSDirectory):
"""A Remote client."""
class SchemaCls(standard.VFSDirectory.SchemaCls):
"""The schema for the client."""
client_index = rdfvalue.RDFURN("aff4:/index/client")
CERT = aff4.Attribute("metadata:cert", rdfvalue.RDFX509Cert,
"The PEM encoded cert of the client.")
FILESYSTEM = aff4.Attribute("aff4:filesystem", rdfvalue.Filesystems,
"Filesystems on the client.")
CLIENT_INFO = aff4.Attribute(
"metadata:ClientInfo", rdfvalue.ClientInformation,
"GRR client information", "GRR client", default="")
LAST_BOOT_TIME = aff4.Attribute("metadata:LastBootTime",
rdfvalue.RDFDatetime,
"When the machine was last booted",
"BootTime")
FIRST_SEEN = aff4.Attribute("metadata:FirstSeen", rdfvalue.RDFDatetime,
"First time the client registered with us",
"FirstSeen")
# Information about the host.
HOSTNAME = aff4.Attribute("metadata:hostname", rdfvalue.RDFString,
"Hostname of the host.", "Host",
index=client_index)
FQDN = aff4.Attribute("metadata:fqdn", rdfvalue.RDFString,
"Fully qualified hostname of the host.", "FQDN",
index=client_index)
SYSTEM = aff4.Attribute("metadata:system", rdfvalue.RDFString,
"Operating System class.", "System")
UNAME = aff4.Attribute("metadata:uname", rdfvalue.RDFString,
"Uname string.", "Uname")
OS_RELEASE = aff4.Attribute("metadata:os_release", rdfvalue.RDFString,
"OS Major release number.", "Release")
OS_VERSION = aff4.Attribute("metadata:os_version", VersionString,
"OS Version number.", "Version")
# ARCH values come from platform.uname machine value, e.g. x86_64, AMD64.
ARCH = aff4.Attribute("metadata:architecture", rdfvalue.RDFString,
"Architecture.", "Architecture")
INSTALL_DATE = aff4.Attribute("metadata:install_date", rdfvalue.RDFDatetime,
"Install Date.", "Install")
# The knowledge base is used for storing data about the host and users.
# This is currently a slightly odd object as we only use some of the fields.
# The proto itself is used in Artifact handling outside of GRR (e.g. Plaso).
# Over time we will migrate fields into this proto, but for now it is a mix.
KNOWLEDGE_BASE = aff4.Attribute("metadata:knowledge_base",
rdfvalue.KnowledgeBase,
"Artifact Knowledge Base", "KnowledgeBase")
# Deprecated for new clients - DO NOT USE.
GRR_CONFIG = aff4.Attribute("aff4:client_config", rdfvalue.GRRConfig,
"Running configuration for the GRR client.")
GRR_CONFIGURATION = aff4.Attribute(
"aff4:client_configuration", rdfvalue.Dict,
"Running configuration for the GRR client.", "Config")
USER = aff4.Attribute("aff4:users", rdfvalue.Users,
"A user of the system.", "Users")
USERNAMES = aff4.Attribute("aff4:user_names", SpaceSeparatedStringArray,
"A space separated list of system users.",
"Usernames",
index=client_index)
# This information is duplicated from the INTERFACES attribute but is done
# to allow for fast searching by mac address.
MAC_ADDRESS = aff4.Attribute("aff4:mac_addresses", rdfvalue.RDFString,
"A hex encoded MAC address.", "MAC",
index=client_index)
PING = aff4.Attribute("metadata:ping", rdfvalue.RDFDatetime,
"The last time the server heard from this client.",
"LastCheckin", versioned=False, default=0)
CLOCK = aff4.Attribute("metadata:clock", rdfvalue.RDFDatetime,
"The last clock read on the client "
"(Can be used to estimate client clock skew).",
"Clock", versioned=False)
CLIENT_IP = aff4.Attribute("metadata:client_ip", rdfvalue.RDFString,
"The ip address this client connected from.",
"Client_ip", versioned=False)
# This is the last foreman rule that applied to us
LAST_FOREMAN_TIME = aff4.Attribute(
"aff4:last_foreman_time", rdfvalue.RDFDatetime,
"The last time the foreman checked us.", versioned=False)
SUMMARY = aff4.Attribute(
"aff4:summary", rdfvalue.ClientSummary,
"A summary of this client", versioned=False)
# Valid client ids
CLIENT_ID_RE = re.compile(r"^C\.[0-9a-fA-F]{16}$")
def Initialize(self):
# Our URN must be a valid client.id.
self.client_id = rdfvalue.ClientURN(self.urn)
def Update(self, attribute=None, priority=None):
if attribute == self.Schema.CONTAINS:
flow_id = flow.GRRFlow.StartFlow(client_id=self.client_id,
flow_name="Interrogate",
token=self.token, priority=priority)
return flow_id
def OpenMember(self, path, mode="rw"):
return aff4.AFF4Volume.OpenMember(self, path, mode=mode)
AFF4_PREFIXES = {rdfvalue.PathSpec.PathType.OS: "/fs/os",
rdfvalue.PathSpec.PathType.TSK: "/fs/tsk",
rdfvalue.PathSpec.PathType.REGISTRY: "/registry",
rdfvalue.PathSpec.PathType.MEMORY: "/devices/memory"}
@staticmethod
def PathspecToURN(pathspec, client_urn):
"""Returns a mapping between a pathspec and an AFF4 URN.
Args:
pathspec: The PathSpec instance to convert.
client_urn: A URN of any object within the client. We use it to find the
client id.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
client_urn = rdfvalue.ClientURN(client_urn)
if not isinstance(pathspec, rdfvalue.RDFValue):
raise ValueError("Pathspec should be an rdfvalue.")
    # If the first level is OS and the second level is TSK it's probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
dev = pathspec[0].path
if pathspec[0].HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":" + str(pathspec[0].offset / 512)
if (len(pathspec) > 1 and
pathspec[0].pathtype == rdfvalue.PathSpec.PathType.OS and
pathspec[1].pathtype == rdfvalue.PathSpec.PathType.TSK):
result = [VFSGRRClient.AFF4_PREFIXES[rdfvalue.PathSpec.PathType.TSK],
dev]
# Skip the top level pathspec.
pathspec = pathspec[1]
else:
# For now just map the top level prefix based on the first pathtype
result = [VFSGRRClient.AFF4_PREFIXES[pathspec[0].pathtype]]
for p in pathspec:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":" + str(p.offset / 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result))
def GetSummary(self):
"""Gets a client summary object."""
summary = self.Get(self.Schema.SUMMARY)
if summary is None:
summary = rdfvalue.ClientSummary(client_id=self.urn)
summary.system_info.node = self.Get(self.Schema.HOSTNAME)
summary.system_info.system = self.Get(self.Schema.SYSTEM)
summary.system_info.release = self.Get(self.Schema.OS_RELEASE)
summary.system_info.version = str(self.Get(self.Schema.OS_VERSION, ""))
summary.system_info.fqdn = self.Get(self.Schema.FQDN)
summary.system_info.machine = self.Get(self.Schema.ARCH)
summary.system_info.install_date = self.Get(self.Schema.INSTALL_DATE)
summary.users = self.Get(self.Schema.USER)
summary.interfaces = self.Get(self.Schema.INTERFACES)
summary.client_info = self.Get(self.Schema.CLIENT_INFO)
return summary
class UpdateVFSFileArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.UpdateVFSFileArgs
class UpdateVFSFile(flow.GRRFlow):
"""A flow to update VFS file."""
args_type = UpdateVFSFileArgs
def Init(self):
self.state.Register("get_file_flow_urn")
@flow.StateHandler()
def Start(self):
"""Calls the Update() method of a given VFSFile/VFSDirectory object."""
self.Init()
fd = aff4.FACTORY.Open(self.args.vfs_file_urn, mode="rw",
token=self.token)
# Account for implicit directories.
if fd.Get(fd.Schema.TYPE) is None:
fd = fd.Upgrade("VFSDirectory")
self.state.get_file_flow_urn = fd.Update(
attribute=self.args.attribute,
priority=rdfvalue.GrrMessage.Priority.HIGH_PRIORITY)
class VFSFile(aff4.AFF4Image):
"""A VFSFile object."""
class SchemaCls(aff4.AFF4Image.SchemaCls):
"""The schema for AFF4 files in the GRR VFS."""
STAT = standard.VFSDirectory.SchemaCls.STAT
CONTENT_LOCK = aff4.Attribute(
"aff4:content_lock", rdfvalue.RDFURN,
"This lock contains a URN pointing to the flow that is currently "
"updating this flow.")
PATHSPEC = aff4.Attribute(
"aff4:pathspec", rdfvalue.PathSpec,
"The pathspec used to retrieve this object from the client.")
FINGERPRINT = aff4.Attribute("aff4:fingerprint",
rdfvalue.FingerprintResponse,
"Protodict containing arrays of hashes.")
def Update(self, attribute=None, priority=None):
"""Update an attribute from the client."""
if attribute == self.Schema.CONTENT:
# List the directory on the client
currently_running = self.Get(self.Schema.CONTENT_LOCK)
# Is this flow still active?
if currently_running:
flow_obj = aff4.FACTORY.Open(currently_running, token=self.token)
if flow_obj.IsRunning():
return
# The client_id is the first element of the URN
client_id = self.urn.Path().split("/", 2)[1]
# Get the pathspec for this object
pathspec = self.Get(self.Schema.STAT).pathspec
flow_urn = flow.GRRFlow.StartFlow(client_id=client_id,
flow_name="GetFile", token=self.token,
pathspec=pathspec, priority=priority)
self.Set(self.Schema.CONTENT_LOCK(flow_urn))
self.Close()
return flow_urn
class MemoryImage(VFSFile):
"""The server representation of the client's memory device."""
_behaviours = frozenset(["Container"])
class SchemaCls(VFSFile.SchemaCls):
LAYOUT = aff4.Attribute("aff4:memory/geometry", rdfvalue.MemoryInformation,
"The memory layout of this image.")
class VFSMemoryFile(aff4.AFF4MemoryStream):
"""A VFS file under a VFSDirectory node which does not have storage."""
class SchemaCls(aff4.AFF4MemoryStream.SchemaCls):
"""The schema for AFF4 files in the GRR VFS."""
# Support also VFSFile attributes.
STAT = VFSFile.SchemaCls.STAT
HASH = VFSFile.SchemaCls.HASH
PATHSPEC = VFSFile.SchemaCls.PATHSPEC
CONTENT_LOCK = VFSFile.SchemaCls.CONTENT_LOCK
FINGERPRINT = VFSFile.SchemaCls.FINGERPRINT
class VFSAnalysisFile(VFSFile):
"""A VFS file which has no Update method."""
def Update(self, attribute=None):
pass
class GRRSignedBlob(aff4.AFF4MemoryStream):
"""A container for storing a signed binary blob such as a driver."""
class SchemaCls(aff4.AFF4MemoryStream.SchemaCls):
"""Signed blob attributes."""
BINARY = aff4.Attribute("aff4:signed_blob", rdfvalue.SignedBlob,
"Signed blob proto for deployment to clients."
"This is used for signing drivers, binaries "
"and python code.")
def Initialize(self):
contents = ""
if "r" in self.mode:
contents = self.Get(self.Schema.BINARY)
if contents:
contents = contents.data
self.fd = StringIO.StringIO(contents)
self.size = self.fd.len
class GRRMemoryDriver(GRRSignedBlob):
"""A driver for acquiring memory."""
class SchemaCls(GRRSignedBlob.SchemaCls):
INSTALLATION = aff4.Attribute(
"aff4:driver/installation", rdfvalue.DriverInstallTemplate,
"The driver installation control protobuf.", "installation",
default=rdfvalue.DriverInstallTemplate(
driver_name="pmem", device_path=r"\\.\pmem"))
class GRRForeman(aff4.AFF4Object):
"""The foreman starts flows for clients depending on rules."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""Attributes specific to VFSDirectory."""
RULES = aff4.Attribute("aff4:rules", rdfvalue.ForemanRules,
"The rules the foreman uses.",
default=rdfvalue.ForemanRules())
def ExpireRules(self):
"""Removes any rules with an expiration date in the past."""
rules = self.Get(self.Schema.RULES)
new_rules = self.Schema.RULES()
now = time.time() * 1e6
expired_session_ids = set()
for rule in rules:
if rule.expires > now:
new_rules.Append(rule)
else:
for action in rule.actions:
if action.hunt_id:
expired_session_ids.add(action.hunt_id)
if expired_session_ids:
# Notify the worker to mark this hunt as terminated.
priorities = dict()
for session_id in expired_session_ids:
priorities[session_id] = rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY
manager = queue_manager.QueueManager(token=self.token)
manager.MultiNotifyQueue(list(expired_session_ids), priorities)
if len(new_rules) < len(rules):
self.Set(self.Schema.RULES, new_rules)
self.Flush()
def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id):
"""Will return True if hunt's task was assigned to this client before."""
for _ in aff4.FACTORY.Stat(
[client_id.Add("flows/%s:hunt" %
rdfvalue.RDFURN(hunt_id).Basename())],
token=self.token):
return True
return False
def _EvaluateRules(self, objects, rule, client_id):
"""Evaluates the rules."""
try:
# Do the attribute regex first.
for regex_rule in rule.regex_rules:
path = client_id.Add(regex_rule.path)
fd = objects[path]
attribute = aff4.Attribute.NAMES[regex_rule.attribute_name]
value = utils.SmartStr(fd.Get(attribute))
if not regex_rule.attribute_regex.Search(value):
return False
# Now the integer rules.
for integer_rule in rule.integer_rules:
path = client_id.Add(integer_rule.path)
fd = objects[path]
attribute = aff4.Attribute.NAMES[integer_rule.attribute_name]
try:
value = int(fd.Get(attribute))
except (ValueError, TypeError):
# Not an integer attribute.
return False
op = integer_rule.operator
if op == rdfvalue.ForemanAttributeInteger.Operator.LESS_THAN:
if value >= integer_rule.value:
return False
elif op == rdfvalue.ForemanAttributeInteger.Operator.GREATER_THAN:
if value <= integer_rule.value:
return False
elif op == rdfvalue.ForemanAttributeInteger.Operator.EQUAL:
if value != integer_rule.value:
return False
else:
# Unknown operator.
return False
return True
except KeyError:
# The requested attribute was not found.
return False
def _RunActions(self, rule, client_id):
"""Run all the actions specified in the rule.
Args:
rule: Rule which actions are to be executed.
client_id: Id of a client where rule's actions are to be executed.
Returns:
Number of actions started.
"""
actions_count = 0
for action in rule.actions:
try:
# Say this flow came from the foreman.
token = self.token.Copy()
token.username = "Foreman"
if action.HasField("hunt_id"):
if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id):
logging.info("Foreman: ignoring hunt %s on client %s: was started "
"here before", client_id, action.hunt_id)
else:
logging.info("Foreman: Starting hunt %s on client %s.",
action.hunt_id, client_id)
flow_cls = flow.GRRFlow.classes[action.hunt_name]
flow_cls.StartClients(action.hunt_id, [client_id])
actions_count += 1
else:
flow.GRRFlow.StartFlow(
client_id=client_id, flow_name=action.flow_name, token=token,
**action.argv.ToDict())
actions_count += 1
# There could be all kinds of errors we don't know about when starting the
# flow/hunt so we catch everything here.
except Exception as e: # pylint: disable=broad-except
logging.exception("Failure running foreman action on client %s: %s",
                          client_id, e)
return actions_count
def AssignTasksToClient(self, client_id):
"""Examines our rules and starts up flows based on the client.
Args:
client_id: Client id of the client for tasks to be assigned.
Returns:
Number of assigned tasks.
"""
client_id = rdfvalue.ClientURN(client_id)
rules = self.Get(self.Schema.RULES)
if not rules: return 0
client = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)
try:
last_foreman_run = client.Get(client.Schema.LAST_FOREMAN_TIME) or 0
except AttributeError:
last_foreman_run = 0
latest_rule = max([rule.created for rule in rules])
if latest_rule <= int(last_foreman_run):
return 0
# Update the latest checked rule on the client.
client.Set(client.Schema.LAST_FOREMAN_TIME(latest_rule))
client.Close()
# For efficiency we collect all the objects we want to open first and then
# open them all in one round trip.
object_urns = {}
relevant_rules = []
expired_rules = False
now = time.time() * 1e6
for rule in rules:
if rule.expires < now:
expired_rules = True
continue
if rule.created <= int(last_foreman_run):
continue
relevant_rules.append(rule)
for regex in rule.regex_rules:
aff4_object = client_id.Add(regex.path)
object_urns[str(aff4_object)] = aff4_object
for int_rule in rule.integer_rules:
aff4_object = client_id.Add(int_rule.path)
object_urns[str(aff4_object)] = aff4_object
# Retrieve all aff4 objects we need.
objects = {}
for fd in aff4.FACTORY.MultiOpen(object_urns, token=self.token):
objects[fd.urn] = fd
actions_count = 0
for rule in relevant_rules:
if self._EvaluateRules(objects, rule, client_id):
actions_count += self._RunActions(rule, client_id)
if expired_rules:
self.ExpireRules()
return actions_count
class GRRAFF4Init(registry.InitHook):
"""Ensure critical AFF4 objects exist for GRR."""
# Must run after the AFF4 subsystem is ready.
pre = ["AFF4InitHook", "ACLInit"]
def Run(self):
try:
# Make the foreman
fd = aff4.FACTORY.Create("aff4:/foreman", "GRRForeman",
token=aff4.FACTORY.root_token)
fd.Close()
except access_control.UnauthorizedAccess:
pass
# We add these attributes to all objects. This means that every object we create
# has a URN link back to the flow that created it.
aff4.AFF4Object.SchemaCls.FLOW = aff4.Attribute(
"aff4:flow", rdfvalue.RDFURN, "A currently scheduled flow.")
class AFF4CollectionView(rdfvalue.RDFValueArray):
"""A view specifies how an AFF4Collection is seen."""
class RDFValueCollectionView(rdfvalue.RDFValueArray):
"""A view specifies how an RDFValueCollection is seen."""
class VolatilityResponse(aff4.AFF4Volume):
_behaviours = frozenset(["Collection"])
class SchemaCls(standard.VFSDirectory.SchemaCls):
DESCRIPTION = aff4.Attribute("aff4:description", rdfvalue.RDFString,
"This collection's description", "description")
RESULT = aff4.Attribute("aff4:volatility_result",
rdfvalue.VolatilityResult,
"The result returned by the flow.")
class MRUCollection(aff4.AFF4Object):
"""Stores all of the MRU files from the registry."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
LAST_USED_FOLDER = aff4.Attribute(
"aff4:mru", rdfvalue.MRUFolder, "The Most Recently Used files.",
default="")
class VFSFileSymlink(aff4.AFF4Stream):
"""A Delegate object for another URN."""
delegate = None
class SchemaCls(VFSFile.SchemaCls):
DELEGATE = aff4.Attribute("aff4:delegate", rdfvalue.RDFURN,
"The URN of the delegate of this object.")
def Initialize(self):
"""Open the delegate object."""
if "r" in self.mode:
delegate = self.Get(self.Schema.DELEGATE)
if delegate:
self.delegate = aff4.FACTORY.Open(delegate, mode=self.mode,
token=self.token, age=self.age_policy)
def Read(self, length):
if "r" not in self.mode:
raise IOError("VFSFileSymlink was not opened for reading.")
return self.delegate.Read(length)
def Seek(self, offset, whence):
return self.delegate.Seek(offset, whence)
def Tell(self):
return self.delegate.Tell()
def Close(self, sync):
super(VFSFileSymlink, self).Close(sync=sync)
if self.delegate:
return self.delegate.Close(sync)
def Write(self):
raise IOError("VFSFileSymlink not writeable.")
class AFF4RegexNotificationRule(aff4.AFF4NotificationRule):
"""AFF4 rule that matches path to a regex and publishes an event."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""Schema for AFF4RegexNotificationRule."""
CLIENT_PATH_REGEX = aff4.Attribute("aff4:change_rule/client_path_regex",
rdfvalue.RDFString,
"Regex to match the urn.")
EVENT_NAME = aff4.Attribute("aff4:change_rule/event_name",
rdfvalue.RDFString,
"Event to trigger on match.")
NOTIFY_ONLY_IF_NEW = aff4.Attribute("aff4:change_rule/notify_only_if_new",
rdfvalue.RDFInteger,
"If True (1), then notify only when "
"the file is created for the first "
"time")
def _UpdateState(self):
regex_str = self.Get(self.Schema.CLIENT_PATH_REGEX)
if not regex_str:
raise IOError("Regular expression not specified for the rule.")
self.regex = re.compile(utils.SmartStr(regex_str))
self.event_name = self.Get(self.Schema.EVENT_NAME)
if not self.event_name:
raise IOError("Event name not specified for the rule.")
def Initialize(self):
if "r" in self.mode:
self._UpdateState()
def OnWriteObject(self, aff4_object):
if not self.event_name:
self._UpdateState()
client_name, path = aff4_object.urn.Split(2)
if not aff4.AFF4Object.VFSGRRClient.CLIENT_ID_RE.match(client_name):
return
if self.regex.match(path):
# TODO(user): maybe add a timestamp attribute to the rule so
# that we get notified only for the new writes after a certain
# timestamp?
if (self.IsAttributeSet(self.Schema.NOTIFY_ONLY_IF_NEW) and
self.Get(self.Schema.NOTIFY_ONLY_IF_NEW)):
fd = aff4.FACTORY.Open(aff4_object.urn, age=aff4.ALL_TIMES,
token=self.token)
stored_vals = list(fd.GetValuesForAttribute(fd.Schema.TYPE))
if len(stored_vals) > 1:
return
event = rdfvalue.GrrMessage(
name="AFF4RegexNotificationRuleMatch",
args=aff4_object.urn.SerializeToString(),
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED,
source=client_name)
flow.Events.PublishEvent(utils.SmartStr(self.event_name), event,
token=self.token)
class VFSBlobImage(aff4.BlobImage, aff4.VFSFile):
"""BlobImage with VFS attributes for use in client namespace."""
class SchemaCls(aff4.BlobImage.SchemaCls, aff4.VFSFile.SchemaCls):
pass
|
|
#!/usr/bin/env python
# MIT License
# Copyright (c) 2015, 2017 Marie Lemoine-Busserolle
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
# Import some useful Python utilities/modules #
################################################################################
# STDLIB
import sys, glob, shutil, os, time, logging, urllib, re, pkg_resources
from pyraf import iraf, iraffunctions
import astropy.io.fits
import numpy as np
# LOCAL
# Import config parsing.
from ..configobj.configobj import ConfigObj
# Import custom Nifty functions.
from ..nifsUtils import datefmt, listit, writeList, checkLists, makeSkyList, MEFarith, convertRAdec, copyResultsToScience
# Define constants
# Paths to Nifty data.
RECIPES_PATH = pkg_resources.resource_filename('nifty', 'recipes/')
RUNTIME_DATA_PATH = pkg_resources.resource_filename('nifty', 'runtimeData/')
def start(kind, telluricDirectoryList="", scienceDirectoryList=""):
"""
start(kind): Do a full reduction of either Science or Telluric data.
nifsReduce- for the telluric and science data reduction.
Reduces NIFS telluric and science frames and attempts a flux calibration.
Parameters are loaded from runtimeData/config.cfg. This script will
automatically detect if it is being run on telluric data or science data.
There are 6 steps.
INPUT:
+ Raw files
- Science frames
- Sky frames
+ Calibration files
- MDF shift file
- Bad Pixel Mask (BPM)
- Flat field frame
- Reduced arc frame
- Reduced ronchi mask frame
- arc and ronchi database/ files
OUTPUT:
- If telluric reduction an efficiency spectrum used to telluric correct and absolute flux
calibrate science frames
- If science reduction a reduced science data cube.
Args:
kind (string): either 'Telluric' or 'Science'.
telluricDirectoryList (string): Used by low memory pipeline.
scienceDirectoryList (string): Used by low memory pipeline.
"""
# TODO(nat): Right now the pipeline will crash if you decide to skip, say, doing a bad
# pixel correction. This is because each step adds a prefix to the frame name, and most following
# steps depend on that prefix being there.
# One way to fix this: if a step is to be skipped, call iraf.copy() instead to copy the frame and
# add the needed prefix. Messy, but it might work for now.
###########################################################################
## ##
## BEGIN - GENERAL REDUCTION SETUP ##
## ##
###########################################################################
# Store current working directory for later use.
path = os.getcwd()
# Set up the logging file.
log = os.getcwd()+'/Nifty.log'
logging.info('\n#################################################')
logging.info('# #')
logging.info('# Start the NIFS Science and Telluric Reduction #')
logging.info('# #')
logging.info('#################################################\n')
# Set up/prepare IRAF.
iraf.gemini()
iraf.gemtools()
iraf.gnirs()
iraf.nifs()
# Reset to default parameters the used IRAF tasks.
iraf.unlearn(iraf.gemini,iraf.gemtools,iraf.gnirs,iraf.nifs,iraf.imcopy)
# From http://bishop.astro.pomona.edu/Penprase/webdocuments/iraf/beg/beg-image.html:
# Before doing anything involving image display the environment variable
# stdimage must be set to the correct frame buffer size for the display
# servers (as described in the dev$graphcap file under the section "STDIMAGE
# devices") or to the correct image display device. The task GDEVICES is
# helpful for determining this information for the display servers.
iraf.set(stdimage='imt2048')
# Prepare the IRAF package for NIFS.
# NSHEADERS lists the header parameters used by the various tasks in the
# NIFS package (excluding header values which are fixed by IRAF or
# FITS conventions).
iraf.nsheaders("nifs",logfile=log)
# Set clobber to 'yes' for the script. This still does not make the gemini
# tasks overwrite files, so:
# YOU WILL LIKELY HAVE TO REMOVE FILES IF YOU RE-RUN THE SCRIPT.
user_clobber=iraf.envget("clobber")
iraf.reset(clobber='yes')
# This helps make sure all variables are initialized to prevent bugs.
scienceSkySubtraction = None
scienceOneDExtraction = None
extractionXC = None
extractionYC = None
extractionRadius = None
telluricSkySubtraction = None
# Load reduction parameters from runtimeData/config.cfg.
with open('./config.cfg') as config_file:
config = ConfigObj(config_file, unrepr=True)
# Read general pipeline config.
over = config['over']
manualMode = config['manualMode']
calDirList = config['calibrationDirectoryList']
scienceOneDExtraction = config['scienceOneDExtraction']
extractionXC = config['extractionXC']
extractionYC = config['extractionYC']
extractionRadius = config['extractionRadius']
if kind == 'Telluric':
# Telluric reduction specific config.
telluricReductionConfig = config['telluricReductionConfig']
if telluricDirectoryList:
observationDirectoryList = telluricDirectoryList
elif not telluricDirectoryList:
observationDirectoryList = config['telluricDirectoryList']
start = telluricReductionConfig['telStart']
stop = telluricReductionConfig['telStop']
telluricSkySubtraction = telluricReductionConfig['telluricSkySubtraction']
if kind == 'Science':
# Science reduction specific config.
scienceReductionConfig = config['scienceReductionConfig']
if scienceDirectoryList:
observationDirectoryList = scienceDirectoryList
elif not scienceDirectoryList:
observationDirectoryList = config['scienceDirectoryList']
start = scienceReductionConfig['sciStart']
stop = scienceReductionConfig['sciStop']
scienceSkySubtraction = scienceReductionConfig['scienceSkySubtraction']
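# For reference, a minimal sketch of the relevant config.cfg sections read
# above (the key names are taken from the lookups in this function; the
# values shown are purely illustrative and are parsed as Python literals
# because ConfigObj is called with unrepr=True):
#
#   over = False
#   manualMode = False
#   calibrationDirectoryList = ['/data/target/calibrations']
#   scienceOneDExtraction = True
#   extractionXC = 15.0
#   extractionYC = 33.0
#   extractionRadius = 2.5
#   telluricDirectoryList = ['/data/target/telluric/obs1']
#   scienceDirectoryList = ['/data/target/science/obs1']
#
#   [telluricReductionConfig]
#   telStart = 1
#   telStop = 6
#   telluricSkySubtraction = True
#
#   [scienceReductionConfig]
#   sciStart = 1
#   sciStop = 6
#   scienceSkySubtraction = True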
###########################################################################
## ##
## COMPLETE - GENERAL REDUCTION SETUP ##
## ##
###########################################################################
# nifsReduce has two nested loops that reduce the data.
# It loops through each science (or telluric) directory and
# runs a series of calibration steps on the data in that directory.
# Loop through all the observation (telluric or science) directories to perform a reduction on each one.
for observationDirectory in observationDirectoryList:
###########################################################################
## ##
## BEGIN - OBSERVATION SPECIFIC SETUP ##
## ##
###########################################################################
# Print the current directory of data being reduced.
logging.info("\n#################################################################################")
logging.info(" ")
logging.info(" Currently working on reductions in")
logging.info(" in "+ str(observationDirectory))
logging.info(" ")
logging.info("#################################################################################\n")
os.chdir(observationDirectory)
tempObs = observationDirectory.split(os.sep)
obsid = tempObs[-1]
# Change the iraf directory to the current directory.
pwd = os.getcwd()
iraffunctions.chdir(pwd)
# Copy relevant calibrations over to the science directory.
# Open and store the name of the MDF shift reference file from shiftfile into shift.
shift = 'calibrations/shiftFile'
# Open and store the name of the flat frame
flat = 'calibrations/finalFlat'
# Open and store the bad pixel mask
finalBadPixelMask = 'calibrations/finalBadPixelMask'
# Ronchi, arc and database must all be in local calibrations directory
# Open and store the name of the reduced spatial correction ronchi flat frame name from ronchifile in ronchi.
ronchi = 'finalRonchi'
# Open and store the name of the reduced wavelength calibration arc frame from arclist in arc.
arc = 'finalArc'
if os.path.exists(os.getcwd()+'/'+ronchi+".fits"):
if over:
iraf.delete(os.getcwd()+'/'+ronchi+'.fits')
# Copy the reduced spatial calibration ronchi flat frame from the local calibrations directory to the observation directory.
shutil.copy(os.getcwd()+'/calibrations/finalRonchi.fits', ronchi+'.fits')
else:
print "\nOutput exists and -over not set - skipping copy of reduced ronchi"
else:
shutil.copy(os.getcwd()+'/calibrations/finalRonchi.fits', ronchi+'.fits')
if os.path.exists(os.getcwd()+'/'+arc+".fits"):
if over:
iraf.delete(os.getcwd()+'/'+arc+'.fits')
# Copy the reduced wavelength calibration arc frame from the local calibrations directory to the observation directory.
shutil.copy(os.getcwd()+'/calibrations/finalArc.fits', arc+'.fits')
else:
print "\nOutput exists and -over not set - skipping copy of reduced arc"
else:
shutil.copy(os.getcwd()+'/calibrations/finalArc.fits', arc+'.fits')
# Make sure the database files are in place. Current understanding is that
# these should be local to the reduction directory, so need to be copied from
# the calDir.
if os.path.isdir("./database"):
if over:
shutil.rmtree("./database")
os.mkdir("./database")
for item in glob.glob("calibrations/database/*"):
shutil.copy(item, "./database/")
else:
print "\nOutput exists and -over not set - skipping copy of database directory"
else:
os.mkdir('./database/')
for item in glob.glob("calibrations/database/*"):
shutil.copy(item, "./database/")
if telluricSkySubtraction or scienceSkySubtraction:
# Read the list of sky frames in the observation directory.
try:
skyFrameList = open("skyFrameList", "r").readlines()
skyFrameList = [frame.strip() for frame in skyFrameList]
except IOError:
logging.info("\n#####################################################################")
logging.info("#####################################################################")
logging.info("")
logging.info(" WARNING in reduce: No sky frames were found in a directory.")
logging.info(" Please make a skyFrameList in: " + str(os.getcwd()))
logging.info("")
logging.info("#####################################################################")
logging.info("#####################################################################\n")
raise SystemExit
sky = skyFrameList[0]
# If we are doing a telluric reduction, open the list of telluric frames in the observation directory.
# If we are doing a science reduction, open the list of science frames in the observation directory.
if kind == 'Telluric':
tellist = open('tellist', 'r').readlines()
tellist = [frame.strip() for frame in tellist]
elif kind == 'Science':
scienceFrameList = open("scienceFrameList", "r").readlines()
scienceFrameList = [frame.strip() for frame in scienceFrameList]
# For science frames, check to see if the number of sky frames matches the number of science frames.
# IF NOT duplicate the sky frames and rewrite the sky file and skyFrameList.
if scienceSkySubtraction:
if not len(skyFrameList)==len(scienceFrameList):
skyFrameList = makeSkyList(skyFrameList, scienceFrameList, observationDirectory)
###########################################################################
## ##
## COMPLETE - OBSERVATION SPECIFIC SETUP ##
## BEGIN DATA REDUCTION FOR AN OBSERVATION ##
## ##
###########################################################################
# Check start and stop values for reduction steps. Ask user for a correction if
# input is not valid.
valindex = start
while valindex > stop or valindex < 1 or stop > 6:
logging.info("\n#####################################################################")
logging.info("#####################################################################")
logging.info("")
logging.info(" WARNING in reduce: invalid start/stop values of observation")
logging.info(" reduction steps.")
logging.info("")
logging.info("#####################################################################")
logging.info("#####################################################################\n")
valindex = int(raw_input("\nPlease enter a valid start value (1 to 6, default 1): "))
stop = int(raw_input("\nPlease enter a valid stop value (1 to 6, default 6): "))
while valindex <= stop :
###########################################################################
## STEP 1: Prepare raw data; science, telluric and sky frames ->n ##
###########################################################################
if valindex == 1:
if manualMode:
a = raw_input("About to enter step 1: locate the spectrum.")
if kind=='Telluric':
tellist = prepare(tellist, shift, finalBadPixelMask, log, over)
elif kind=='Science':
scienceFrameList = prepare(scienceFrameList, shift, finalBadPixelMask, log, over)
if telluricSkySubtraction or scienceSkySubtraction:
skyFrameList = prepare(skyFrameList, shift, finalBadPixelMask, log, over)
logging.info("\n##############################################################################")
logging.info("")
logging.info(" STEP 1: Locate the Spectrum (and prepare raw data) ->n - COMPLETED ")
logging.info("")
logging.info("##############################################################################\n")
###########################################################################
## STEP 2: Sky Subtraction ->sn ##
###########################################################################
elif valindex == 2:
if manualMode:
a = raw_input("About to enter step 2: sky subtraction.")
# Combine telluric sky frames.
if kind=='Telluric':
if telluricSkySubtraction:
if len(skyFrameList)>1:
combineImages(skyFrameList, "gn"+sky, log, over)
else:
copyImage(skyFrameList, 'gn'+sky+'.fits', over)
skySubtractTel(tellist, "gn"+sky, log, over)
else:
for image in tellist:
iraf.copy('n'+image+'.fits', 'sn'+image+'.fits')
if kind=='Science':
if scienceSkySubtraction:
skySubtractObj(scienceFrameList, skyFrameList, log, over)
else:
for image in scienceFrameList:
iraf.copy('n'+image+'.fits', 'sn'+image+'.fits')
logging.info("\n##############################################################################")
logging.info("")
logging.info(" STEP 2: Sky Subtraction ->sn - COMPLETED ")
logging.info("")
logging.info("##############################################################################\n")
##############################################################################
## STEP 3: Flat field, slice, subtract dark and correct bad pixels ->brsn ##
##############################################################################
elif valindex == 3:
if manualMode:
a = raw_input("About to enter step 3: flat fielding and bad pixels correction.")
if kind=='Telluric':
applyFlat(tellist, flat, log, over, kind)
fixBad(tellist, log, over)
elif kind=='Science':
applyFlat(scienceFrameList, flat, log, over, kind)
fixBad(scienceFrameList, log, over)
logging.info("\n##############################################################################")
logging.info("")
logging.info(" STEP 3: Flat fielding and Bad Pixels Correction ->brsn - COMPLETED ")
logging.info("")
logging.info("##############################################################################\n")
###########################################################################
## STEP 4: Derive and apply 2D to 3D transformation ->tfbrsn ##
###########################################################################
elif valindex == 4:
if manualMode:
a = raw_input("About to enter step 4: 2D to 3D transformation and Wavelength Calibration.")
if kind=='Telluric':
fitCoords(tellist, arc, ronchi, log, over, kind)
transform(tellist, log, over)
elif kind=='Science':
fitCoords(scienceFrameList, arc, ronchi, log, over, kind)
transform(scienceFrameList, log, over)
logging.info("\n##############################################################################")
logging.info("")
logging.info(" STEP 4: 2D to 3D transformation and Wavelength Calibration ->tfbrsn - COMPLETED ")
logging.info("")
logging.info("##############################################################################\n")
############################################################################
## STEP 5 (tellurics): For telluric data derive a telluric ##
## correction ->gxtfbrsn ##
## STEP 5 (science): For science apply an efficiency correction and make ##
## a data cube (not necessarily in that order). ##
## (i) Python method applies correction to nftransformed cube. ##
## Good for faint objects. ->cptfbrsn ##
## (ii) iraf.telluric method applies correction to ##
## nftransformed result (not quite a data cube) then ##
## nftransforms cube. ->catfbrsn ##
## (iii) If no telluric correction/flux calibration to be ##
## applied make a plain data cube. ->ctfbrsn ##
############################################################################
elif valindex == 5:
if manualMode:
a = raw_input("About to enter step 5.")
# For telluric data:
# Make a combined extracted 1D standard star spectrum.
if kind=='Telluric':
extractOneD(tellist, kind, log, over, extractionXC, extractionYC, extractionRadius)
# TODO(nat): add this as a parameter; encapsulate this.
copyToScience = True
if copyToScience:
# Copy final extracted results to science directory.
try:
with open("scienceMatchedTellsList", "r") as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for i in range(len(lines)):
if "obs" in lines[i]:
k = 1
while i+k != len(lines) and "obs" not in lines[i+k]:
copyResultsToScience("gxtfbrsn"+tellist[0]+".fits", "0_tel"+lines[i+k]+".fits", over)
k+=1
except IOError:
logging.info("\nNo scienceMatchedTellsList found in "+ os.getcwd() +" . Skipping copy of extracted spectra to science directory.")
logging.info("\n##############################################################################")
logging.info("")
logging.info(" STEP 5a: Extract 1D Spectra and Make Combined 1D Standard Star Spectrum")
logging.info(" ->gxtfbrsn - COMPLETED")
logging.info("")
logging.info("##############################################################################\n")
#TODO(nat): add this as a parameter.
makeTelluricCube = True
if makeTelluricCube:
makeCube('tfbrsn', tellist, log, over)
logging.info("\n##############################################################################")
logging.info("")
logging.info(" STEP 5b: Make uncorrected standard star data cubes, ->ctfbrsn - COMPLETED")
logging.info("")
logging.info("##############################################################################\n")
# For Science data:
# Possibly extract 1D spectra, and make uncorrected cubes.
elif kind=='Science':
if scienceOneDExtraction:
extractOneD(scienceFrameList, kind, log, over, extractionXC, extractionYC, extractionRadius)
copyExtracted(scienceFrameList, over)
logging.info("\n##############################################################################")
logging.info("")
logging.info(" STEP 5a: Make extracted 1D Science spectra, ->ctgbrsn - COMPLETED")
logging.info("")
logging.info("##############################################################################\n")
makeCube('tfbrsn', scienceFrameList, log, over)
# TODO(nat): encapsulate this inside a function.
if os.path.exists('products_uncorrected'):
if over:
shutil.rmtree('products_uncorrected')
os.mkdir('products_uncorrected')
else:
logging.info("\nOutput exists and -over not set - skipping creating of products_uncorrected directory")
else:
os.mkdir('products_uncorrected')
for item in scienceFrameList:
if os.path.exists('products_uncorrected/ctfbrsn'+item+'.fits'):
if over:
os.remove('products_uncorrected/ctfbrsn'+item+'.fits')
shutil.copy('ctfbrsn'+item+'.fits', 'products_uncorrected/ctfbrsn'+item+'.fits')
else:
logging.info("\nOutput exists and -over not set - skipping copy of uncorrected cube")
else:
shutil.copy('ctfbrsn'+item+'.fits', 'products_uncorrected/ctfbrsn'+item+'.fits')
if os.path.exists('products_telluric_corrected'):
if over:
shutil.rmtree('products_telluric_corrected')
os.mkdir('products_telluric_corrected')
else:
logging.info("\nOutput exists and -over not set - skipping creating of products_telluric_corrected directory")
else:
os.mkdir('products_telluric_corrected')
for item in scienceFrameList:
if os.path.exists('products_telluric_corrected/ctfbrsn'+item+'.fits'):
if over:
os.remove('products_telluric_corrected/ctfbrsn'+item+'.fits')
shutil.copy('ctfbrsn'+item+'.fits', 'products_telluric_corrected/ctfbrsn'+item+'.fits')
else:
logging.info("\nOutput exists and -over not set - skipping copy of uncorrected cube")
else:
shutil.copy('ctfbrsn'+item+'.fits', 'products_telluric_corrected/ctfbrsn'+item+'.fits')
logging.info("\n##############################################################################")
logging.info("")
logging.info(" STEP 5b: Make uncorrected science data cubes, ->ctfbrsn - COMPLETED")
logging.info("")
logging.info("##############################################################################\n")
valindex += 1
logging.info("\n##############################################################################")
logging.info("")
logging.info(" COMPLETE - Reductions completed for " + str(observationDirectory))
logging.info("")
logging.info("##############################################################################\n")
# Return to directory script was begun from.
os.chdir(path)
##################################################################################################################
# ROUTINES #
##################################################################################################################
def prepare(inlist, shiftima, finalBadPixelMask, log, over):
"""Prepare list of frames using iraf.nfprepare. Output: -->n.
Processing with NFPREPARE (this task is used only for NIFS data
but other instruments have their own preparation tasks
with similar actions) will rename the data extension and add
variance and data quality extensions. By default (see NSHEADERS)
the extension names are SCI for science data, VAR for variance, and
DQ for data quality (0 = good). Generation of the data quality
plane (DQ) is important in order to fix hot and dark pixels on the
NIFS detector in subsequent steps in the data reduction process.
Various header keywords (used later) are also added in NFPREPARE.
NFPREPARE will also add an MDF file (extension MDF) describing the
NIFS image slicer pattern and how the IFU maps to the sky field.
"""
# Update frames with mdf offset value and generate variance and data quality extensions.
for frame in inlist:
if os.path.exists("n"+frame+".fits"):
if over:
os.remove("n"+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping prepare_list")
continue
iraf.nfprepare(frame, rawpath="", shiftimage=shiftima, fl_vardq="yes", bpm=finalBadPixelMask, fl_int='yes', fl_corr='no', fl_nonl='no', logfile=log)
inlist = checkLists(inlist, '.', 'n', '.fits')
return inlist
#--------------------------------------------------------------------------------------------------------------------------------#
def combineImages(inlist, out, log, over):
"""Gemcombine multiple frames. Output: -->gn."""
if os.path.exists(out+".fits"):
if over:
iraf.delete(out+".fits")
else:
logging.info("Output file exists and -over not set - skipping combine_ima")
return
iraf.gemcombine(listit(inlist,"n"),output=out,fl_dqpr='yes', fl_vardq='yes',masktype="none", combine="median", logfile=log)
#--------------------------------------------------------------------------------------------------------------------------------#
def copyImage(input, output, over):
"""Copy a frame (used to add the correct prefix when skipping steps)."""
if os.path.exists(output):
if over:
iraf.delete(output)
else:
logging.info("Output file exists and -over not set - skipping copy_ima")
return
iraf.copy('n'+input[0]+'.fits', output)
#--------------------------------------------------------------------------------------------------------------------------------#
def skySubtractObj(objlist, skyFrameList, log, over):
""""Sky subtraction for science using iraf.gemarith. Output: ->sgn"""
for i in range(len(objlist)):
frame = str(objlist[i])
sky = str(skyFrameList[i])
if os.path.exists("sn"+frame+".fits"):
if over:
os.remove("sn"+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping skysub_list")
continue
iraf.gemarith ("n"+frame, "-", "n"+sky, "sn"+frame, fl_vardq="yes", logfile=log)
#--------------------------------------------------------------------------------------------------------------------------------#
def skySubtractTel(tellist, sky, log, over):
"""Sky subtraction for telluric using iraf.gemarith. Output: ->sgn"""
for frame in tellist:
if os.path.exists("sn"+frame+".fits"):
if over:
os.remove("sn"+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping skySubtractTel.")
continue
iraf.gemarith ("n"+frame, "-", sky, "sn"+frame, fl_vardq="yes", logfile=log)
#--------------------------------------------------------------------------------------------------------------------------------#
def applyFlat(objlist, flat, log, over, kind, dark=""):
"""Flat field and cut the data with iraf.nsreduce. Output: ->rsgn.
NSREDUCE is used for basic reduction of raw data - it provides a
single, unified interface to several tasks and also allows for
the subtraction of dark frames and dividing by the flat. For
NIFS reduction, NSREDUCE is used to call the NSCUT and NSAPPWAVE
routines.
"""
# By default don't subtract darks from tellurics.
fl_dark = "no"
if dark != "":
fl_dark = "yes"
for frame in objlist:
frame = str(frame).strip()
if os.path.exists("rsn"+frame+".fits"):
if over:
os.remove("rsn"+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping apply_flat_list")
continue
iraf.nsreduce("sn"+frame, fl_cut="yes", fl_nsappw="yes", fl_dark="no", fl_sky="no", fl_flat="yes", flatimage=flat, fl_vardq="yes",logfile=log)
#--------------------------------------------------------------------------------------------------------------------------------#
def fixBad(objlist, log, over):
"""Interpolate over bad pixels flagged in the DQ plane with iraf.nffixbad. Output: -->brsn.
NFFIXBAD - Fix Hot/Cold pixels on the NIFS detector.
This routine uses the information in the Data Quality
extensions to fix hot and cold pixels in the NIFS science
fields. NFFIXBAD is a wrapper script which calls the task
FIXPIX, using the DQ plane to define the pixels to be corrected.
"""
for frame in objlist:
frame = str(frame).strip()
if os.path.exists("brsn"+frame+".fits"):
if over:
os.remove("brsn"+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping fixbad_list")
continue
iraf.nffixbad("rsn"+frame,logfile=log)
#--------------------------------------------------------------------------------------------------------------------------------#
def fitCoords(objlist, arc, ronchi, log, over, kind):
"""Derive the 2D to 3D spatial/spectral transformation with iraf.nsfitcoords.
Output: -->fbrsn
NFFITCOORDS - Compute 2D dispersion and distortion maps.
This routine uses as inputs the output from the NSWAVELENGTH
and NFSDIST routines. NFFITCOORDS takes the spatial and
spectral rectification information from NSWAVELENGTH and
NFSDIST and converts this into a calculation of where the data
information should map to in a final IFU dataset.
"""
for frame in objlist:
frame = str(frame).strip()
if os.path.exists("fbrsn"+frame+".fits"):
if over:
os.remove("fbrsn"+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping fitcoord_list")
continue
iraf.nsfitcoords("brsn"+frame, lamptransf=arc, sdisttransf=ronchi, database="database", lxorder=3, lyorder=2, sxorder=3, syorder=3, logfile=log)
#--------------------------------------------------------------------------------------------------------------------------------#
def transform(objlist, log, over):
"""Apply the transformation determined in iraf.nffitcoords with
iraf.nstransform. Output: -->tfbrsgn
NSTRANSFORM - Spatially rectify and wavelength calibrate data.
NFTRANSFORM applies the wavelength solution found by
NSWAVELENGTH and the spatial correction found by NFSDIST,
aligning all the IFU extensions consistently onto a common
coordinate system. The output of this routine is still in 2D
format, with each of the IFU slices represented by its own data
extension.
"""
for frame in objlist:
frame = str(frame).strip()
if os.path.exists("tfbrsn"+frame+".fits"):
if over:
iraf.delete("tfbrsn"+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping transform_list")
continue
iraf.nstransform("fbrsn"+frame, logfile=log)
#--------------------------------------------------------------------------------------------------------------------------------#
def makeCube(pre, scienceFrameList, log, over):
""" Reformat the data into a 3-D datacube using iraf.nifcube. Output: -->ctfbrsgn.
NIFCUBE - Construct 3D NIFS datacubes.
NIFCUBE takes input from data output by either NFFITCOORDS or
NFTRANSFORM and converts the 2D data images into data cubes
that have coordinates of x, y, lambda.
"""
for frame in scienceFrameList:
if os.path.exists("c"+pre+frame+".fits"):
if over:
iraf.delete("c"+pre+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping make_cube_list")
continue
iraf.nifcube (pre+frame, outcubes = 'c'+pre+frame, logfile=log)
#--------------------------------------------------------------------------------------------------------------------------------#
def extractOneD(inputList, kind, log, over, extractionXC=15.0, extractionYC=33.0, extractionRadius=2.5):
"""Extracts 1-D spectra with iraf.nfextract and combines them with iraf.gemcombine.
iraf.nfextract is currently only done interactively. Output: -->xtfbrsn and gxtfbrsn
NFEXTRACT - Extract NIFS spectra.
This can be used to extract a 1D spectrum from IFU data and is
particularly useful for extracting the bright spectra of
telluric calibrator stars. Note that this routine only works
on data that has been run through NFTRANSFORM.
"""
for frame in inputList:
frame = str(frame).strip()
if os.path.exists("xtfbrsn"+frame+".fits"):
if over:
iraf.delete("xtfbrsn"+frame+".fits")
else:
logging.info("Output file exists and -over not set - skipping nfextract in extract1D")
continue
iraf.nfextract("tfbrsn"+frame, outpref="x", xc=extractionXC, yc=extractionYC, diameter=extractionRadius, fl_int='no', logfile=log)
inputList = checkLists(inputList, '.', 'xtfbrsn', '.fits')
# Combine all the 1D spectra to one final output file with the name of the first input file.
combined = str(inputList[0]).strip()
if len(inputList) > 1:
if os.path.exists("gxtfbrsn"+combined+".fits"):
if over:
iraf.delete("gxtfbrsn"+combined+".fits")
else:
logging.info("Output file exists and -over not set - skipping gemcombine in extract1D")
return
iraf.gemcombine(listit(inputList,"xtfbrsn"),output="gxtfbrsn"+combined, statsec="[*]", combine="median",masktype="none",fl_vardq="yes", logfile=log)
else:
if over:
iraf.delete("gxtfbrsn"+combined+".fits")
iraf.copy(input="xtfbrsn"+combined+".fits", output="gxtfbrsn"+combined+".fits")
if kind == 'Telluric':
# Put the name of the final combined file into a text file called
# telluricfile to be used by the pipeline later.
open("telluricfile", "w").write("gxtfbrsn"+combined)
elif kind == 'Science':
open("combinedOneD", "w").write("gxtfbrsn"+combined)
#--------------------------------------------------------------------------------------------------------------------------------#
def copyExtracted(scienceFrameList, over):
"""
Copy all extracted 1D spectra to objectname/ExtractedOneD/date_obsname/,
and combined 1D spectra to objectname/ExtractedOneD
"""
# TODO(nat): make this clearer.
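# The splits below assume a directory layout roughly like
# <rootPath>/<objectName>/<date>/<grating>/<obsid> (an assumption based on
# how the path components are unpacked here), so that objname, date and
# obsid can be recovered from the current observation directory.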
obsDir = os.getcwd()
temp1 = os.path.split(obsDir)
temp2 = os.path.split(temp1[0])
temp3 = os.path.split(temp2[0])
temp4 = os.path.split(temp3[0])
objname = temp4[1]
date = temp3[1]
obsid = temp1[1]
obsPath = temp3[0]
os.chdir(obsDir)
# Create a directory called ExtractedOneD and copy all the data cubes to this directory.
if not os.path.exists(obsPath+'/ExtractedOneD/'):
os.mkdir(obsPath+'/ExtractedOneD/')
logging.info('I am creating a directory called ExtractedOneD')
ExtractedOneD = obsPath+'/ExtractedOneD'
# Make the appropriate directory structure.
if not os.path.exists(ExtractedOneD+'/'+date+'_'+obsid):
os.mkdir(ExtractedOneD+'/'+date+'_'+obsid)
logging.info('I am creating a directory with date and obs ID inside ExtractedOneD')
# Get the filenames of the uncombined spectra.
uncombinedSpectra = glob.glob('xtfbrsnN*.fits')
# Copy the uncombined spectra to the appropriate directory.
for spectra in uncombinedSpectra:
if os.path.exists(ExtractedOneD+'/'+date+'_'+obsid+'/'+spectra):
if over:
os.remove(ExtractedOneD+'/'+date+'_'+obsid+'/'+spectra)
shutil.copy(spectra, ExtractedOneD+'/'+date+'_'+obsid)
else:
logging.info("Output file exists and -over not set - skipping copy one D spectra")
else:
shutil.copy(spectra, ExtractedOneD+'/'+date+'_'+obsid)
# Get the file name of the combined spectra
combinedSpectrum = glob.glob('gxtfbrsnN*.fits')
combinedSpectrum = combinedSpectrum[0]
# Copy the combined spectrum to the appropriate directory.
if os.path.exists(ExtractedOneD+'/'+combinedSpectrum):
if over:
os.remove(ExtractedOneD+'/'+combinedSpectrum)
shutil.copy(combinedSpectrum, ExtractedOneD)
else:
logging.info("Output file exists and -over not set - skipping copy combined one D spectra")
else:
shutil.copy(combinedSpectrum, ExtractedOneD+'/combined'+date+'_'+obsid+'.fits')
#--------------------------------------------------------------------------------------------------------------------------------#
if __name__ == '__main__':
a = raw_input('Enter <Science> for science reduction or <Telluric> for telluric reduction: ')
start(a)
|
|
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
import time
import unittest
from fabric.api import local
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import (
BGP_FSM_IDLE,
BGP_FSM_ACTIVE,
BGP_FSM_ESTABLISHED,
GRACEFUL_RESTART_TIME,
)
from lib.gobgp import GoBGPContainer
class GoBGPTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
g2 = GoBGPContainer(name='g2', asn=65001, router_id='192.168.0.2',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
ctns = [g1, g2]
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
g1.add_route('10.10.10.0/24')
g1.add_route('10.10.20.0/24')
g1.add_peer(g2, graceful_restart=True)
g2.add_peer(g1, graceful_restart=True)
cls.bgpds = {'g1': g1, 'g2': g2}
# test each neighbor state is turned establish
def test_01_neighbor_established(self):
g1 = self.bgpds['g1']
g2 = self.bgpds['g2']
g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g2)
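# With graceful restart negotiated, stopping g1 should not flush its routes
# from g2 immediately: g2 keeps them in the global RIB and adj-RIB-in but
# marks them stale until g1 returns or the restart timer expires.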
def test_02_graceful_restart(self):
g1 = self.bgpds['g1']
g2 = self.bgpds['g2']
g1.stop_gobgp()
g2.wait_for(expected_state=BGP_FSM_ACTIVE, peer=g1)
self.assertEqual(len(g2.get_global_rib('10.10.20.0/24')), 1)
self.assertEqual(len(g2.get_global_rib('10.10.10.0/24')), 1)
for d in g2.get_global_rib():
for p in d['paths']:
self.assertTrue(p['stale'])
# Confirm the paths on the adj-RIB-in table are synced with the Global
# table.
self.assertEqual(len(g2.get_adj_rib_in(g1, '10.10.20.0/24')), 1)
self.assertEqual(len(g2.get_adj_rib_in(g1, '10.10.10.0/24')), 1)
for p in g2.get_adj_rib_in(g1):
self.assertTrue(p['stale'])
g1.routes = {}
g1.start_gobgp(graceful_restart=True)
g1.add_route('10.10.20.0/24')
def test_03_neighbor_established(self):
g1 = self.bgpds['g1']
g2 = self.bgpds['g2']
g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g2)
# Confirm the restart timer has not expired.
self.assertEqual(
g2.local(
"grep 'graceful restart timer expired' %s/gobgpd.log"
" | wc -l" % (g2.SHARED_VOLUME), capture=True),
'0')
time.sleep(1)
self.assertEqual(len(g2.get_global_rib('10.10.20.0/24')), 1)
self.assertEqual(len(g2.get_global_rib('10.10.10.0/24')), 0)
for d in g2.get_global_rib():
for p in d['paths']:
self.assertFalse(p.get('stale', False))
# Confirm the stale paths are also removed from the adj-RIB-in table.
# https://github.com/osrg/gobgp/pull/1707
self.assertEqual(len(g2.get_adj_rib_in(g1, '10.10.20.0/24')), 1)
self.assertEqual(len(g2.get_adj_rib_in(g1, '10.10.10.0/24')), 0)
for p in g2.get_adj_rib_in(g1):
self.assertFalse(p.get('stale', False))
def test_04_add_non_graceful_restart_enabled_peer(self):
g1 = self.bgpds['g1']
# g2 = self.bgpds['g2']
gobgp_ctn_image_name = parser_option.gobgp_image
g3 = GoBGPContainer(name='g3', asn=65002, router_id='192.168.0.3',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
self.bgpds['g3'] = g3
time.sleep(g3.run())
g3.add_route('10.10.30.0/24')
g1.add_peer(g3)
g3.add_peer(g1)
g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g3)
time.sleep(1)
self.assertEqual(len(g3.get_global_rib('10.10.20.0/24')), 1)
def test_05_graceful_restart(self):
g1 = self.bgpds['g1']
g2 = self.bgpds['g2']
g3 = self.bgpds['g3']
g1.stop_gobgp()
g2.wait_for(expected_state=BGP_FSM_ACTIVE, peer=g1)
self.assertEqual(len(g2.get_global_rib('10.10.20.0/24')), 1)
self.assertEqual(len(g2.get_global_rib('10.10.30.0/24')), 1)
for d in g2.get_global_rib():
for p in d['paths']:
self.assertTrue(p['stale'])
self.assertEqual(len(g3.get_global_rib('10.10.20.0/24')), 0)
self.assertEqual(len(g3.get_global_rib('10.10.30.0/24')), 1)
def test_06_test_restart_timer_expire(self):
time.sleep(GRACEFUL_RESTART_TIME + 5)
g2 = self.bgpds['g2']
self.assertEqual(len(g2.get_global_rib()), 0)
def test_07_multineighbor_established(self):
g1 = self.bgpds['g1']
g2 = self.bgpds['g2']
g3 = self.bgpds['g3']
g1.start_gobgp()
g1.del_peer(g2)
g1.del_peer(g3)
g2.del_peer(g1)
g3.del_peer(g1)
g1.add_peer(g2, graceful_restart=True, llgr=True)
g1.add_peer(g3, graceful_restart=True, llgr=True)
g2.add_peer(g1, graceful_restart=True, llgr=True)
g3.add_peer(g1, graceful_restart=True, llgr=True)
g2.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g1)
g3.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g1)
def test_08_multineighbor_graceful_restart(self):
g1 = self.bgpds['g1']
g2 = self.bgpds['g2']
g3 = self.bgpds['g3']
g1.stop_gobgp()
g2.wait_for(expected_state=BGP_FSM_ACTIVE, peer=g1)
g3.wait_for(expected_state=BGP_FSM_ACTIVE, peer=g1)
g1.start_gobgp(graceful_restart=True)
count = 0
while (g1.get_neighbor_state(g2) != BGP_FSM_ESTABLISHED
or g1.get_neighbor_state(g3) != BGP_FSM_ESTABLISHED):
count += 1
# assert connections are not refused
self.assertTrue(g1.get_neighbor_state(g2) != BGP_FSM_IDLE)
self.assertTrue(g1.get_neighbor_state(g3) != BGP_FSM_IDLE)
if count > 120:
raise Exception('timeout')
time.sleep(1)
if __name__ == '__main__':
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
if int(output) != 0:
print("docker not found")
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
|
|
import os
import tempfile
from http.server import BaseHTTPRequestHandler, HTTPServer
from subprocess import CalledProcessError
from threading import Thread
import pytest
import requests
import pkgpanda.util
from pkgpanda import UserManagement
from pkgpanda.exceptions import ValidationError
PathSeparator = '/'  # Currently the same for both Windows and Linux; this constant may become platform-dependent in the future.
def test_remove_file_pass():
"""
Remove a known directory. Should succeed silently.
"""
test_dir = tempfile.gettempdir() + PathSeparator + 'test'
# Here we really don't care if there is a left over dir since we will be removing it
# but we need to make sure there is one
pkgpanda.util.make_directory(test_dir)
assert os.path.isdir(test_dir)
# Build the temporary test file with a random name
fno, test_path = tempfile.mkstemp(dir=test_dir)
os.close(fno) # Close the reference so we don't have dangling file handles
test_data = "Test Data\n"
with open(test_path, "w") as f:
f.write(test_data)
pkgpanda.util.remove_file(test_path)
assert not os.path.exists(test_path), 'Directory item not removed'
pkgpanda.util.remove_directory(test_dir)
assert not os.path.exists(test_dir)
def test_remove_file_fail():
"""
Remove a non-existent directory item. Should fail silently without exceptions.
"""
test_dir = tempfile.gettempdir() + PathSeparator + 'remove_directory_fail'
test_path = test_dir + PathSeparator + "A"
# Make sure there is no left over directory
pkgpanda.util.remove_directory(test_dir)
assert not os.path.isdir(test_dir)
# We will try to remove a non-existent file
try:
pkgpanda.util.remove_file(test_path)
except Exception:
assert False, "Unexpected exception when trying to delete non existant directory item. Should fail silently"
assert not os.path.exists(test_path)
def test_make_directory_pass():
"""
Create a known directory and verify. Postcondition: the directory should exist
"""
test_dir = tempfile.gettempdir() + PathSeparator + 'make_directory_pass'
# Make sure there is no left over directory
pkgpanda.util.remove_directory(test_dir)
assert not os.path.isdir(test_dir)
# Make the directory and check for its existence as a dir
pkgpanda.util.make_directory(test_dir)
assert os.path.isdir(test_dir)
# Cleanup
pkgpanda.util.remove_directory(test_dir)
def test_make_directory_fail():
"""
Attempt to create a directory with a null name. Postcondition: Should throw an OSError
"""
test_dir = "" # Lets make nothing...
# Try to make the directory and check for its error
try:
pkgpanda.util.make_directory(test_dir)
except OSError as e:
assert e.errno == 2  # errno 2 == ENOENT (no such file or directory)
return
assert False, 'did not see expected OSError when trying to build unnamed directory'
def test_copy_file_pass():
"""
Copy a file from a known directory to another known file path.
Postcondition: The file should have been copied.
The copy should contain the same contents as the original.
"""
# Make sure we don't have the temp dirs and files left over
test_src_dir = tempfile.gettempdir() + PathSeparator + 'test_src'
test_dst_dir = tempfile.gettempdir() + PathSeparator + 'test_dst'
pkgpanda.util.remove_directory(test_src_dir)
pkgpanda.util.remove_directory(test_dst_dir)
assert not os.path.isdir(test_src_dir)
assert not os.path.isdir(test_dst_dir)
# Build the dirs for copying to/from
pkgpanda.util.make_directory(test_src_dir)
pkgpanda.util.make_directory(test_dst_dir)
# Build the source file
fno, src_path = tempfile.mkstemp(dir=test_src_dir)
os.close(fno)
# Build the temporary dest file with a random name
fno, dst_path = tempfile.mkstemp(dir=test_dst_dir)
os.close(fno) # Close the reference so we don't have dangling file handles
test_data = "Test Data\n"
with open(src_path, "w") as f:
f.write(test_data)
# copy the source file to the destination directory
pkgpanda.util.copy_file(src_path, dst_path)
lines = []
with open(dst_path, "r") as f:
lines = f.readlines()
assert lines[0] == test_data
def test_copy_file_fail():
"""
Copy a file from a known directory to another known file path whose directory does not exist.
Postcondition: Should throw a CalledProcessError or an OSError
"""
# Make sure we don't have the temp dirs and files left over
test_src_dir = tempfile.gettempdir() + PathSeparator + 'test_src'
test_dst_dir = tempfile.gettempdir() + PathSeparator + 'test_dst'
pkgpanda.util.remove_directory(test_src_dir)
pkgpanda.util.remove_directory(test_dst_dir)
assert not os.path.isdir(test_src_dir)
assert not os.path.isdir(test_dst_dir)
# Build the dirs for copying to/from
pkgpanda.util.make_directory(test_src_dir)
# Build the source file
fno, src_path = tempfile.mkstemp(dir=test_src_dir)
os.close(fno)
dst_path = test_dst_dir + PathSeparator + os.path.basename(src_path)
test_data = "Test Data\n"
with open(src_path, "w") as f:
f.write(test_data)
# copy the source file to the destination directory
try:
pkgpanda.util.copy_file(src_path, dst_path)
except (CalledProcessError, OSError):
return
assert False, 'did not see expected CalledProcessError or OSError when trying to copy to a non-existent directory item'
def test_copy_directory_pass():
"""
Copy a directory of files from a known directory to another known file path whose directory does not exist.
Postcondition: Should have recursively created the directories and files for the entire tree
"""
# Make sure we don't have the temp dirs and files left over
test_src_dir = tempfile.gettempdir() + PathSeparator + 'test_src'
test_dst_dir = tempfile.gettempdir() + PathSeparator + 'test_dst'
pkgpanda.util.remove_directory(test_src_dir)
pkgpanda.util.remove_directory(test_dst_dir)
assert not os.path.isdir(test_src_dir)
assert not os.path.isdir(test_dst_dir)
# Build the dirs for copying to/from
pkgpanda.util.make_directory(test_src_dir)
# Build the temporary source file with a random name
fno, src_path = tempfile.mkstemp(dir=test_src_dir)
os.close(fno) # Close the reference so we don't have dangling file handles
dst_path = test_dst_dir + PathSeparator + os.path.basename(src_path)
test_data = "Test Data\n"
with open(src_path, "w") as f:
f.write(test_data)
# copy the source file to the destination directory
pkgpanda.util.copy_directory(test_src_dir, test_dst_dir)
with open(dst_path, "r") as f:
lines = f.readlines()
assert lines[0] == test_data
def test_copy_directory_fail():
"""
Attempt to copy a directory of files from a non-existent directory to another
known file path whose directory does not exist.
Postcondition: Should throw a CalledProcessError or an OSError
"""
# Make sure we don't have the temp dirs and files left over
test_src_dir = tempfile.gettempdir() + PathSeparator + 'test_src'
test_dst_dir = tempfile.gettempdir() + PathSeparator + 'test_dst'
pkgpanda.util.remove_directory(test_src_dir)
pkgpanda.util.remove_directory(test_dst_dir)
assert not os.path.isdir(test_src_dir)
assert not os.path.isdir(test_dst_dir)
# try to copy the source file to the destination directory
try:
pkgpanda.util.copy_directory(test_src_dir, test_dst_dir)
except (CalledProcessError, OSError):
return
assert False, 'did not see expected CalledProcessError or OSError when trying to copy a non-existent directory tree'
def test_remove_directory():
test_dir = tempfile.gettempdir() + PathSeparator + 'test'
# Here we really don't care if there is a left over dir since we will be removing it
# but we need to make sure there is one
pkgpanda.util.make_directory(test_dir)
assert os.path.isdir(test_dir)
# Add some subdirectories and files
pkgpanda.util.make_directory(test_dir + PathSeparator + 'A')
# Build a file
fno, file_path = tempfile.mkstemp(dir=test_dir)
os.close(fno)
test_data = "Test Data\n"
with open(file_path, "r+") as f:
f.write(test_data)
# Build a file
fno, file_path = tempfile.mkstemp(dir=test_dir + PathSeparator + 'A')
os.close(fno)
test_data = "Test Data 2\n"
with open(file_path, "r+") as f:
f.write(test_data)
pkgpanda.util.remove_directory(test_dir)
assert not os.path.exists(file_path)
assert not os.path.isdir(test_dir + PathSeparator + 'A')
assert not os.path.isdir(test_dir)
def test_variant_variations():
assert pkgpanda.util.variant_str(None) == ''
assert pkgpanda.util.variant_str('test') == 'test'
assert pkgpanda.util.variant_object('') is None
assert pkgpanda.util.variant_object('test') == 'test'
assert pkgpanda.util.variant_name(None) == '<default>'
assert pkgpanda.util.variant_name('test') == 'test'
assert pkgpanda.util.variant_prefix(None) == ''
assert pkgpanda.util.variant_prefix('test') == 'test.'
def test_validate_username():
def good(name):
UserManagement.validate_username(name)
def bad(name):
with pytest.raises(ValidationError):
UserManagement.validate_username(name)
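# Judging by the accepted and rejected examples below, a valid service
# account name starts with the 'dcos_' prefix followed by at least one
# character drawn from lowercase letters, digits and underscores.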
good('dcos_mesos')
good('dcos_a')
good('dcos__')
good('dcos_a_b_c')
good('dcos_diagnostics')
good('dcos_a1')
good('dcos_1')
bad('dcos')
bad('d')
bad('d_a')
bad('foobar_asdf')
bad('dcos_***')
bad('dc/os_foobar')
bad('dcos_foo:bar')
bad('3dcos_foobar')
bad('dcos3_foobar')
@pytest.mark.skipif(
pkgpanda.util.is_windows,
reason="Windows does not have Unix groups",
)
def test_validate_group():
# We import grp here so that this module can be imported on Windows.
import grp
group_name_which_exists = grp.getgrall()[0].gr_name
UserManagement.validate_group(group_name_which_exists)
with pytest.raises(ValidationError):
UserManagement.validate_group('group-should-not-exist')
def test_split_by_token():
split_by_token = pkgpanda.util.split_by_token
# Token prefix and suffix must not be empty.
with pytest.raises(ValueError):
list(split_by_token('', ')', 'foo'))
with pytest.raises(ValueError):
list(split_by_token('(', '', 'foo'))
with pytest.raises(ValueError):
list(split_by_token('', '', 'foo'))
# Empty string.
assert list(split_by_token('{{ ', ' }}', '')) == [('', False)]
# String with no tokens.
assert list(split_by_token('{{ ', ' }}', 'no tokens')) == [('no tokens', False)]
# String with one token.
assert list(split_by_token('{{ ', ' }}', '{{ token_name }}')) == [('{{ token_name }}', True)]
assert list(split_by_token('{{ ', ' }}', 'foo {{ token_name }}')) == [('foo ', False), ('{{ token_name }}', True)]
assert list(split_by_token('{{ ', ' }}', '{{ token_name }} foo')) == [('{{ token_name }}', True), (' foo', False)]
# String with multiple tokens.
assert list(split_by_token('{{ ', ' }}', 'foo {{ token_a }} bar {{ token_b }} \n')) == [
('foo ', False), ('{{ token_a }}', True), (' bar ', False), ('{{ token_b }}', True), (' \n', False)
]
# Token decoration is stripped when requested.
assert list(split_by_token('[[', ']]', 'foo [[token_a]] bar[[token_b ]]', strip_token_decoration=True)) == [
('foo ', False), ('token_a', True), (' bar', False), ('token_b ', True)
]
# Token prefix and suffix can be the same.
assert list(split_by_token('||', '||', 'foo ||token_a|| bar ||token_b|| \n')) == [
('foo ', False), ('||token_a||', True), (' bar ', False), ('||token_b||', True), (' \n', False)
]
assert list(split_by_token('||', '||', 'foo ||token_a|| bar ||token_b|| \n', strip_token_decoration=True)) == [
('foo ', False), ('token_a', True), (' bar ', False), ('token_b', True), (' \n', False)
]
# Missing token suffix.
with pytest.raises(Exception):
list(split_by_token('(', ')', '(foo) (bar('))
# Missing suffix for middle token.
with pytest.raises(Exception):
list(split_by_token('[[', ']]', '[[foo]] [[bar [[baz]]'))
# Missing token prefix.
with pytest.raises(Exception):
list(split_by_token('[[', ']]', 'foo]] [[bar]]'))
# Nested tokens.
with pytest.raises(Exception):
list(split_by_token('[[', ']]', '[[foo]] [[bar [[baz]] ]]'))
# Docstring examples.
assert list(split_by_token('{', '}', 'some text {token} some more text')) == [
('some text ', False), ('{token}', True), (' some more text', False)
]
assert list(split_by_token('{', '}', 'some text {token} some more text', strip_token_decoration=True)) == [
('some text ', False), ('token', True), (' some more text', False)
]
# TODO: DCOS_OSS-3508 - muted Windows tests requiring investigation
@pytest.mark.skipif(pkgpanda.util.is_windows, reason="Windows and Linux permissions parsed differently")
def test_write_string(tmpdir):
"""
`pkgpanda.util.write_string` writes or overwrites a file with permissions
for User to read and write, Group to read and Other to read.
Permissions of the given filename are preserved, or a new file is created
with 0o644 permissions.
This test was written to make current functionality regression-safe which
is why no explanation is given for these particular permission
requirements.
"""
filename = os.path.join(str(tmpdir), 'foo_filename')
pkgpanda.util.write_string(filename=filename, data='foo_contents')
with open(filename) as f:
assert f.read() == 'foo_contents'
pkgpanda.util.write_string(filename=filename, data='foo_contents_2')
with open(filename) as f:
assert f.read() == 'foo_contents_2'
st_mode = os.stat(filename).st_mode
expected_permission = 0o644
assert (st_mode & 0o777) == expected_permission
os.chmod(filename, 0o777)
pkgpanda.util.write_string(filename=filename, data='foo_contents_3')
with open(filename) as f:
assert f.read() == 'foo_contents_3'
st_mode = os.stat(filename).st_mode
expected_permission = 0o777
assert (st_mode & 0o777) == expected_permission
class MockDownloadServerRequestHandler(BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
body = b'foobar'
self.send_response(requests.codes.ok)
self.send_header('Content-Type', 'text/plain')
if 'no_content_length' not in self.path:
self.send_header('Content-Length', '6')
self.end_headers()
if self.server.requests_received == 0:
# Don't send the last byte of the response body.
self.wfile.write(body[:len(body) - 1])
else:
self.wfile.write(body)
self.server.requests_received += 1
return
class MockHTTPDownloadServer(HTTPServer):
requests_received = 0
def reset_requests_received(self):
self.requests_received = 0
@pytest.fixture(scope='module')
def mock_download_server():
mock_server = MockHTTPDownloadServer(('localhost', 0), MockDownloadServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever, daemon=True)
mock_server_thread.start()
return mock_server
def test_download_remote_file(tmpdir, mock_download_server):
mock_download_server.reset_requests_received()
url = 'http://localhost:{port}/foobar.txt'.format(port=mock_download_server.server_port)
out_file = os.path.join(str(tmpdir), 'foobar.txt')
response = pkgpanda.util._download_remote_file(out_file, url)
response_is_ok = response.ok
assert response_is_ok
assert mock_download_server.requests_received == 2
with open(out_file, 'rb') as f:
assert f.read() == b'foobar'
def test_download_remote_file_without_content_length(tmpdir, mock_download_server):
mock_download_server.reset_requests_received()
url = 'http://localhost:{port}/foobar.txt?no_content_length=true'.format(
port=mock_download_server.server_port)
out_file = os.path.join(str(tmpdir), 'foobar.txt')
response = pkgpanda.util._download_remote_file(out_file, url)
response_is_ok = response.ok
assert response_is_ok
assert mock_download_server.requests_received == 1
with open(out_file, 'rb') as f:
assert f.read() == b'fooba'
|
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import datetime
import functools
import socket
import sys
import threading
import time
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, StackContext, wrap, NullContext
from tornado.testing import AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipIfNonUnix, skipOnTravis
try:
from concurrent import futures
except ImportError:
futures = None
class TestIOLoop(AsyncTestCase):
@skipOnTravis
def test_add_callback_wakeup(self):
# Make sure that add_callback from inside a running IOLoop
# wakes up the IOLoop immediately instead of waiting for a timeout.
def callback():
self.called = True
self.stop()
def schedule_callback():
self.called = False
self.io_loop.add_callback(callback)
# Store away the time so we can check if we woke up immediately
self.start_time = time.time()
self.io_loop.add_timeout(self.io_loop.time(), schedule_callback)
self.wait()
self.assertAlmostEqual(time.time(), self.start_time, places=2)
self.assertTrue(self.called)
@skipOnTravis
def test_add_callback_wakeup_other_thread(self):
def target():
# sleep a bit to let the ioloop go into its poll loop
time.sleep(0.01)
self.stop_time = time.time()
self.io_loop.add_callback(self.stop)
thread = threading.Thread(target=target)
self.io_loop.add_callback(thread.start)
self.wait()
delta = time.time() - self.stop_time
self.assertLess(delta, 0.1)
thread.join()
def test_add_timeout_timedelta(self):
self.io_loop.add_timeout(datetime.timedelta(microseconds=1), self.stop)
self.wait()
def test_multiple_add(self):
sock, port = bind_unused_port()
try:
self.io_loop.add_handler(sock.fileno(), lambda fd, events: None,
IOLoop.READ)
# Attempting to add the same handler twice fails
# (with a platform-dependent exception)
self.assertRaises(Exception, self.io_loop.add_handler,
sock.fileno(), lambda fd, events: None,
IOLoop.READ)
finally:
self.io_loop.remove_handler(sock.fileno())
sock.close()
def test_remove_without_add(self):
# remove_handler should not throw an exception if called on an fd
# that was never added.
sock, port = bind_unused_port()
try:
self.io_loop.remove_handler(sock.fileno())
finally:
sock.close()
def test_add_callback_from_signal(self):
# cheat a little bit and just run this normally, since we can't
# easily simulate the races that happen with real signal handlers
self.io_loop.add_callback_from_signal(self.stop)
self.wait()
def test_add_callback_from_signal_other_thread(self):
# Very crude test, just to make sure that we cover this case.
# This also happens to be the first test where we run an IOLoop in
# a non-main thread.
other_ioloop = IOLoop()
thread = threading.Thread(target=other_ioloop.start)
thread.start()
other_ioloop.add_callback_from_signal(other_ioloop.stop)
thread.join()
other_ioloop.close()
def test_add_callback_while_closing(self):
# Issue #635: add_callback() should raise a clean exception
# if called while another thread is closing the IOLoop.
closing = threading.Event()
def target():
other_ioloop.add_callback(other_ioloop.stop)
other_ioloop.start()
closing.set()
other_ioloop.close(all_fds=True)
other_ioloop = IOLoop()
thread = threading.Thread(target=target)
thread.start()
closing.wait()
for i in range(1000):
try:
other_ioloop.add_callback(lambda: None)
except RuntimeError as e:
self.assertEqual("IOLoop is closing", str(e))
break
def test_handle_callback_exception(self):
# IOLoop.handle_callback_exception can be overridden to catch
# exceptions in callbacks.
def handle_callback_exception(callback):
self.assertIs(sys.exc_info()[0], ZeroDivisionError)
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
# remove the test StackContext that would see this uncaught
# exception as a test failure.
self.io_loop.add_callback(lambda: 1 / 0)
self.wait()
@skipIfNonUnix # just because socketpair is so convenient
def test_read_while_writeable(self):
# Ensure that write events don't come in while we're waiting for
# a read and haven't asked for writeability. (the reverse is
# difficult to test for)
client, server = socket.socketpair()
try:
def handler(fd, events):
self.assertEqual(events, IOLoop.READ)
self.stop()
self.io_loop.add_handler(client.fileno(), handler, IOLoop.READ)
self.io_loop.add_timeout(self.io_loop.time() + 0.01,
functools.partial(server.send, b'asdf'))
self.wait()
self.io_loop.remove_handler(client.fileno())
finally:
client.close()
server.close()
def test_remove_timeout_after_fire(self):
# It is not an error to call remove_timeout after it has run.
handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
self.wait()
self.io_loop.remove_timeout(handle)
def test_remove_timeout_cleanup(self):
# Add and remove enough callbacks to trigger cleanup.
# Not a very thorough test, but it ensures that the cleanup code
# gets executed and doesn't blow up. This test is only really useful
# on PollIOLoop subclasses, but it should run silently on any
# implementation.
for i in range(2000):
timeout = self.io_loop.add_timeout(self.io_loop.time() + 3600,
lambda: None)
self.io_loop.remove_timeout(timeout)
# HACK: wait two IOLoop iterations for the GC to happen.
self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
self.wait()
def test_remove_timeout_from_timeout(self):
calls = [False, False]
# Schedule several callbacks and wait for them all to come due at once.
# t2 should be cancelled by t1, even though it is already scheduled to
# be run before the ioloop even looks at it.
now = self.io_loop.time()
def t1():
calls[0] = True
self.io_loop.remove_timeout(t2_handle)
self.io_loop.add_timeout(now + 0.01, t1)
def t2():
calls[1] = True
t2_handle = self.io_loop.add_timeout(now + 0.02, t2)
self.io_loop.add_timeout(now + 0.03, self.stop)
time.sleep(0.03)
self.wait()
self.assertEqual(calls, [True, False])
def test_timeout_with_arguments(self):
# This tests that all the timeout methods pass through *args correctly.
results = []
self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
self.io_loop.add_timeout(datetime.timedelta(seconds=0),
results.append, 2)
self.io_loop.call_at(self.io_loop.time(), results.append, 3)
self.io_loop.call_later(0, results.append, 4)
self.io_loop.call_later(0, self.stop)
self.wait()
self.assertEqual(results, [1, 2, 3, 4])
def test_add_timeout_return(self):
# All the timeout methods return non-None handles that can be
# passed to remove_timeout.
handle = self.io_loop.add_timeout(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_at_return(self):
handle = self.io_loop.call_at(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_later_return(self):
handle = self.io_loop.call_later(0, lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_close_file_object(self):
"""When a file object is used instead of a numeric file descriptor,
the object should be closed (by IOLoop.close(all_fds=True),
not just the fd.
"""
# Use a socket since they are supported by IOLoop on all platforms.
# Unfortunately, sockets don't support the .closed attribute for
# inspecting their close status, so we must use a wrapper.
class SocketWrapper(object):
def __init__(self, sockobj):
self.sockobj = sockobj
self.closed = False
def fileno(self):
return self.sockobj.fileno()
def close(self):
self.closed = True
self.sockobj.close()
sockobj, port = bind_unused_port()
socket_wrapper = SocketWrapper(sockobj)
io_loop = IOLoop()
io_loop.add_handler(socket_wrapper, lambda fd, events: None,
IOLoop.READ)
io_loop.close(all_fds=True)
self.assertTrue(socket_wrapper.closed)
def test_handler_callback_file_object(self):
"""The handler callback receives the same fd object it passed in."""
server_sock, port = bind_unused_port()
fds = []
def handle_connection(fd, events):
fds.append(fd)
conn, addr = server_sock.accept()
conn.close()
self.stop()
self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(('127.0.0.1', port))
self.wait()
self.io_loop.remove_handler(server_sock)
self.io_loop.add_handler(server_sock.fileno(), handle_connection,
IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(('127.0.0.1', port))
self.wait()
self.assertIs(fds[0], server_sock)
self.assertEqual(fds[1], server_sock.fileno())
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_mixed_fd_fileobj(self):
server_sock, port = bind_unused_port()
def f(fd, events):
pass
self.io_loop.add_handler(server_sock, f, IOLoop.READ)
with self.assertRaises(Exception):
# The exact error is unspecified - some implementations use
# IOError, others use ValueError.
self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_reentrant(self):
"""Calling start() twice should raise an error, not deadlock."""
returned_from_start = [False]
got_exception = [False]
def callback():
try:
self.io_loop.start()
returned_from_start[0] = True
except Exception:
got_exception[0] = True
self.stop()
self.io_loop.add_callback(callback)
self.wait()
self.assertTrue(got_exception[0])
self.assertFalse(returned_from_start[0])
def test_exception_logging(self):
"""Uncaught exceptions get logged by the IOLoop."""
# Use a NullContext to keep the exception from being caught by
# AsyncTestCase.
with NullContext():
self.io_loop.add_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_exception_logging_future(self):
"""The IOLoop examines exceptions from Futures and logs them."""
with NullContext():
@gen.coroutine
def callback():
self.io_loop.add_callback(self.stop)
1 / 0
self.io_loop.add_callback(callback)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_spawn_callback(self):
        # An added callback runs in the test's stack_context, so it will be
        # re-raised in wait().
self.io_loop.add_callback(lambda: 1 / 0)
with self.assertRaises(ZeroDivisionError):
self.wait()
# A spawned callback is run directly on the IOLoop, so it will be
# logged without stopping the test.
self.io_loop.spawn_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
@skipIfNonUnix
def test_remove_handler_from_handler(self):
# Create two sockets with simultaneous read events.
client, server = socket.socketpair()
try:
client.send(b'abc')
server.send(b'abc')
# After reading from one fd, remove the other from the IOLoop.
chunks = []
def handle_read(fd, events):
chunks.append(fd.recv(1024))
if fd is client:
self.io_loop.remove_handler(server)
else:
self.io_loop.remove_handler(client)
self.io_loop.add_handler(client, handle_read, self.io_loop.READ)
self.io_loop.add_handler(server, handle_read, self.io_loop.READ)
self.io_loop.call_later(0.03, self.stop)
self.wait()
# Only one fd was read; the other was cleanly removed.
self.assertEqual(chunks, [b'abc'])
finally:
client.close()
server.close()
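def _ioloop_callback_demo():
    # A minimal sketch (not part of the test suite) of the scheduling pattern
    # the wakeup tests above exercise: a timeout schedules a callback and the
    # callback stops the loop. The helper name is invented for illustration.
    fired = []
    loop = IOLoop()
    def on_callback():
        fired.append('callback')
        loop.stop()
    def on_timeout():
        fired.append('timeout')
        loop.add_callback(on_callback)
    loop.add_timeout(loop.time() + 0.01, on_timeout)
    loop.start()
    loop.close()
    return fired  # expected: ['timeout', 'callback']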
# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.
class TestIOLoopCurrent(unittest.TestCase):
def setUp(self):
self.io_loop = IOLoop()
def tearDown(self):
self.io_loop.close()
def test_current(self):
def f():
self.current_io_loop = IOLoop.current()
self.io_loop.stop()
self.io_loop.add_callback(f)
self.io_loop.start()
self.assertIs(self.current_io_loop, self.io_loop)
class TestIOLoopAddCallback(AsyncTestCase):
def setUp(self):
super(TestIOLoopAddCallback, self).setUp()
self.active_contexts = []
def add_callback(self, callback, *args, **kwargs):
self.io_loop.add_callback(callback, *args, **kwargs)
@contextlib.contextmanager
def context(self, name):
self.active_contexts.append(name)
yield
self.assertEqual(self.active_contexts.pop(), name)
def test_pre_wrap(self):
# A pre-wrapped callback is run in the context in which it was
# wrapped, not when it was added to the IOLoop.
def f1():
self.assertIn('c1', self.active_contexts)
self.assertNotIn('c2', self.active_contexts)
self.stop()
with StackContext(functools.partial(self.context, 'c1')):
wrapped = wrap(f1)
with StackContext(functools.partial(self.context, 'c2')):
self.add_callback(wrapped)
self.wait()
def test_pre_wrap_with_args(self):
# Same as test_pre_wrap, but the function takes arguments.
# Implementation note: The function must not be wrapped in a
# functools.partial until after it has been passed through
# stack_context.wrap
def f1(foo, bar):
self.assertIn('c1', self.active_contexts)
self.assertNotIn('c2', self.active_contexts)
self.stop((foo, bar))
with StackContext(functools.partial(self.context, 'c1')):
wrapped = wrap(f1)
with StackContext(functools.partial(self.context, 'c2')):
self.add_callback(wrapped, 1, bar=2)
result = self.wait()
self.assertEqual(result, (1, 2))
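def _stack_context_wrap_demo():
    # A hedged sketch (not one of the real tests) of the behaviour asserted in
    # test_pre_wrap above: wrap() captures the StackContexts active at wrap
    # time, and they are re-entered when the callback later runs on the loop.
    active = []
    seen = []
    loop = IOLoop()
    @contextlib.contextmanager
    def record(name):
        active.append(name)
        try:
            yield
        finally:
            active.pop()
    def callback():
        seen.append(list(active))
        loop.stop()
    with StackContext(functools.partial(record, 'outer')):
        wrapped = wrap(callback)
    loop.add_callback(wrapped)
    loop.start()
    loop.close()
    return seen  # expected: [['outer']]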
class TestIOLoopAddCallbackFromSignal(TestIOLoopAddCallback):
# Repeat the add_callback tests using add_callback_from_signal
def add_callback(self, callback, *args, **kwargs):
self.io_loop.add_callback_from_signal(callback, *args, **kwargs)
@unittest.skipIf(futures is None, "futures module not present")
class TestIOLoopFutures(AsyncTestCase):
def test_add_future_threads(self):
with futures.ThreadPoolExecutor(1) as pool:
self.io_loop.add_future(pool.submit(lambda: None),
lambda future: self.stop(future))
future = self.wait()
self.assertTrue(future.done())
self.assertTrue(future.result() is None)
def test_add_future_stack_context(self):
ready = threading.Event()
def task():
# we must wait for the ioloop callback to be scheduled before
# the task completes to ensure that add_future adds the callback
# asynchronously (which is the scenario in which capturing
# the stack_context matters)
ready.wait(1)
assert ready.isSet(), "timed out"
raise Exception("worker")
def callback(future):
self.future = future
raise Exception("callback")
def handle_exception(typ, value, traceback):
self.exception = value
self.stop()
return True
# stack_context propagates to the ioloop callback, but the worker
# task just has its exceptions caught and saved in the Future.
with futures.ThreadPoolExecutor(1) as pool:
with ExceptionStackContext(handle_exception):
self.io_loop.add_future(pool.submit(task), callback)
ready.set()
self.wait()
self.assertEqual(self.exception.args[0], "callback")
self.assertEqual(self.future.exception().args[0], "worker")
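def _add_future_demo():
    # A minimal sketch (not part of the suite) of add_future() without the
    # AsyncTestCase plumbing: the callback runs on the IOLoop once the
    # executor's future resolves. Assumes the concurrent.futures module (or
    # backport) imported above is available.
    results = []
    loop = IOLoop()
    def on_done(future):
        results.append(future.result())
        loop.stop()
    with futures.ThreadPoolExecutor(1) as pool:
        loop.add_future(pool.submit(lambda: 21 * 2), on_done)
        loop.start()
    loop.close()
    return results  # expected: [42]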
class TestIOLoopRunSync(unittest.TestCase):
def setUp(self):
self.io_loop = IOLoop()
def tearDown(self):
self.io_loop.close()
def test_sync_result(self):
self.assertEqual(self.io_loop.run_sync(lambda: 42), 42)
def test_sync_exception(self):
with self.assertRaises(ZeroDivisionError):
self.io_loop.run_sync(lambda: 1 / 0)
def test_async_result(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
self.assertEqual(self.io_loop.run_sync(f), 42)
def test_async_exception(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
1 / 0
with self.assertRaises(ZeroDivisionError):
self.io_loop.run_sync(f)
def test_current(self):
def f():
self.assertIs(IOLoop.current(), self.io_loop)
self.io_loop.run_sync(f)
def test_timeout(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)
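def _run_sync_demo():
    # A hedged sketch (not one of the tests) of the run_sync() pattern used by
    # TestIOLoopRunSync: create a throwaway IOLoop, drive a coroutine to
    # completion, and close the loop afterwards.
    @gen.coroutine
    def compute():
        raise gen.Return(6 * 7)
    loop = IOLoop()
    try:
        return loop.run_sync(compute, timeout=1)  # expected: 42
    finally:
        loop.close()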
if __name__ == "__main__":
unittest.main()
|
|
from collections import OrderedDict
import pytest
from flattentool.schema import (
JsonLoaderLocalRefsDisabled,
SchemaParser,
get_property_type_set,
)
from flattentool.sheet import Sheet
type_string = {"type": "string"}
def test_sub_sheet_list_like():
# SubSheet object should be appendable and iterable...
# .append() is used in json_input.py at https://github.com/OpenDataServices/flatten-tool/blob/master/flattentool/json_input.py#L33
sub_sheet = Sheet()
assert list(sub_sheet) == []
sub_sheet.append("a")
sub_sheet.append("b")
assert list(sub_sheet) == ["a", "b"]
# ... but also has an add_field method, which also appends
sub_sheet.add_field("c")
assert list(sub_sheet) == ["a", "b", "c"]
# but with the option to add an id_field, which appears at the start of the list
sub_sheet.add_field("d", id_field=True)
assert list(sub_sheet) == ["d", "a", "b", "c"]
def test_get_property_type_set():
assert get_property_type_set({"type": "a"}) == set(["a"])
assert get_property_type_set({"type": ["a"]}) == set(["a"])
assert get_property_type_set({"type": ["a", "b"]}) == set(["a", "b"])
def test_filename_and_dict_error(tmpdir):
"""A value error should be raised if both schema_filename and
root_schema_dict are supplied to SchemaParser"""
tmpfile = tmpdir.join("test_schema.json")
tmpfile.write("{}")
with pytest.raises(ValueError):
SchemaParser(schema_filename=tmpfile.strpath, root_schema_dict={})
# Supplying neither should also raise a ValueError
with pytest.raises(ValueError):
SchemaParser()
def test_references_followed(tmpdir):
"""JSON references should be followed when a JSON file is read."""
tmpfile = tmpdir.join("test_schema.json")
tmpfile.write('{"a":{"$ref":"#/b"}, "b":"c"}')
parser = SchemaParser(schema_filename=tmpfile.strpath)
assert parser.root_schema_dict["a"] == "c"
def test_order_preserved(tmpdir):
"""Order should be preserved when a JSON file is read."""
tmpfile = tmpdir.join("test_schema.json")
tmpfile.write('{"a":{}, "c":{}, "b":{}, "d":{}}')
parser = SchemaParser(schema_filename=tmpfile.strpath)
assert list(parser.root_schema_dict.keys()) == ["a", "c", "b", "d"]
def test_main_sheet_basic():
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": type_string,
# type is allowed to be empty, and we should assume string
"Btest": {},
}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["Atest", "Btest"])
def test_main_sheet_nested():
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {"type": "object", "properties": {"Ctest": type_string}}
}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["Atest/Ctest"])
def test_sub_sheet():
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"items": {"type": "object", "properties": {"Btest": type_string}},
},
}
}
)
parser.parse()
assert set(parser.main_sheet) == set([])
assert set(parser.sub_sheets) == set(["Atest"])
assert list(parser.sub_sheets["Atest"]) == ["Atest/0/Btest"]
def object_in_array_example_properties(parent_name, child_name):
return {
"id": type_string,
parent_name: {
"type": "array",
"items": {"type": "object", "properties": {child_name: type_string}},
},
}
class TestSubSheetParentID(object):
def test_parent_is_object(self):
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Ctest"
),
}
}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["Atest/id"])
assert set(parser.sub_sheets) == set(["Ate_Btest"])
assert list(parser.sub_sheets["Ate_Btest"]) == [
"Atest/id",
"Atest/Btest/0/Ctest",
]
def test_parent_is_array(self):
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"items": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Ctest"
),
},
}
}
}
)
parser.parse()
assert set(parser.main_sheet) == set()
assert set(parser.sub_sheets) == set(["Atest", "Ate_Btest"])
assert list(parser.sub_sheets["Atest"]) == ["Atest/0/id"]
assert list(parser.sub_sheets["Ate_Btest"]) == [
"Atest/0/id",
"Atest/0/Btest/0/Ctest",
]
def test_two_parents(self):
parser = SchemaParser(
root_schema_dict={
"properties": OrderedDict(
[
(
"Atest",
{
"type": "array",
"items": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Ctest"
),
},
},
),
(
"Dtest",
{
"type": "array",
"items": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Etest"
),
},
},
),
]
)
}
)
parser.parse()
assert set(parser.main_sheet) == set()
assert set(parser.sub_sheets) == set(
["Atest", "Dtest", "Ate_Btest", "Dte_Btest"]
)
assert list(parser.sub_sheets["Atest"]) == ["Atest/0/id"]
assert list(parser.sub_sheets["Dtest"]) == ["Dtest/0/id"]
assert list(parser.sub_sheets["Ate_Btest"]) == [
"Atest/0/id",
"Atest/0/Btest/0/Ctest",
]
assert list(parser.sub_sheets["Dte_Btest"]) == [
"Dtest/0/id",
"Dtest/0/Btest/0/Etest",
]
def test_parent_is_object_nested(self):
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "object",
"properties": {
"Btest": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Ctest"
),
}
},
}
}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["Atest/Btest/id"])
assert set(parser.sub_sheets) == set(["Ate_Bte_Btest"])
assert list(parser.sub_sheets["Ate_Bte_Btest"]) == [
"Atest/Btest/id",
"Atest/Btest/Btest/0/Ctest",
]
class TestSubSheetMainID(object):
def test_parent_is_object(self):
parser = SchemaParser(
root_schema_dict={
"properties": {
"id": type_string,
"Atest": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Ctest"
),
},
}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["id", "Atest/id"])
assert set(parser.sub_sheets) == set(["Ate_Btest"])
assert list(parser.sub_sheets["Ate_Btest"]) == [
"id",
"Atest/id",
"Atest/Btest/0/Ctest",
]
def test_parent_is_array(self):
parser = SchemaParser(
root_schema_dict={
"properties": {
"id": type_string,
"Atest": {
"type": "array",
"items": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Ctest"
),
},
},
}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["id"])
assert set(parser.sub_sheets) == set(["Atest", "Ate_Btest"])
assert list(parser.sub_sheets["Atest"]) == ["id", "Atest/0/id"]
assert list(parser.sub_sheets["Ate_Btest"]) == [
"id",
"Atest/0/id",
"Atest/0/Btest/0/Ctest",
]
def test_two_parents(self):
parser = SchemaParser(
root_schema_dict={
"properties": OrderedDict(
[
("id", type_string),
(
"Atest",
{
"type": "array",
"items": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Ctest"
),
},
},
),
(
"Dtest",
{
"type": "array",
"items": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Etest"
),
},
},
),
]
)
}
)
parser.parse()
assert set(parser.main_sheet) == set(["id"])
assert set(parser.sub_sheets) == set(
["Atest", "Dtest", "Ate_Btest", "Dte_Btest"]
)
assert list(parser.sub_sheets["Atest"]) == ["id", "Atest/0/id"]
assert list(parser.sub_sheets["Dtest"]) == ["id", "Dtest/0/id"]
assert list(parser.sub_sheets["Ate_Btest"]) == [
"id",
"Atest/0/id",
"Atest/0/Btest/0/Ctest",
]
assert list(parser.sub_sheets["Dte_Btest"]) == [
"id",
"Dtest/0/id",
"Dtest/0/Btest/0/Etest",
]
def test_custom_main_sheet_name(self):
parser = SchemaParser(
root_schema_dict={
"properties": {
"id": type_string,
"Atest": {
"type": "object",
"properties": object_in_array_example_properties(
"Btest", "Ctest"
),
},
}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["id", "Atest/id"])
assert set(parser.sub_sheets) == set(["Ate_Btest"])
assert list(parser.sub_sheets["Ate_Btest"]) == [
"id",
"Atest/id",
"Atest/Btest/0/Ctest",
]
@pytest.mark.parametrize("type_", ["string", "number"])
def test_simple_array(type_):
parser = SchemaParser(
root_schema_dict={
"properties": {"Atest": {"type": "array", "items": {"type": type_}}}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["Atest"])
@pytest.mark.parametrize("type_", ["string", "number"])
def test_nested_simple_array(type_):
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"items": {"type": "array", "items": {"type": type_}},
}
}
}
)
parser.parse()
assert set(parser.main_sheet) == set(["Atest"])
def test_references_sheet_names(tmpdir):
"""
The referenced name used to be used for the sheet name,
but is NOT any more.
"""
tmpfile = tmpdir.join("test_schema.json")
tmpfile.write(
"""{
"properties": { "Atest": {
"type": "array",
"items": {"$ref": "#/Btest"}
} },
"Btest": { "type": "object", "properties": {"Ctest":{"type": "string"}} }
}"""
)
parser = SchemaParser(schema_filename=tmpfile.strpath)
parser.parse()
assert set(parser.sub_sheets) == set(["Atest"]) # used to be Btest
assert list(parser.sub_sheets["Atest"]) == ["Atest/0/Ctest"]
def test_rollup():
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"rollUp": ["Btest"],
"items": {
"type": "object",
"properties": {"Btest": type_string, "Ctest": type_string},
},
},
}
},
rollup=True,
)
parser.parse()
assert set(parser.main_sheet) == set(["Atest/0/Btest"])
assert set(parser.sub_sheets) == set(["Atest"])
assert set(parser.sub_sheets["Atest"]) == set(["Atest/0/Btest", "Atest/0/Ctest"])
def test_bad_rollup(recwarn):
"""
When rollUp is specified, but the field is missing in the schema, we expect
a warning.
"""
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"rollUp": ["Btest"],
"items": {"type": "object", "properties": {"Ctest": type_string}},
},
}
},
rollup=True,
)
parser.parse()
w = recwarn.pop(UserWarning)
assert "Btest in rollUp but not in schema" in str(w.message)
assert set(parser.main_sheet) == set()
assert set(parser.sub_sheets) == set(["Atest"])
assert set(parser.sub_sheets["Atest"]) == set(["Atest/0/Ctest"])
def test_sub_sheet_custom_id():
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"items": {"type": "object", "properties": {"Btest": type_string}},
},
}
},
root_id="custom",
)
parser.parse()
assert set(parser.main_sheet) == set([])
assert set(parser.sub_sheets) == set(["Atest"])
assert list(parser.sub_sheets["Atest"]) == ["custom", "Atest/0/Btest"]
def test_sub_sheet_empty_string_root_id():
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"items": {"type": "object", "properties": {"Btest": type_string}},
},
}
},
root_id="",
)
parser.parse()
assert set(parser.main_sheet) == set([])
assert set(parser.sub_sheets) == set(["Atest"])
assert list(parser.sub_sheets["Atest"]) == ["Atest/0/Btest"]
@pytest.mark.parametrize("use_titles", [True, False])
def test_use_titles(recwarn, use_titles):
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"title": "ATitle",
"type": "array",
"items": {
"type": "object",
"properties": {"Btest": {"type": "string", "title": "BTitle"}},
},
},
"Ctest": {"type": "string", "title": "CTitle"},
}
},
use_titles=use_titles,
)
parser.parse()
assert len(recwarn) == 0
if use_titles:
assert set(parser.main_sheet) == set(["CTitle"])
assert set(parser.sub_sheets) == set(["ATitle"])
assert list(parser.sub_sheets["ATitle"]) == ["ATitle:BTitle"]
# Array title missing
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"items": {
"type": "object",
"properties": {"Btest": {"type": "string", "title": "BTitle"}},
},
},
"Ctest": {"type": "string", "title": "CTitle"},
}
},
use_titles=use_titles,
)
parser.parse()
if use_titles:
assert set(parser.main_sheet) == set(["CTitle"])
assert set(parser.sub_sheets) == set(["Atest"])
assert list(parser.sub_sheets["Atest"]) == []
assert len(recwarn) == 1
w = recwarn.pop(UserWarning)
assert "Field Atest does not have a title" in str(w.message)
else:
assert len(recwarn) == 0
# Object containing array title missing
parser = SchemaParser(
root_schema_dict={
"properties": {
"Xtest": {
"type": "object",
"properties": {
"Atest": {
"type": "array",
"title": "ATitle",
"items": {
"type": "object",
"properties": {
"Btest": {"type": "string", "title": "BTitle"}
},
},
}
},
},
"Ctest": {"type": "string", "title": "CTitle"},
}
},
use_titles=use_titles,
)
parser.parse()
if use_titles:
assert set(parser.main_sheet) == set(["CTitle"])
assert set(parser.sub_sheets) == set(["Xte_Atest"])
assert list(parser.sub_sheets["Xte_Atest"]) == []
assert len(recwarn) == 1
w = recwarn.pop(UserWarning)
assert "Field Xtest/Atest/0/Btest is missing a title" in str(w.message)
else:
assert len(recwarn) == 0
@pytest.mark.parametrize("use_titles", [True, False])
def test_use_titles3(recwarn, use_titles):
# Array containing a nested object title missing
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"title": "ATitle",
"items": {
"type": "object",
"properties": {
"Btest": {
"type": "object",
"properties": {
"Ctest": {"type": "string", "title": "CTitle"}
},
}
},
},
},
"Ctest": {"type": "string", "title": "CTitle"},
}
},
use_titles=use_titles,
)
parser.parse()
if use_titles:
assert set(parser.main_sheet) == set(["CTitle"])
assert set(parser.sub_sheets) == set(["ATitle"])
assert list(parser.sub_sheets["ATitle"]) == []
assert len(recwarn) == 1
w = recwarn.pop(UserWarning)
assert "Field Atest/0/Btest/Ctest is missing a title" in str(w.message)
else:
assert len(recwarn) == 0
@pytest.mark.parametrize("use_titles", [True, False])
def test_use_titles2(recwarn, use_titles):
# Object containing object title missing
parser = SchemaParser(
root_schema_dict={
"properties": {
"Xtest": {
"type": "object",
"properties": {
"Atest": {
"type": "object",
"title": "ATitle",
"properties": {
"Btest": {"type": "string", "title": "BTitle"}
},
}
},
},
"Ctest": {"type": "string", "title": "CTitle"},
}
},
use_titles=use_titles,
)
parser.parse()
if use_titles:
assert set(parser.main_sheet) == set(["CTitle"])
assert set(parser.sub_sheets) == set([])
assert len(recwarn) == 1
w = recwarn.pop(UserWarning)
assert "Field Xtest/Atest/Btest does not have a title, skipping" in str(
w.message
)
else:
assert len(recwarn) == 0
# Main sheet title missing
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"title": "ATitle",
"type": "array",
"items": {
"type": "object",
"properties": {"Btest": {"type": "string", "title": "BTitle"}},
},
},
"Ctest": {"type": "string"},
}
},
use_titles=use_titles,
)
parser.parse()
if use_titles:
assert set(parser.main_sheet) == set([])
assert set(parser.sub_sheets) == set(["ATitle"])
assert list(parser.sub_sheets["ATitle"]) == ["ATitle:BTitle"]
assert len(recwarn) == 1
w = recwarn.pop(UserWarning)
assert "Field Ctest does not have a title" in str(w.message)
else:
assert len(recwarn) == 0
def test_use_titles5(recwarn):
# Child sheet title missing
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"title": "ATitle",
"type": "array",
"items": {
"type": "object",
"properties": {"Btest": {"type": "string"}},
},
},
"Ctest": {"type": "string", "title": "CTitle"},
}
},
use_titles=True,
)
parser.parse()
assert set(parser.main_sheet) == set(["CTitle"])
assert set(parser.sub_sheets) == set(["ATitle"])
assert list(parser.sub_sheets["ATitle"]) == []
w = recwarn.pop(UserWarning)
assert "Field Atest/0/Btest is missing a title" in str(w.message)
def test_titles_rollup():
parser = SchemaParser(
root_schema_dict={
"properties": {
"Atest": {
"type": "array",
"title": "ATitle",
"rollUp": ["Btest"],
"items": {
"type": "object",
"properties": {
"Btest": {"type": "string", "title": "BTitle",},
"Ctest": {"type": "string", "title": "CTitle",},
},
},
},
}
},
rollup=True,
use_titles=True,
)
parser.parse()
assert set(parser.main_sheet) == set(["ATitle:BTitle"])
assert set(parser.sub_sheets) == set(["ATitle"])
assert set(parser.sub_sheets["ATitle"]) == set(["ATitle:BTitle", "ATitle:CTitle"])
def test_schema_from_uri(httpserver):
httpserver.serve_content('{"a":{"$ref":"#/b"}, "b":"c"}', 404)
parser = SchemaParser(schema_filename=httpserver.url)
assert parser.root_schema_dict["a"] == "c"
test_json_loader_local_refs_disabled_is_ref_local_data_returns_true = [
(
"file:///home/odsc/work/flatten-tool/examples/create-template/refs/definitions.json#/definition/address"
),
("definitions.json#/definition/address"),
]
@pytest.mark.parametrize(
"data", test_json_loader_local_refs_disabled_is_ref_local_data_returns_true
)
def test_json_loader_local_refs_disabled_is_ref_local_true(data):
assert True == JsonLoaderLocalRefsDisabled().is_ref_local(data)
test_json_loader_local_refs_disabled_is_ref_local_data_returns_false = [
(
"https://raw.githubusercontent.com/openownership/data-standard/master/schema/beneficial-ownership-statements.json"
),
(
"http://raw.githubusercontent.com/openownership/data-standard/master/schema/beneficial-ownership-statements.json"
),
]
@pytest.mark.parametrize(
"data", test_json_loader_local_refs_disabled_is_ref_local_data_returns_false
)
def test_json_loader_local_refs_disabled_is_ref_local_false(data):
assert False == JsonLoaderLocalRefsDisabled().is_ref_local(data)
|
|
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from os import environ
from sys import path
import urlparse
import djcelery
redis_url = urlparse.urlparse(environ.get('REDISCLOUD_URL',
'redis://localhost:6379/0'))
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath so we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '%s:%s' % (redis_url.hostname, redis_url.port),
'OPTIONS': {
'DB': 0,
'PASSWORD': redis_url.password,
}
}
}
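# Worked example (comment only): with the fallback REDISCLOUD_URL above,
# urlparse('redis://localhost:6379/0') yields hostname 'localhost', port 6379
# and no password, so LOCATION resolves to 'localhost:6379' and PASSWORD to None.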
########## END CACHE CONFIGURATION
########## CELERY SETTINGS
djcelery.setup_loader()
BROKER_URL = redis_url.geturl()
BROKER_BACKEND = "redis"
REDIS_CONNECT_RETRY = True
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_REDIS_MAX_CONNECTIONS = 256
########## END CELERY SETTINGS
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Europe/Stockholm'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'static'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
# normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = r"^ws1di#ap0@ko($8$nc+7oey^-=y4l7z*%e(6ti*1kzyynq(4q"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
# Database migration helpers:
'south',
'djcelery',
'gunicorn',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'hackaway.article',
'hackaway.tag',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
########## END WSGI CONFIGURATION
########## CELERY CONFIGURATION
# See: http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html
djcelery.setup_loader()
########## END CELERY CONFIGURATION
|
|
"""
################################################################################
#
# PyISPconfig - Benjamin Bouvier (benjamin.bouvier29@gmail.com)
#
################################################################################
# Copyright (c) 2012, Bouvier
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of Benjamin Bouvier. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
import random
from SOAPpy import SOAP
from SOAPpy import *
class PyISPconfig(object):
""" A simple wrapper around the ISPconfig API """
def __init__(self, ip, login, password, port=8080):
"""
The constructor.
Param:
ip -- The server's IP.
login -- remote user's Login.
password -- remote user's Password.
port -- The port of IspConfig on remote server (default 8080).
"""
self.ip = ip
self.login = login
self.password = password
self.port = port
self.session_id = None
self.error = {}
#Creates the base url
self.url = "https://%s:%s/remote/" % (self.ip, self.port)
        #SOAP connection to the remote server
self.server = SOAPProxy(self.url)
        #Turn off SOAP debug output
self.server.config.dumpSOAPOut = 0
self.server.config.dumpSOAPIn = 0
#Login as remote user
if not self.session_id:
self.session_id = self._call("login", (self.login, self.password))
def _call(self, method, params=None):
"""
        Do a SOAP request to the ISPconfig API and return the result.
"""
if self.session_id:
            #Add the session_id at the beginning of params
session_id = (self.session_id,)
            if params:
                if isinstance(params, tuple):
                    params = session_id + params
                else:
                    params = session_id + (params,)
try:
#Invoke asked method on the server
response = self.server.invoke(method, args=params)
except SOAP.faultType as e:
self.error = {"error": True, "type": "faultType", "detail": e.faultstring}
return False
else:
if not response:
self.error = {"error": True, "type": "string", "detail": "SOAP request return null"}
return response
def check_response(self, response, type, error_message="Problem during check response"):
"""
        Check the type of a response and return an error message on mismatch.
        Param:
        response -- The response to check.
        type -- The expected type.
        error_message -- Message stored in self.error if the check fails.
"""
if isinstance(response, type):
return response
else:
if isinstance(response, dict) and response.get('error'):
if response['error']:
self.error = {"error": True, "type": response['type'], "detail": response['detail']}
return False
else:
self.error = {"error": True, "type": "string", "detail": error_message}
return False
def array_to_dict_response(self, params):
"""
        Convert the array element received by SOAP into a dictionary.
Param:
params -- The array
"""
dictionary = {}
rs = SOAP.simplify(params)
if isinstance(rs, list):
for test in rs:
dictionary[test['item']['key']] = test['item']['value']
return dictionary
elif isinstance(rs, dict):
for test in rs['item']:
dictionary[test['key']] = test['value']
return dictionary
else:
return False
def update_default_dict(self, default, params=None):
"""
Update default dict params if needed
Param:
        default -- The default dictionary.
        params -- The dictionary containing parameters to update.
"""
if params:
default.update(params)
return default
"""
def update_tuple_list(self,params_by_default, params = None):
Update default params tuple if needed
Param:
params_by_default -- The default list of tuple.
params -- The list of tuple containing the parameters to update.
if params:
for x, y in params:
i = 0
for x_default, y_default in params_by_default:
if x_default == x:
params_by_default[i] = (x_default,y)
i +=1
return params_by_default
"""
def dict_to_tuple(self, dict):
"""
        Convert a dictionary into a list of (key, value) tuples for the ISPconfig API.
        Param:
        dict -- The dictionary of parameters.
"""
list = []
for k, v in dict.iteritems():
list.append((k, v))
return list
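    # For example, dict_to_tuple({"active": "y"}) returns [("active", "y")],
    # the key/value pair list format passed to the ISPconfig SOAP methods.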
#
# Actions on Client
#
def client_get(self, id):
"""
Retrieves information about a client.
Param:
id -- Client id.
Output:
Return a Dictionary with key/values of the chosen client.
"""
response = self.array_to_dict_response(self._call("client_get", id))
if response:
return self.check_response(response, dict, "Error during 'client_get' method")
else:
self.error = {"error": True, "type": "string", "detail": "Client ID %s doesn't exist" % id}
return False
def client_add(self, params=None, reseller_id=0):
"""
Add a new client
Param:
reseller_id -- Reseller's ID.
        params -- Dictionary containing the client's information.
Output:
Returns the ID of the newly added Client.
"""
default = {"company_name": "awesomecompany",
"contact_name": "mynamecopntact",
"customer_no": "1",
"vat_id": "1",
"street": "fleetstreet",
"zip": "21337",
"city": "london",
"state": "bavaria",
"country": "UK",
"telephone": "123456789",
"mobile": "987654321",
"fax": "546718293",
"email": "e@mail.int",
"internet": "",
"icq": "111111111",
"notes": "awesome",
"dafault_mailserver": 1,
"limit_maildomain": -1,
"limit_mailbox": -1,
"limit_mailalias": -1,
"limit_mailaliasdomain": -1,
"limit_mailforward": -1,
"limit_mailcatchall": -1,
"limit_mailrouting": 0,
"limit_mailfilter": -1,
"limit_fetchmail": -1,
"limit_mailquota": -1,
"limit_spamfilter_wblist": 0,
"limit_spamfilter_user": 0,
"limit_spamfilter_policy": 1,
"default_webserver": 1,
"limit_web_ip": "",
"limit_web_domain": -1,
"limit_web_quota": -1,
"web_php_options": "no",
"limit_web_subdomain": -1,
"limit_web_aliasdomain": -1,
"limit_ftp_user": -1,
"limit_shell_user": 0,
"ssh_chroot": "no",
"limit_webdav_user": 0,
"default_dnsserver": 1,
"limit_dns_zone": -1,
"limit_dns_slave_zone": -1,
"limit_dns_record": -1,
"default_dbserver": 1,
"limit_database": -1,
"limit_cron": 0,
"limit_cron_type": "url",
"limit_cron_frequency": 5,
"limit_traffic_quota": -1,
"limit_client": 0,
"parent_client_id": 0,
"username": "user2",
"password": "brush",
"language": "en",
"usertheme": "default",
"template_master": 0,
"template_additional": "",
"created_at": 0}
params = self.dict_to_tuple(self.update_default_dict(default, params))
#Execute method
return self.check_response(self._call("client_add", (reseller_id, params)), int, "Error during 'client_add' method")
def client_get_id(self, id):
"""
Retrieves the client ID of the system user
Param:
        id -- System user ID.
Output:
Returns the client ID of the user with the entered system user ID.
"""
response = self._call("client_get_id", id)
if response:
return self.check_response(response, dict, "Error during 'client_get_id' method")
else:
self.error = {"error": True, "type": "string", "detail": "client ID of the system user %s doesn't exist" % id}
return False
def client_get_by_username(self, username):
"""
Return the client's information by username
Param:
username -- Client's username
Output:
Return a Dictionary with key/values of the chosen client.
"""
response = self.array_to_dict_response(self._call("client_get_by_username", username))
if response:
return self.check_response(response, dict, "Error during 'client_get_by_username' method")
else:
self.error = {"error": True, "type": "string", "detail": "Client username %s doesn't exist" % username}
return False
def client_change_password(self, id, password):
"""
        Change the client's password.
        Param:
        id -- Client's ID.
        password -- The new password.
Output:
Returns '1' if password has been changed.
"""
response = self._call("client_change_password", (id, password))
if response:
return self.check_response(response, int, "Error during 'client_change_password' method")
else:
self.error = {"error": True, "type": "string", "detail": "Problem during password's modification"}
return False
#
# Actions on Databases
#
def sites_database_add(self, client_id, params=None):
"""
Adds a new database.
Param:
        client_id -- Client's ID.
        params -- Dictionary containing the database's information.
Output:
Returns the ID of the newly added database.
"""
db_name_exist = db_username_exist = False
existing_db_name = existing_username = False
rand = random.randint(1, 10000)
new_db_params = None
default = {"server_id": 1,
"type": "mysql",
"database_name": "db_user%s%s" % (client_id, rand),
"database_user": "db_user%s%s" % (client_id, rand),
"database_password": "db_user",
"database_charset": "UTF8",
"remote_access": "y",
"remote_ips": "",
"active": "y"}
user_db = self.sites_database_get_all_by_user(client_id)
#Update default params
default = self.update_default_dict(default, params)
# Check database name and username doesn't exist
for db in user_db:
if db['database_name'] == default["database_name"]:
db_name_exist = True
existing_db_name = db['database_name']
if db['database_user'] == default["database_user"]:
db_username_exist = True
existing_username = db['database_user']
# Check new database's name doesn't exist and changes it
if db_name_exist or db_username_exist:
while db_name_exist or db_username_exist:
#Create new params
db_name_exist = db_username_exist = False
rand = random.randint(1, 10000)
new_db_params = {"database_name": "%s%s" % (existing_db_name, rand),
"database_user": "%s%s" % (existing_username, rand), }
#Recheck params doesn't exist in db
for db in user_db:
if db['database_name'] == new_db_params["database_name"]:
db_name_exist = True
existing_db_name = db['database_name']
if db['database_user'] == new_db_params["database_user"]:
db_username_exist = True
existing_username = db['database_user']
#Update params by new params
default = self.update_default_dict(default, new_db_params)
#SOAPRequest
default = self.dict_to_tuple(default)
response = self._call("sites_database_add", (client_id, default))
#Check response
return self.check_response(response, int, "Error during 'sites_database_add' method")
def sites_database_get(self, id):
"""
Retrieves information about a database.
Param:
        id -- Database's ID.
Output:
Return a Dictionary with key/values of the chosen database.
"""
response = self.array_to_dict_response(self._call("sites_database_get", id))
if response:
return self.check_response(response, dict, "Error during 'sites_database_get' method")
else:
self.error = {"error": True, "type": "string", "detail": "Database ID %s doesn't exist" % id}
return False
def sites_database_delete(self, id):
"""
Deletes a database.
Param:
        id -- Database's ID.
Output:
Returns the number of deleted records.
"""
response = self._call("sites_database_delete", id)
if response:
return self.check_response(response, int, "Error during 'sites_database_delete' method")
else:
self.error = {"error": True, "type": "string", "detail": "Problem during deleting Database ID %s" % id}
return False
def sites_database_get_all_by_user(self, client_id):
"""
Returns information about the databases of the system user.
Param:
client_id -- Client's id
Output:
Return a list of Dictionaries with key/values with databases's values.
"""
response = self._call("sites_database_get_all_by_user", client_id)
if not response:
self.error = {"error": True, "type": "string", "detail": "No database for client ID %s" % client_id}
return False
else:
list = []
if isinstance(response, typedArrayType):
for answer in response:
list.append(self.array_to_dict_response(answer))
#Check response
return self.check_response(list, type(list), "Error during 'sites_database_get_all_by_user' method")
def sites_database_update(self, db_id, params=None):
"""
Updates a database.
Param:
        db_id -- Database's ID.
        params -- Dictionary containing the database's information to update.
Output:
Returns the number of affected rows.
"""
new_params = None
dict_example = {'server_id': '1',
'sys_perm_other': '',
'sys_perm_user': 'riud',
'sys_userid': '1',
'sys_groupid': '9',
'remote_access': 'y',
'active': 'y',
'database_id': '3',
'database_charset': 'utf8',
'sys_perm_group': 'ru',
'database_password': '*E7FFA47F56E1835B4A9EB44301E23746C127E263',
'remote_ips': '',
'type': 'mysql',
'database_name': 'c8c8db_name2',
'database_user': 'c8c8db_name2'}
#Get original database configuration
origin = self.sites_database_get(db_id)
if not origin:
return {"error": True, "type": "string", "detail": "Database doesn't exist"}
else:
#Update original database configuration
new_params = self.dict_to_tuple(self.update_default_dict(origin, params))
#SOAPRequest
response = self._call("sites_database_update", (origin['sys_groupid'], db_id, new_params))
#Check response
return self.check_response(response, int, "Error during 'sites_database_update' method")
#
# Actions on Server
#
def server_get(self, server_id):
"""
Returns server information by its ID.
Param:
server_id -- Server's id
Output:
Return a Dictionary with key/values with the server parameter's values.
"""
#SOAPRequest
response = self._call("server_get", server_id)
if response:
response = self.array_to_dict_response(response)
return self.check_response(response, dict, "Error during 'server_get' method")
else:
return {"error": True, "type": "string", "detail": "Server doesn't exist"}
#
# TODO
# Problem with self._call("server_get_serverid_by_ip",ipaddress)
    # returns an empty arrayType
def server_get_serverid_by_ip(self, ipaddress):
"""
Returns server information by its IP.
Param:
ipaddress -- Server's ip
Output:
Return a Dictionary with key/values with the server parameter's values.
"""
response = self.array_to_dict_response(self._call("server_get_serverid_by_ip", ipaddress))
if response:
return self.check_response(response, dict, "Error during 'server_get_serverid_by_ip' method")
else:
return {"error": True, "type": "string", "detail": "Server doesn't exist with %s IP Adress" % ipaddress}
def logout(self):
"""
Cancels a remote session.
Output:
None.
"""
self._call("logout")
return True
def error_message(self):
"""
Display readable error message.
Output:
Return string error message.
"""
if(self.error.get('error') and self.error['error']):
return str(self.error['detail'])
else:
return "No error message"
#
# Actions on DNS
#
def dns_zone_get_id(self, domain):
"""
Return the dns zone id by domain name
Param:
        domain -- The zone's domain name.
        Output:
        Returns the ID of the DNS zone matching the domain name.
"""
response = self._call("dns_zone_get_id", domain)
if response:
return response
else:
self.error = {"error": True, "type": "string", "detail": "DNS zone %s doesn't exist" % domain}
return False
def dns_zone_get(self, zone_id):
"""
Returns dns zone information by its id.
Param:
zone_id - ID of DNS zone
Output:
Return a Dictionary with key/values with the zone parameter's values.
"""
response = self.array_to_dict_response(self._call("dns_zone_get", zone_id))
if response:
return self.check_response(response, dict, "Error during 'dns_zone_get' method")
else:
return {"error": True, "type": "string", "detail": "Zone doesn't exist with ID %s" % zone_id}
def dns_a_get_id(self, dns_zone_id, record):
"""
Return the record id by name and zone ID
Param:
dns_zone_id - zone ID
record - name of record in zone dns_zone_id
Output:
        Returns the ID of the record found in the zone.
"""
response = self._call("dns_a_get_id", (dns_zone_id,record))
if response:
return response
else:
self.error = {"error": True, "type": "string", "detail": "DNS record %s doesn't exist" % record}
return False
def dns_a_add(self, client_id, params=None):
"""
Adds a new DNS A record.
Param:
        client_id -- Client's ID.
        params -- Dictionary containing the record's information.
Output:
Returns the ID of the newly added record.
"""
default = {"server_id": 1,
"zone": 1,
"name": "www",
"data": "127.0.0.1",
"ttl": "3600",
"type": "A",
"active": "y"}
# Search server id by zone
        if params and params.get('zone'):
server_id = self.dns_zone_get(params['zone'])
if server_id:
params['server_id'] = server_id['server_id']
#Update default params
default = self.update_default_dict(default, params)
#SOAPRequest
default = self.dict_to_tuple(default)
response = self._call("dns_a_add", (client_id, default))
#Check response
return self.check_response(response, int, "Error during 'dns_a_add' method")
def dns_a_delete(self, id):
"""
        Deletes an A record.
        Param:
        id - ID of the A record to delete.
Output:
Returns the number of deleted records.
"""
response = self._call("dns_a_delete", id)
if response:
return self.check_response(response, int, "Error during 'dns_a_delete' method")
else:
self.error = {"error": True, "type": "string", "detail": "Problem during deleting A record ID %s" % id}
return False
def dns_mx_get_id(self, dns_zone_id, record):
"""
Return the record id by name and zone ID
Param:
dns_zone_id - zone ID
record - name of record in zone dns_zone_id
Output:
        Returns the ID of the record found in the zone.
"""
response = self._call("dns_mx_get_id", (dns_zone_id,record))
if response:
return response
else:
self.error = {"error": True, "type": "string", "detail": "DNS record %s doesn't exist" % record}
return False
def dns_mx_add(self, client_id, params=None):
"""
Adds a new DNS MX record.
Param:
        client_id -- Client's ID.
        params -- Dictionary containing the record's information.
Output:
Returns the ID of the newly added record.
"""
default = {"server_id": 1,
"zone": 1,
"name": "www",
"data": "127.0.0.1",
"aux": 10,
"ttl": "3600",
"type": "MX",
"active": "y"}
# Search server id by zone
        if params and params.get('zone'):
server_id = self.dns_zone_get(params['zone'])
if server_id:
params['server_id'] = server_id['server_id']
#Update default params
default = self.update_default_dict(default, params)
#SOAPRequest
default = self.dict_to_tuple(default)
response = self._call("dns_mx_add", (client_id, default))
#Check response
return self.check_response(response, int, "Error during 'dns_mx_add' method")
def dns_mx_delete(self, id):
"""
        Deletes an MX record.
        Param:
        id - ID of the MX record to delete.
Output:
Returns the number of deleted records.
"""
response = self._call("dns_mx_delete", id)
if response:
return self.check_response(response, int, "Error during 'dns_mx_delete' method")
else:
self.error = {"error": True, "type": "string", "detail": "Problem during deleting A record ID %s" % id}
return False
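# Hedged usage sketch (not part of the wrapper): the host, credentials and
# client ID below are placeholders for illustration only.
if __name__ == "__main__":
    api = PyISPconfig("192.0.2.10", "remote_user", "secret", port=8080)
    client = api.client_get(1)
    if client:
        print(client)  # dictionary of the client's fields
    else:
        print(api.error_message())
    api.logout()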
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The worker communicates with the scheduler and does two things:
1. Sends all tasks that have to be run
2. Gets tasks from the scheduler that should be run
When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.Scheduler` instance.
When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance.
Everything in this module is private to luigi and may change in incompatible
ways between versions. The exception is the exception types and the
:py:class:`worker` config class.
"""
import collections
import getpass
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
try:
import Queue
except ImportError:
import queue as Queue
import random
import socket
import threading
import time
import traceback
import types
from luigi import six
from luigi import notifications
from luigi.event import Event
from luigi.task_register import load_task
from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, UNKNOWN, Scheduler, RetryPolicy
from luigi.scheduler import WORKER_STATE_ACTIVE, WORKER_STATE_DISABLED
from luigi.target import Target
from luigi.task import Task, flatten, getpaths, Config
from luigi.task_register import TaskClassException
from luigi.task_status import RUNNING
from luigi.parameter import FloatParameter, IntParameter, BoolParameter
try:
import simplejson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
# Prevent fork() from being called during a C-level getaddrinfo(), which uses a process-global mutex
# that may not be unlocked in the child process, leaving the child locked indefinitely.
fork_lock = threading.Lock()
# Why we assert on _WAIT_INTERVAL_EPS:
# multiprocessing.Queue.get() is undefined for timeout=0 it seems:
# https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get.
# I also tried with really low epsilon, but then ran into the same issue where
# the test case "test_external_dependency_worker_is_patient" got stuck. So I
# unscientifically just set the final value to a floating point number that
# "worked for me".
_WAIT_INTERVAL_EPS = 0.00001
def _is_external(task):
return task.run is None or task.run == NotImplemented
def _get_retry_policy_dict(task):
return RetryPolicy(task.retry_count, task.disable_hard_timeout, task.disable_window_seconds)._asdict()
class TaskException(Exception):
pass
GetWorkResponse = collections.namedtuple('GetWorkResponse', (
'task_id',
'running_tasks',
'n_pending_tasks',
'n_unique_pending',
'n_pending_last_scheduled',
'worker_state',
))
class TaskProcess(multiprocessing.Process):
""" Wrap all task execution in this class.
Mainly for convenience since this is run in a separate process. """
def __init__(self, task, worker_id, result_queue, status_reporter,
use_multiprocessing=False, worker_timeout=0):
super(TaskProcess, self).__init__()
self.task = task
self.worker_id = worker_id
self.result_queue = result_queue
self.status_reporter = status_reporter
if task.worker_timeout is not None:
worker_timeout = task.worker_timeout
self.timeout_time = time.time() + worker_timeout if worker_timeout else None
self.use_multiprocessing = use_multiprocessing or self.timeout_time is not None
def _run_get_new_deps(self):
self.task.set_tracking_url = self.status_reporter.update_tracking_url
self.task.set_status_message = self.status_reporter.update_status
task_gen = self.task.run()
self.task.set_tracking_url = None
self.task.set_status_message = None
if not isinstance(task_gen, types.GeneratorType):
return None
next_send = None
while True:
try:
if next_send is None:
requires = six.next(task_gen)
else:
requires = task_gen.send(next_send)
except StopIteration:
return None
new_req = flatten(requires)
if all(t.complete() for t in new_req):
next_send = getpaths(requires)
else:
new_deps = [(t.task_module, t.task_family, t.to_str_params())
for t in new_req]
return new_deps
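    # Illustrative note: the generator handling above is what backs dynamic
    # dependencies, i.e. a task whose run() yields further requirements, e.g.
    # (hypothetical task)
    #
    #   class Fetch(luigi.Task):
    #       def run(self):
    #           other = yield OtherTask()   # suspended until OtherTask completes;
    #                                       # the value sent back is its output target(s)
    #
    # Incomplete requirements are handed back as (module, family, params) triples
    # so the worker can schedule them and re-run this task afterwards.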
def run(self):
logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task)
if self.use_multiprocessing:
# Need to have different random seeds if running in separate processes
random.seed((os.getpid(), time.time()))
status = FAILED
expl = ''
missing = []
new_deps = []
try:
# Verify that all the tasks are fulfilled! For external tasks we
# don't care about unfulfilled dependencies, because we are just
# checking completeness of self.task so outputs of dependencies are
# irrelevant.
if not _is_external(self.task):
missing = [dep.task_id for dep in self.task.deps() if not dep.complete()]
if missing:
deps = 'dependency' if len(missing) == 1 else 'dependencies'
raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing)))
self.task.trigger_event(Event.START, self.task)
t0 = time.time()
status = None
if _is_external(self.task):
# External task
# TODO(erikbern): We should check for task completeness after non-external tasks too!
# This will resolve #814 and make things a lot more consistent
if self.task.complete():
status = DONE
else:
status = FAILED
expl = 'Task is an external data dependency ' \
'and data does not exist (yet?).'
else:
new_deps = self._run_get_new_deps()
status = DONE if not new_deps else PENDING
if new_deps:
logger.info(
'[pid %s] Worker %s new requirements %s',
os.getpid(), self.worker_id, self.task)
elif status == DONE:
self.task.trigger_event(
Event.PROCESSING_TIME, self.task, time.time() - t0)
expl = self.task.on_success()
logger.info('[pid %s] Worker %s done %s', os.getpid(),
self.worker_id, self.task)
self.task.trigger_event(Event.SUCCESS, self.task)
except KeyboardInterrupt:
raise
except BaseException as ex:
status = FAILED
logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task)
self.task.trigger_event(Event.FAILURE, self.task, ex)
raw_error_message = self.task.on_failure(ex)
expl = raw_error_message
finally:
self.result_queue.put(
(self.task.task_id, status, expl, missing, new_deps))
def _recursive_terminate(self):
import psutil
try:
parent = psutil.Process(self.pid)
children = parent.children(recursive=True)
# terminate parent. Give it a chance to clean up
super(TaskProcess, self).terminate()
parent.wait()
# terminate children
for child in children:
try:
child.terminate()
except psutil.NoSuchProcess:
continue
except psutil.NoSuchProcess:
return
def terminate(self):
"""Terminate this process and its subprocesses."""
        # default terminate() doesn't clean up child processes; it orphans them.
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate()
class TaskStatusReporter(object):
"""
Reports task status information to the scheduler.
This object must be pickle-able for passing to `TaskProcess` on systems
where fork method needs to pickle the process object (e.g. Windows).
"""
def __init__(self, scheduler, task_id, worker_id):
self._task_id = task_id
self._worker_id = worker_id
self._scheduler = scheduler
def update_tracking_url(self, tracking_url):
self._scheduler.add_task(
task_id=self._task_id,
worker=self._worker_id,
status=RUNNING,
tracking_url=tracking_url
)
def update_status(self, message):
self._scheduler.set_task_status_message(self._task_id, message)
class SingleProcessPool(object):
"""
Dummy process pool for using a single processor.
Imitates the api of multiprocessing.Pool using single-processor equivalents.
"""
def apply_async(self, function, args):
return function(*args)
def close(self):
pass
def join(self):
pass
class DequeQueue(collections.deque):
"""
deque wrapper implementing the Queue interface.
"""
def put(self, obj, block=None, timeout=None):
return self.append(obj)
def get(self, block=None, timeout=None):
try:
return self.pop()
except IndexError:
raise Queue.Empty
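# Note: DequeQueue.get() pops from the right-hand end, so retrieval is LIFO
# (put(1); put(2); get() -> 2), and get() on an empty queue raises Queue.Empty to
# mirror the multiprocessing.Queue interface used elsewhere in this module.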
class AsyncCompletionException(Exception):
"""
Exception indicating that something went wrong with checking complete.
"""
def __init__(self, trace):
self.trace = trace
class TracebackWrapper(object):
"""
Class to wrap tracebacks so we can know they're not just strings.
"""
def __init__(self, trace):
self.trace = trace
def check_complete(task, out_queue):
"""
Checks if task is complete, puts the result to out_queue.
"""
logger.debug("Checking if %s is complete", task)
try:
is_complete = task.complete()
except Exception:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete))
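# check_complete() is the function Worker.add() dispatches through pool.apply_async();
# the (task, is_complete) pairs it enqueues are read back in Worker._add(), where a
# TracebackWrapper result is turned into an AsyncCompletionException.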
class worker(Config):
# NOTE: `section.config-variable` in the config_path argument is deprecated in favor of `worker.config_variable`
ping_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-ping-interval'))
keep_alive = BoolParameter(default=False,
config_path=dict(section='core', name='worker-keep-alive'))
count_uniques = BoolParameter(default=False,
config_path=dict(section='core', name='worker-count-uniques'),
description='worker-count-uniques means that we will keep a '
'worker alive only if it has a unique pending task, as '
'well as having keep-alive true')
count_last_scheduled = BoolParameter(default=False,
description='Keep a worker alive only if there are '
'pending tasks which it was the last to '
'schedule.')
wait_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-wait-interval'))
wait_jitter = FloatParameter(default=5.0)
max_reschedules = IntParameter(default=1,
config_path=dict(section='core', name='worker-max-reschedules'))
timeout = IntParameter(default=0,
config_path=dict(section='core', name='worker-timeout'))
task_limit = IntParameter(default=None,
config_path=dict(section='core', name='worker-task-limit'))
retry_external_tasks = BoolParameter(default=False,
config_path=dict(section='core', name='retry-external-tasks'),
description='If true, incomplete external tasks will be '
'retested for completion while Luigi is running.')
send_failure_email = BoolParameter(default=True,
                                       description='If true, send e-mails directly from the worker '
'on failure')
no_install_shutdown_handler = BoolParameter(default=False,
                                                description='If true, the SIGUSR1 shutdown handler will '
                                                            'NOT be installed on the worker')
class KeepAliveThread(threading.Thread):
"""
Periodically tell the scheduler that the worker still lives.
"""
def __init__(self, scheduler, worker_id, ping_interval, rpc_message_callback):
super(KeepAliveThread, self).__init__()
self._should_stop = threading.Event()
self._scheduler = scheduler
self._worker_id = worker_id
self._ping_interval = ping_interval
self._rpc_message_callback = rpc_message_callback
def stop(self):
self._should_stop.set()
def run(self):
while True:
self._should_stop.wait(self._ping_interval)
if self._should_stop.is_set():
logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id)
break
with fork_lock:
response = None
try:
response = self._scheduler.ping(worker=self._worker_id)
except: # httplib.BadStatusLine:
logger.warning('Failed pinging scheduler')
# handle rpc messages
if response:
for message in response["rpc_messages"]:
self._rpc_message_callback(message)
def rpc_message_callback(fn):
fn.is_rpc_message_callback = True
return fn
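# Illustrative note: Worker methods decorated with @rpc_message_callback (such as
# set_worker_processes() at the bottom of this file) can be triggered by the scheduler
# through an RPC message of the form {'name': 'set_worker_processes', 'kwargs': {'n': 4}};
# KeepAliveThread forwards such messages to Worker._handle_rpc_message() after each ping.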
class Worker(object):
"""
Worker object communicates with a scheduler.
Simple class that talks to a scheduler and:
* tells the scheduler what it has to do + its dependencies
* asks for stuff to do (pulls it in a loop and runs it)
"""
def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs):
if scheduler is None:
scheduler = Scheduler()
self.worker_processes = int(worker_processes)
self._worker_info = self._generate_worker_info()
if not worker_id:
worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info])
self._config = worker(**kwargs)
assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive"
assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero"
self._id = worker_id
self._scheduler = scheduler
self._assistant = assistant
self._stop_requesting_work = False
self.host = socket.gethostname()
self._scheduled_tasks = {}
self._suspended_tasks = {}
self._batch_running_tasks = {}
self._batch_families_sent = set()
self._first_task = None
self.add_succeeded = True
self.run_succeeded = True
self.unfulfilled_counts = collections.defaultdict(int)
# note that ``signal.signal(signal.SIGUSR1, fn)`` only works inside the main execution thread, which is why we
# provide the ability to conditionally install the hook.
if not self._config.no_install_shutdown_handler:
try:
signal.signal(signal.SIGUSR1, self.handle_interrupt)
signal.siginterrupt(signal.SIGUSR1, False)
except AttributeError:
pass
# Keep info about what tasks are running (could be in other processes)
self._task_result_queue = multiprocessing.Queue()
self._running_tasks = {}
# Stuff for execution_summary
self._add_task_history = []
self._get_work_response_history = []
def _add_task(self, *args, **kwargs):
"""
Call ``self._scheduler.add_task``, but store the values too so we can
implement :py:func:`luigi.execution_summary.summary`.
"""
task_id = kwargs['task_id']
status = kwargs['status']
runnable = kwargs['runnable']
task = self._scheduled_tasks.get(task_id)
if task:
msg = (task, status, runnable)
self._add_task_history.append(msg)
kwargs['owners'] = task._owner_list()
if task_id in self._batch_running_tasks:
for batch_task in self._batch_running_tasks.pop(task_id):
self._add_task_history.append((batch_task, status, True))
self._scheduler.add_task(*args, **kwargs)
logger.info('Informed scheduler that task %s has status %s', task_id, status)
def __enter__(self):
"""
Start the KeepAliveThread.
"""
self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id,
self._config.ping_interval,
self._handle_rpc_message)
self._keep_alive_thread.daemon = True
self._keep_alive_thread.start()
return self
def __exit__(self, type, value, traceback):
"""
Stop the KeepAliveThread and kill still running tasks.
"""
self._keep_alive_thread.stop()
self._keep_alive_thread.join()
for task in self._running_tasks.values():
if task.is_alive():
task.terminate()
return False # Don't suppress exception
def _generate_worker_info(self):
# Generate as much info as possible about the worker
# Some of these calls might not be available on all OS's
args = [('salt', '%09d' % random.randrange(0, 999999999)),
('workers', self.worker_processes)]
try:
args += [('host', socket.gethostname())]
except BaseException:
pass
try:
args += [('username', getpass.getuser())]
except BaseException:
pass
try:
args += [('pid', os.getpid())]
except BaseException:
pass
try:
sudo_user = os.getenv("SUDO_USER")
if sudo_user:
args.append(('sudo_user', sudo_user))
except BaseException:
pass
return args
def _validate_task(self, task):
if not isinstance(task, Task):
raise TaskException('Can not schedule non-task %s' % task)
if not task.initialized():
# we can't get the repr of it since it's not initialized...
raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__)
def _log_complete_error(self, task, tb):
log_msg = "Will not run {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_dependency_error(self, task, tb):
log_msg = "Will not run {task} or any dependencies due to error in deps() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_unexpected_error(self, task):
logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause
def _announce_scheduling_failure(self, task, expl):
try:
self._scheduler.announce_scheduling_failure(
worker=self._id,
task_name=str(task),
family=task.task_family,
params=task.to_str_params(only_significant=True),
expl=expl,
owners=task._owner_list(),
)
        except Exception:
            formatted_traceback = traceback.format_exc()
            self._email_unexpected_error(task, formatted_traceback)
            raise
def _email_complete_error(self, task, formatted_traceback):
self._announce_scheduling_failure(task, formatted_traceback)
if self._config.send_failure_email:
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not run {task} or any dependencies due to error in complete() method",
)
def _email_dependency_error(self, task, formatted_traceback):
self._announce_scheduling_failure(task, formatted_traceback)
if self._config.send_failure_email:
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not run {task} or any dependencies due to error in deps() method",
)
def _email_unexpected_error(self, task, formatted_traceback):
# this sends even if failure e-mails are disabled, as they may indicate
# a more severe failure that may not reach other alerting methods such
# as scheduler batch notification
self._email_error(task, formatted_traceback,
subject="Luigi: Framework error while scheduling {task}. Host: {host}",
headline="Luigi framework error",
)
def _email_task_failure(self, task, formatted_traceback):
if self._config.send_failure_email:
self._email_error(task, formatted_traceback,
subject="Luigi: {task} FAILED. Host: {host}",
headline="A task failed when running. Most likely run() raised an exception.",
)
def _email_error(self, task, formatted_traceback, subject, headline):
formatted_subject = subject.format(task=task, host=self.host)
formatted_headline = headline.format(task=task, host=self.host)
command = subprocess.list2cmdline(sys.argv)
message = notifications.format_task_error(
formatted_headline, task, command, formatted_traceback)
notifications.send_error_email(formatted_subject, message, task.owner_email)
def _handle_task_load_error(self, exception, task_ids):
msg = 'Cannot find task(s) sent by scheduler: {}'.format(','.join(task_ids))
logger.exception(msg)
subject = 'Luigi: {}'.format(msg)
error_message = notifications.wrap_traceback(exception)
for task_id in task_ids:
self._add_task(
worker=self._id,
task_id=task_id,
status=FAILED,
runnable=False,
expl=error_message,
)
notifications.send_error_email(subject, error_message)
def add(self, task, multiprocess=False):
"""
Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before.
"""
if self._first_task is None and hasattr(task, 'task_id'):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
# we track queue size ourselves because len(queue) won't work for multiprocessing
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
item, is_complete = current
for next in self._add(item, is_complete):
if next.task_id not in seen:
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
raise
finally:
pool.close()
pool.join()
return self.add_succeeded
def _add_task_batcher(self, task):
family = task.task_family
if family not in self._batch_families_sent:
task_class = type(task)
batch_param_names = task_class.batch_param_names()
if batch_param_names:
self._scheduler.add_task_batcher(
worker=self._id,
task_family=family,
batched_args=batch_param_names,
max_batch_size=task.max_batch_size,
)
self._batch_families_sent.add(family)
def _add(self, task, is_complete):
if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit:
logger.warning('Will not run %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit)
deps = None
status = UNKNOWN
runnable = False
else:
formatted_traceback = None
try:
self._check_complete_value(is_complete)
except KeyboardInterrupt:
raise
except AsyncCompletionException as ex:
formatted_traceback = ex.trace
except BaseException:
formatted_traceback = traceback.format_exc()
if formatted_traceback is not None:
self.add_succeeded = False
self._log_complete_error(task, formatted_traceback)
task.trigger_event(Event.DEPENDENCY_MISSING, task)
self._email_complete_error(task, formatted_traceback)
deps = None
status = UNKNOWN
runnable = False
elif is_complete:
deps = None
status = DONE
runnable = False
task.trigger_event(Event.DEPENDENCY_PRESENT, task)
elif _is_external(task):
deps = None
status = PENDING
runnable = worker().retry_external_tasks
task.trigger_event(Event.DEPENDENCY_MISSING, task)
logger.warning('Data for %s does not exist (yet?). The task is an '
                           'external data dependency, so it can not be run from'
' this luigi process.', task)
else:
try:
deps = task.deps()
self._add_task_batcher(task)
except Exception as ex:
formatted_traceback = traceback.format_exc()
self.add_succeeded = False
self._log_dependency_error(task, formatted_traceback)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_dependency_error(task, formatted_traceback)
deps = None
status = UNKNOWN
runnable = False
else:
status = PENDING
runnable = True
if task.disabled:
status = DISABLED
if deps:
for d in deps:
self._validate_dependency(d)
task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d)
yield d # return additional tasks to add
deps = [d.task_id for d in deps]
self._scheduled_tasks[task.task_id] = task
self._add_task(
worker=self._id,
task_id=task.task_id,
status=status,
deps=deps,
runnable=runnable,
priority=task.priority,
resources=task.process_resources(),
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
batchable=task.batchable,
retry_policy_dict=_get_retry_policy_dict(task),
)
def _validate_dependency(self, dependency):
if isinstance(dependency, Target):
raise Exception('requires() can not return Target objects. Wrap it in an ExternalTask class')
elif not isinstance(dependency, Task):
raise Exception('requires() must return Task objects')
def _check_complete_value(self, is_complete):
if is_complete not in (True, False):
if isinstance(is_complete, TracebackWrapper):
raise AsyncCompletionException(is_complete.trace)
raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete)
def _add_worker(self):
self._worker_info.append(('first_task', self._first_task))
self._scheduler.add_worker(self._id, self._worker_info)
def _log_remote_tasks(self, get_work_response):
logger.debug("Done")
logger.debug("There are no more tasks to run at this time")
if get_work_response.running_tasks:
for r in get_work_response.running_tasks:
logger.debug('%s is currently run by worker %s', r['task_id'], r['worker'])
elif get_work_response.n_pending_tasks:
logger.debug(
"There are %s pending tasks possibly being run by other workers",
get_work_response.n_pending_tasks)
if get_work_response.n_unique_pending:
logger.debug(
"There are %i pending tasks unique to this worker",
get_work_response.n_unique_pending)
if get_work_response.n_pending_last_scheduled:
logger.debug(
"There are %i pending tasks last scheduled by this worker",
get_work_response.n_pending_last_scheduled)
def _get_work_task_id(self, get_work_response):
if get_work_response.get('task_id') is not None:
return get_work_response['task_id']
elif 'batch_id' in get_work_response:
try:
task = load_task(
module=get_work_response.get('task_module'),
task_name=get_work_response['task_family'],
params_str=get_work_response['task_params'],
)
except Exception as ex:
self._handle_task_load_error(ex, get_work_response['batch_task_ids'])
self.run_succeeded = False
return None
self._scheduler.add_task(
worker=self._id,
task_id=task.task_id,
module=get_work_response.get('task_module'),
family=get_work_response['task_family'],
params=task.to_str_params(),
status=RUNNING,
batch_id=get_work_response['batch_id'],
)
return task.task_id
else:
return None
def _get_work(self):
if self._stop_requesting_work:
return GetWorkResponse(None, 0, 0, 0, 0, WORKER_STATE_DISABLED)
if self.worker_processes > 0:
logger.debug("Asking scheduler for work...")
r = self._scheduler.get_work(
worker=self._id,
host=self.host,
assistant=self._assistant,
current_tasks=list(self._running_tasks.keys()),
)
else:
logger.debug("Checking if tasks are still pending")
r = self._scheduler.count_pending(worker=self._id)
running_tasks = r['running_tasks']
task_id = self._get_work_task_id(r)
self._get_work_response_history.append({
'task_id': task_id,
'running_tasks': running_tasks,
})
if task_id is not None and task_id not in self._scheduled_tasks:
logger.info('Did not schedule %s, will load it dynamically', task_id)
try:
# TODO: we should obtain the module name from the server!
self._scheduled_tasks[task_id] = \
load_task(module=r.get('task_module'),
task_name=r['task_family'],
params_str=r['task_params'])
except TaskClassException as ex:
self._handle_task_load_error(ex, [task_id])
task_id = None
self.run_succeeded = False
if task_id is not None and 'batch_task_ids' in r:
batch_tasks = filter(None, [
self._scheduled_tasks.get(batch_id) for batch_id in r['batch_task_ids']])
self._batch_running_tasks[task_id] = batch_tasks
return GetWorkResponse(
task_id=task_id,
running_tasks=running_tasks,
n_pending_tasks=r['n_pending_tasks'],
n_unique_pending=r['n_unique_pending'],
# TODO: For a tiny amount of time (a month?) we'll keep forwards compatibility
            # That is, you can use a newer client than a server (Sep 2016)
n_pending_last_scheduled=r.get('n_pending_last_scheduled', 0),
worker_state=r.get('worker_state', WORKER_STATE_ACTIVE),
)
def _run_task(self, task_id):
task = self._scheduled_tasks[task_id]
task_process = self._create_task_process(task)
self._running_tasks[task_id] = task_process
if task_process.use_multiprocessing:
with fork_lock:
task_process.start()
else:
# Run in the same process
task_process.run()
def _create_task_process(self, task):
reporter = TaskStatusReporter(self._scheduler, task.task_id, self._id)
return TaskProcess(
task, self._id, self._task_result_queue, reporter,
use_multiprocessing=bool(self.worker_processes > 1),
worker_timeout=self._config.timeout
)
def _purge_children(self):
"""
Find dead children and put a response on the result queue.
:return:
"""
for task_id, p in six.iteritems(self._running_tasks):
if not p.is_alive() and p.exitcode:
error_msg = 'Task {} died unexpectedly with exit code {}'.format(task_id, p.exitcode)
p.task.trigger_event(Event.PROCESS_FAILURE, p.task, error_msg)
elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive():
p.terminate()
error_msg = 'Task {} timed out after {} seconds and was terminated.'.format(task_id, p.task.worker_timeout)
p.task.trigger_event(Event.TIMEOUT, p.task, error_msg)
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
def _handle_next_task(self):
"""
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
"""
while True:
self._purge_children() # Deal with subprocess failures
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
            if not task or task_id not in self._running_tasks:
                # Not a running task. Probably already removed.
                # Maybe it yielded something?
                continue
# external task if run not implemented, retry-able if config option is enabled.
external_task_retryable = _is_external(task) and self._config.retry_external_tasks
if status == FAILED and not external_task_retryable:
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant)
self._running_tasks.pop(task_id)
# re-add task to reschedule missing dependencies
if missing:
reschedule = True
# keep out of infinite loops by not rescheduling too many times
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return
def _sleeper(self):
# TODO is exponential backoff necessary?
while True:
jitter = self._config.wait_jitter
wait_interval = self._config.wait_interval + random.uniform(0, jitter)
logger.debug('Sleeping for %f seconds', wait_interval)
time.sleep(wait_interval)
yield
def _keep_alive(self, get_work_response):
"""
        Returns true if a worker should stay alive given a get_work_response.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker.
"""
if not self._config.keep_alive:
return False
elif self._assistant:
return True
elif self._config.count_last_scheduled:
return get_work_response.n_pending_last_scheduled > 0
elif self._config.count_uniques:
return get_work_response.n_unique_pending > 0
else:
return get_work_response.n_pending_tasks > 0
def handle_interrupt(self, signum, _):
"""
Stops the assistant from asking for more work on SIGUSR1
"""
if signum == signal.SIGUSR1:
self._start_phasing_out()
def _start_phasing_out(self):
"""
        Go into a mode where we don't ask for more work and quit once existing
tasks are done.
"""
self._config.keep_alive = False
self._stop_requesting_work = True
def run(self):
"""
Returns True if all scheduled tasks were executed successfully.
"""
logger.info('Running Worker with %d processes', self.worker_processes)
sleeper = self._sleeper()
self.run_succeeded = True
self._add_worker()
while True:
while len(self._running_tasks) >= self.worker_processes > 0:
logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
self._handle_next_task()
get_work_response = self._get_work()
if get_work_response.worker_state == WORKER_STATE_DISABLED:
self._start_phasing_out()
if get_work_response.task_id is None:
if not self._stop_requesting_work:
self._log_remote_tasks(get_work_response)
if len(self._running_tasks) == 0:
if self._keep_alive(get_work_response):
six.next(sleeper)
continue
else:
break
else:
self._handle_next_task()
continue
# task_id is not None:
logger.debug("Pending tasks: %s", get_work_response.n_pending_tasks)
self._run_task(get_work_response.task_id)
while len(self._running_tasks):
logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
self._handle_next_task()
return self.run_succeeded
def _handle_rpc_message(self, message):
logger.info("Worker %s got message %s" % (self._id, message))
# the message is a dict {'name': <function_name>, 'kwargs': <function_kwargs>}
name = message['name']
kwargs = message['kwargs']
# find the function and check if it's callable and configured to work
# as a message callback
func = getattr(self, name, None)
tpl = (self._id, name)
if not callable(func):
logger.error("Worker %s has no function '%s'" % tpl)
elif not getattr(func, "is_rpc_message_callback", False):
logger.error("Worker %s function '%s' is not available as rpc message callback" % tpl)
else:
logger.info("Worker %s successfully dispatched rpc message to function '%s'" % tpl)
func(**kwargs)
@rpc_message_callback
def set_worker_processes(self, n):
# set the new value
self.worker_processes = max(1, n)
# tell the scheduler
self._scheduler.add_worker(self._id, {'workers': self.worker_processes})
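# Hedged usage sketch (most code reaches Worker through luigi.build() or the luigi
# command line rather than directly; MyTask below is a hypothetical Task subclass):
#
#   sch = Scheduler()                 # local, in-process scheduler
#   with Worker(scheduler=sch) as w:  # __enter__ starts the keep-alive thread
#       w.add(MyTask())               # schedule MyTask and its dependencies
#       w.run()                       # pull and execute work until nothing is pending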
|
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
import pytest
from marshmallow import fields, Schema
from marshmallow.exceptions import ValidationError
from tests.base import * # noqa
class TestStrict:
class StrictUserSchema(UserSchema):
class Meta:
strict = True
def test_strict_meta_option(self):
with pytest.raises(ValidationError):
self.StrictUserSchema().load({'email': 'foo.com'})
def test_strict_meta_option_is_inherited(self):
class StrictUserSchema(UserSchema):
class Meta:
strict = True
class ChildStrictSchema(self.StrictUserSchema):
pass
with pytest.raises(ValidationError):
ChildStrictSchema().load({'email': 'foo.com'})
class TestUnordered:
class UnorderedSchema(Schema):
name = fields.Str()
email = fields.Str()
class Meta:
ordered = False
def test_unordered_dump_returns_dict(self):
schema = self.UnorderedSchema()
u = User('steve', email='steve@steve.steve')
result = schema.dump(u)
assert not isinstance(result.data, OrderedDict)
assert type(result.data) is dict
def test_unordered_load_returns_dict(self):
schema = self.UnorderedSchema()
result = schema.load({'name': 'steve', 'email': 'steve@steve.steve'})
assert not isinstance(result.data, OrderedDict)
assert type(result.data) is dict
class KeepOrder(Schema):
class Meta:
ordered = True
name = fields.String(allow_none=True)
email = fields.Email(allow_none=True)
age = fields.Integer()
created = fields.DateTime()
id = fields.Integer(allow_none=True)
homepage = fields.Url()
birthdate = fields.Date()
class OrderedMetaSchema(Schema):
id = fields.Int(allow_none=True)
email = fields.Email(allow_none=True)
class Meta:
fields = ('name', 'email', 'age', 'created',
'id', 'homepage', 'birthdate')
ordered = True
class OrderedNestedOnly(Schema):
class Meta:
ordered = True
user = fields.Nested(KeepOrder)
class TestFieldOrdering:
@pytest.mark.parametrize('with_meta', (False, True))
def test_ordered_option_is_inherited(self, user, with_meta):
class ParentUnordered(Schema):
class Meta:
ordered = False
# KeepOrder is before ParentUnordered in MRO,
# so ChildOrderedSchema will be ordered
class ChildOrderedSchema(KeepOrder, ParentUnordered):
if with_meta:
class Meta:
pass
schema = ChildOrderedSchema()
assert schema.opts.ordered is True
assert schema.dict_class == OrderedDict
data, errors = schema.dump(user)
assert not errors
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
        # ParentUnordered is before KeepOrder in MRO,
        # so ChildUnorderedSchema will be unordered
class ChildUnorderedSchema(ParentUnordered, KeepOrder):
class Meta:
pass
schema = ChildUnorderedSchema()
assert schema.opts.ordered is False
def test_ordering_is_off_by_default(self):
class DummySchema(Schema):
pass
schema = DummySchema()
assert schema.ordered is False
def test_declared_field_order_is_maintained_on_dump(self, user):
ser = KeepOrder()
data, errs = ser.dump(user)
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_declared_field_order_is_maintained_on_load(self, serialized_user):
schema = KeepOrder()
data, errs = schema.load(serialized_user.data)
assert not errs
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_nested_field_order_with_only_arg_is_maintained_on_dump(self, user):
schema = OrderedNestedOnly()
data, errs = schema.dump({'user': user})
user_data = data['user']
keys = list(user_data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_nested_field_order_with_only_arg_is_maintained_on_load(self):
schema = OrderedNestedOnly()
data, errs = schema.load({'user': {
'name': 'Foo',
'email': 'Foo@bar.com',
'age': 42,
'created': dt.datetime.now().isoformat(),
'id': 123,
'homepage': 'http://foo.com',
'birthdate': dt.datetime.now().isoformat(),
}})
user_data = data['user']
keys = list(user_data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
    def test_nested_field_order_with_exclude_arg_is_maintained(self, user):
class HasNestedExclude(Schema):
class Meta:
ordered = True
user = fields.Nested(KeepOrder, exclude=('birthdate', ))
ser = HasNestedExclude()
data, errs = ser.dump({'user': user})
user_data = data['user']
keys = list(user_data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage']
def test_meta_fields_order_is_maintained_on_dump(self, user):
ser = OrderedMetaSchema()
data, errs = ser.dump(user)
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_meta_fields_order_is_maintained_on_load(self, serialized_user):
schema = OrderedMetaSchema()
data, errs = schema.load(serialized_user.data)
assert not errs
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
class TestIncludeOption:
class AddFieldsSchema(Schema):
name = fields.Str()
class Meta:
include = {
'from': fields.Str()
}
def test_fields_are_added(self):
s = self.AddFieldsSchema()
in_data = {'name': 'Steve', 'from': 'Oskosh'}
        result = s.load(in_data)
assert result.data == in_data
def test_ordered_included(self):
class AddFieldsOrdered(Schema):
name = fields.Str()
email = fields.Str()
class Meta:
include = OrderedDict([
('from', fields.Str()),
('in', fields.Str()),
('@at', fields.Str())
])
ordered = True
s = AddFieldsOrdered()
in_data = {'name': 'Steve', 'from': 'Oskosh', 'email': 'steve@steve.steve',
'in': 'VA', '@at': 'Charlottesville'}
# declared fields, then "included" fields
expected_fields = ['name', 'email', 'from', 'in', '@at']
assert list(AddFieldsOrdered._declared_fields.keys()) == expected_fields
result = s.load(in_data)
assert list(result.data.keys()) == expected_fields
def test_added_fields_are_inherited(self):
class AddFieldsChild(self.AddFieldsSchema):
email = fields.Str()
s = AddFieldsChild()
assert 'email' in s._declared_fields.keys()
assert 'from' in s._declared_fields.keys()
assert isinstance(s._declared_fields['from'], fields.Str)
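# Illustrative sketch of the pattern exercised by TestIncludeOption: Meta.include is
# how fields whose names are Python keywords (and so cannot be class attributes) are
# declared. Hypothetical schema, not part of the test suite:
#
#   class MessageSchema(Schema):
#       body = fields.Str()
#       class Meta:
#           include = {'from': fields.Str()}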
|
|
import xml.dom.minidom
import sys, os, glob, time, string
import multiprocessing
def file_exists(filename):
try:
with open(filename): return True
except:
return False
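# Note: the ID extraction in parsefile() assumes LUBM-style input names such as
# "University3_14.owl"; split('y')[1] gives "3_14.owl", from which the university
# ID ("3") and the department ID ("14") are recovered.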
def parsefile(infile):
filename = str(infile)
print "Current file is: " + infile
uniid = filename.split('y')[1].split('_')[0]
depid = filename.split('y')[1].split('_')[1].split('.')[0]
#print "Uni:" uniid
#print "Dep:" depid
outfile = infile + '.sql'
#check if file exists for resume purpose
try:
with open(outfile): return
except:
pass
f = open(outfile, "w")
print >> f,"USE lubm8000;"
inf = open(infile, "r")
indata = inf.read()
inf.close()
xmldoc = xml.dom.minidom.parseString(indata)
full_prof_list = xmldoc.getElementsByTagName('ub:FullProfessor')
assoc_prof_list = xmldoc.getElementsByTagName('ub:AssociateProfessor')
assist_prof_list = xmldoc.getElementsByTagName('ub:AssistantProfessor')
lect_list = xmldoc.getElementsByTagName('ub:Lecturer')
under_stud_list = xmldoc.getElementsByTagName('ub:UndergraduateStudent')
grad_stud_list = xmldoc.getElementsByTagName('ub:GraduateStudent')
research_list = xmldoc.getElementsByTagName('ub:ResearchGroup')
pub_list = xmldoc.getElementsByTagName('ub:Publication')
ta_list = xmldoc.getElementsByTagName('ub:TeachingAssistant')
ra_list = xmldoc.getElementsByTagName('ub:ResearchAssistant')
uni = xmldoc.getElementsByTagName('ub:University')[0].attributes['rdf:about'].value
dep = xmldoc.getElementsByTagName('ub:Department')[0].attributes['rdf:about'].value
print >> f,"INSERT INTO departments VALUES (%s, %s);" % (depid, uniid)
# full professors
for prof in full_prof_list :
nameuri = prof.attributes['rdf:about'].value
name = prof.getElementsByTagName('ub:name')[0].childNodes[0].nodeValue
tid = name.split('r')[2]
# degrees
under_d = prof.getElementsByTagName('ub:University')[0].attributes['rdf:about'].value
under = under_d.split('.')[1].split('y')[1]
grad_d = prof.getElementsByTagName('ub:University')[1].attributes['rdf:about'].value
grad = grad_d.split('.')[1].split('y')[1]
doc_d = prof.getElementsByTagName('ub:University')[2].attributes['rdf:about'].value
doc = doc_d.split('.')[1].split('y')[1]
# personal info
email = prof.getElementsByTagName('ub:emailAddress')[0].childNodes[0].nodeValue
phone = prof.getElementsByTagName('ub:telephone')[0].childNodes[0].nodeValue
res_int = prof.getElementsByTagName('ub:researchInterest')[0].childNodes[0].nodeValue
res = res_int.split('h')[1]
print >> f,"INSERT INTO teachers VALUES (%s, %s, %s, %s, '%s', %s, %s, %s, '%s', '%s', %s);" % (depid, uniid, 3, tid, name, under, grad, doc, email, phone, res)
# courses
course_list = prof.getElementsByTagName('ub:teacherOf')
for course in course_list :
c = course.attributes['rdf:resource'].value
cname = c.split('/')[3]
if cname[0] == 'C':
ctype = 0
cid = cname.split('e')[1]
else:
ctype = 1
cid = cname.split('e')[2]
print >> f,"INSERT INTO courses VALUES (%s, %s, %s, %s, %s, %s);" % (depid, uniid, ctype, cid, tid, 3)
head = xmldoc.getElementsByTagName('ub:headOf')[0].parentNode.attributes['rdf:about'].value
print >> f,"INSERT INTO heads VALUES (%s, %s, %s, %s);" % (depid, uniid, 3, head.split('r')[4])
# assoc professors
for prof in assoc_prof_list :
nameuri = prof.attributes['rdf:about'].value
name = prof.getElementsByTagName('ub:name')[0].childNodes[0].nodeValue
tid = name.split('r')[2]
# degrees
under_d = prof.getElementsByTagName('ub:University')[0].attributes['rdf:about'].value
under = under_d.split('.')[1].split('y')[1]
grad_d = prof.getElementsByTagName('ub:University')[1].attributes['rdf:about'].value
grad = grad_d.split('.')[1].split('y')[1]
doc_d = prof.getElementsByTagName('ub:University')[2].attributes['rdf:about'].value
doc = doc_d.split('.')[1].split('y')[1]
# personal info
email = prof.getElementsByTagName('ub:emailAddress')[0].childNodes[0].nodeValue
phone = prof.getElementsByTagName('ub:telephone')[0].childNodes[0].nodeValue
res_int = prof.getElementsByTagName('ub:researchInterest')[0].childNodes[0].nodeValue
res = res_int.split('h')[1]
print >> f,"INSERT INTO teachers VALUES (%s, %s, %s, %s, '%s', %s, %s, %s, '%s', '%s', %s);" % (depid, uniid, 2, tid, name, under, grad, doc, email, phone, res)
# courses
course_list = prof.getElementsByTagName('ub:teacherOf')
for course in course_list :
c = course.attributes['rdf:resource'].value
cname = c.split('/')[3]
if cname[0] == 'C':
ctype = 0
cid = cname.split('e')[1]
else:
ctype = 1
cid = cname.split('e')[2]
print >> f,"INSERT INTO courses VALUES (%s, %s, %s, %s, %s, %s);" % (depid, uniid, ctype, cid, tid, 2)
# assist professors
for prof in assist_prof_list :
nameuri = prof.attributes['rdf:about'].value
name = prof.getElementsByTagName('ub:name')[0].childNodes[0].nodeValue
tid = name.split('r')[2]
# degrees
under_d = prof.getElementsByTagName('ub:University')[0].attributes['rdf:about'].value
under = under_d.split('.')[1].split('y')[1]
grad_d = prof.getElementsByTagName('ub:University')[1].attributes['rdf:about'].value
grad = grad_d.split('.')[1].split('y')[1]
doc_d = prof.getElementsByTagName('ub:University')[2].attributes['rdf:about'].value
doc = doc_d.split('.')[1].split('y')[1]
# personal info
email = prof.getElementsByTagName('ub:emailAddress')[0].childNodes[0].nodeValue
phone = prof.getElementsByTagName('ub:telephone')[0].childNodes[0].nodeValue
res_int = prof.getElementsByTagName('ub:researchInterest')[0].childNodes[0].nodeValue
res = res_int.split('h')[1]
print >> f,"INSERT INTO teachers VALUES (%s, %s, %s, %s, '%s', %s, %s, %s, '%s', '%s', %s);" % (depid, uniid, 1, tid, name, under, grad, doc, email, phone, res)
# courses
course_list = prof.getElementsByTagName('ub:teacherOf')
for course in course_list :
c = course.attributes['rdf:resource'].value
cname = c.split('/')[3]
if cname[0] == 'C':
ctype = 0
cid = cname.split('e')[1]
else:
ctype = 1
cid = cname.split('e')[2]
print >> f,"INSERT INTO courses VALUES (%s, %s, %s, %s, %s, %s);" % (depid, uniid, ctype, cid, tid, 1)
# lecturers
for prof in lect_list :
nameuri = prof.attributes['rdf:about'].value
name = prof.getElementsByTagName('ub:name')[0].childNodes[0].nodeValue
tid = name.split('r')[2]
# degrees
under_d = prof.getElementsByTagName('ub:University')[0].attributes['rdf:about'].value
under = under_d.split('.')[1].split('y')[1]
grad_d = prof.getElementsByTagName('ub:University')[1].attributes['rdf:about'].value
grad = grad_d.split('.')[1].split('y')[1]
doc_d = prof.getElementsByTagName('ub:University')[2].attributes['rdf:about'].value
doc = doc_d.split('.')[1].split('y')[1]
# personal info
email = prof.getElementsByTagName('ub:emailAddress')[0].childNodes[0].nodeValue
phone = prof.getElementsByTagName('ub:telephone')[0].childNodes[0].nodeValue
print >> f,"INSERT INTO teachers VALUES (%s, %s, %s, %s, '%s', %s, %s, %s, '%s', '%s', %s);" % (depid, uniid, 0, tid, name, under, grad, doc, email, phone, "NULL")
# courses
course_list = prof.getElementsByTagName('ub:teacherOf')
for course in course_list :
c = course.attributes['rdf:resource'].value
cname = c.split('/')[3]
if cname[0] == 'C':
ctype = 0
cid = cname.split('e')[1]
else:
ctype = 1
cid = cname.split('e')[2]
print >> f,"INSERT INTO courses VALUES (%s, %s, %s, %s, %s, %s);" % (depid, uniid, ctype, cid, tid, 0)
# under grad students
for stud in under_stud_list :
nameuri = stud.attributes['rdf:about'].value
name = stud.getElementsByTagName('ub:name')[0].childNodes[0].nodeValue
studid = name.split('t')[3]
# personal info
email = stud.getElementsByTagName('ub:emailAddress')[0].childNodes[0].nodeValue
phone = stud.getElementsByTagName('ub:telephone')[0].childNodes[0].nodeValue
advisor = ""
try:
advisor = stud.getElementsByTagName('ub:advisor')[0].attributes['rdf:resource'].value
adv = advisor.split('/')[3]
aid = adv.split('r')[2];
atype = adv.rstrip('0123456789')
if atype == "Lecturer":
atypenum = 0
elif atype == "AssistantProfessor":
atypenum = 1
elif atype == "AssociateProfessor":
atypenum = 2
else:
atypenum = 3
print >> f,"INSERT INTO students VALUES (%s, %s, %s, %s, '%s', '%s', '%s', '%s', %s, %s);" % (depid, uniid, 0, studid, name, "NULL", email, phone, atypenum, aid)
except:
print >> f,"INSERT INTO students VALUES (%s, %s, %s, %s, '%s', '%s', '%s', '%s', '%s', '%s');" % (depid, uniid, 0, studid, name, "NULL", email, phone, "NULL", "NULL")
pass
# courses
course_list = stud.getElementsByTagName('ub:takesCourse')
for course in course_list :
c = course.attributes['rdf:resource'].value
cname = c.split('/')[3]
if cname[0] == 'C':
ctype = 0
cid = cname.split('e')[1]
else:
ctype = 1
cid = cname.split('e')[2]
print >> f,"INSERT INTO takescourses VALUES (%s, %s, %s, %s, %s, %s);" % (depid, uniid, 0, studid, ctype, cid)
# grad students
for stud in grad_stud_list :
nameuri = stud.attributes['rdf:about'].value
name = stud.getElementsByTagName('ub:name')[0].childNodes[0].nodeValue
studid = name.split('t')[3]
under_d = stud.getElementsByTagName('ub:University')[0].attributes['rdf:about'].value
under = under_d.split('.')[1].split('y')[1]
# personal info
email = stud.getElementsByTagName('ub:emailAddress')[0].childNodes[0].nodeValue
phone = stud.getElementsByTagName('ub:telephone')[0].childNodes[0].nodeValue
advisor = ""
try:
advisor = stud.getElementsByTagName('ub:advisor')[0].attributes['rdf:resource'].value
adv = advisor.split('/')[3]
aid = adv.split('r')[2];
atype = adv.rstrip('0123456789')
if atype == "Lecturer":
atypenum = 0
elif atype == "AssistantProfessor":
atypenum = 1
elif atype == "AssociateProfessor":
atypenum = 2
else:
atypenum = 3
print >> f,"INSERT INTO students VALUES (%s, %s, %s, %s, '%s', %s, '%s', '%s', %s, %s);" % (depid, uniid, 1, studid, name, under, email, phone, atypenum, aid)
except:
print >> f,"INSERT INTO students VALUES (%s, %s, %s, %s, '%s', %s, '%s', '%s', '%s', '%s');" % (depid, uniid, 1, studid, name, under, email, phone, "NULL", "NULL")
pass
# courses
course_list = stud.getElementsByTagName('ub:takesCourse')
for course in course_list :
c = course.attributes['rdf:resource'].value
cname = c.split('/')[3]
if cname[0] == 'C':
ctype = 0
cid = cname.split('e')[1]
else:
ctype = 1
cid = cname.split('e')[2]
print >> f,"INSERT INTO takescourses VALUES (%s, %s, %s, %s, %s, %s);" % (depid, uniid, 1, studid, ctype, cid)
# research groups
for r in research_list :
rg = r.attributes['rdf:about'].value
rgid = rg.split('p')[3]
print >> f,"INSERT INTO researchgroups VALUES (%s, %s, %s);" % (depid, uniid, rgid)
# publications
for p in pub_list :
name = p.attributes['rdf:about'].value
a = name.split('/')[3]
pub = name.split('/')[4]
aid = a.split('r')[2]
atype = a.rstrip('0123456789')
pubid = pub.split('n')[1]
if atype == "Lecturer":
atypenum = 0
elif atype == "AssistantProfessor":
atypenum = 1
elif atype == "AssociateProfessor":
atypenum = 2
else:
atypenum = 3
#author_list = p.getElementsByTagName('ub:publicationAuthor')
#a = author_list[0].attributes['rdf:resource'].value
print >> f,"INSERT INTO publications VALUES (%s, %s, %s, %s, %s);" % (depid, uniid, pubid, atypenum, aid)
# TA and RA
for ta in ta_list :
name = ta.attributes['rdf:about'].value
studid = name.split('t')[8]
course = ta.getElementsByTagName('ub:teachingAssistantOf')[0].attributes['rdf:resource'].value
cid = course.split('/')[3].split('e')[1]
print >> f,"INSERT INTO ta VALUES (%s, %s, %s, %s, %s);" % (depid, uniid, studid, 0, cid)
for ra in ra_list :
name = ra.attributes['rdf:about'].value
studid = name.split('t')[8]
print >> f,"INSERT INTO ra VALUES (%s, %s, %s);" % (depid, uniid, studid)
f.close()
return
# parse in parallel
#ppservers = ()
#job_server = pp.Server(ppservers=ppservers)
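# Note on the driver below: every *.owl file in the current directory gets its own
# multiprocessing.Process running parsefile(); each process writes an independent
# <input>.owl.sql file, so no shared state or result collection is needed.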
if __name__ == '__main__':
#po = Pool()
start_time = time.time()
jobs = []
for infile in glob.glob( os.path.join('.', '*.owl') ):
#print "Current file is: " + infile
p = multiprocessing.Process(target=parsefile, args=(infile,))
jobs.append(p)
p.start()
#po.apply_async(parsefile, (infile,))
#po.close()
#po.join()
print "Time elapsed: ", time.time() - start_time, "s"
#job_server.print_stats()
|
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Rdata Types.
@var _by_text: The rdata type textual name to value mapping
@type _by_text: dict
@var _by_value: The rdata type value to textual name mapping
@type _by_value: dict
@var _metatypes: If an rdatatype is a metatype, there will be a mapping
whose key is the rdatatype value and whose value is True in this dictionary.
@type _metatypes: dict
@var _singletons: If an rdatatype is a singleton, there will be a mapping
whose key is the rdatatype value and whose value is True in this dictionary.
@type _singletons: dict"""
import re
import dns.exception
NONE = 0
A = 1
NS = 2
MD = 3
MF = 4
CNAME = 5
SOA = 6
MB = 7
MG = 8
MR = 9
NULL = 10
WKS = 11
PTR = 12
HINFO = 13
MINFO = 14
MX = 15
TXT = 16
RP = 17
AFSDB = 18
X25 = 19
ISDN = 20
RT = 21
NSAP = 22
NSAP_PTR = 23
SIG = 24
KEY = 25
PX = 26
GPOS = 27
AAAA = 28
LOC = 29
NXT = 30
SRV = 33
NAPTR = 35
KX = 36
CERT = 37
A6 = 38
DNAME = 39
OPT = 41
APL = 42
DS = 43
SSHFP = 44
IPSECKEY = 45
RRSIG = 46
NSEC = 47
DNSKEY = 48
DHCID = 49
NSEC3 = 50
NSEC3PARAM = 51
TLSA = 52
HIP = 55
SPF = 99
UNSPEC = 103
TKEY = 249
TSIG = 250
IXFR = 251
AXFR = 252
MAILB = 253
MAILA = 254
ANY = 255
URI = 256
CAA = 257
TA = 32768
DLV = 32769
_by_text = {
'NONE' : NONE,
'A' : A,
'NS' : NS,
'MD' : MD,
'MF' : MF,
'CNAME' : CNAME,
'SOA' : SOA,
'MB' : MB,
'MG' : MG,
'MR' : MR,
'NULL' : NULL,
'WKS' : WKS,
'PTR' : PTR,
'HINFO' : HINFO,
'MINFO' : MINFO,
'MX' : MX,
'TXT' : TXT,
'RP' : RP,
'AFSDB' : AFSDB,
'X25' : X25,
'ISDN' : ISDN,
'RT' : RT,
'NSAP' : NSAP,
'NSAP-PTR' : NSAP_PTR,
'SIG' : SIG,
'KEY' : KEY,
'PX' : PX,
'GPOS' : GPOS,
'AAAA' : AAAA,
'LOC' : LOC,
'NXT' : NXT,
'SRV' : SRV,
'NAPTR' : NAPTR,
'KX' : KX,
'CERT' : CERT,
'A6' : A6,
'DNAME' : DNAME,
'OPT' : OPT,
'APL' : APL,
'DS' : DS,
'SSHFP' : SSHFP,
'IPSECKEY' : IPSECKEY,
'RRSIG' : RRSIG,
'NSEC' : NSEC,
'DNSKEY' : DNSKEY,
'DHCID' : DHCID,
'NSEC3' : NSEC3,
'NSEC3PARAM' : NSEC3PARAM,
'TLSA' : TLSA,
'HIP' : HIP,
'SPF' : SPF,
'UNSPEC' : UNSPEC,
'TKEY' : TKEY,
'TSIG' : TSIG,
'IXFR' : IXFR,
'AXFR' : AXFR,
'MAILB' : MAILB,
'MAILA' : MAILA,
'ANY' : ANY,
'URI' : URI,
'CAA' : CAA,
'TA' : TA,
'DLV' : DLV,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
_metatypes = {
OPT : True
}
_singletons = {
SOA : True,
NXT : True,
DNAME : True,
NSEC : True,
# CNAME is technically a singleton, but we allow multiple CNAMEs.
}
_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I);
class UnknownRdatatype(dns.exception.DNSException):
"""DNS resource record type is unknown."""
def from_text(text):
"""Convert text into a DNS rdata type value.
@param text: the text
@type text: string
@raises dns.rdatatype.UnknownRdatatype: the type is unknown
@raises ValueError: the rdata type value is not >= 0 and <= 65535
@rtype: int"""
value = _by_text.get(text.upper())
if value is None:
match = _unknown_type_pattern.match(text)
        if match is None:
raise UnknownRdatatype
value = int(match.group(1))
if value < 0 or value > 65535:
raise ValueError("type must be between >= 0 and <= 65535")
return value
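# Example (informational): with the constants above, from_text('MX') returns 15,
# from_text('TYPE15') also returns 15 via the TYPEnnn escape, and an unrecognized
# mnemonic such as from_text('BOGUS') raises UnknownRdatatype.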
def to_text(value):
"""Convert a DNS rdata type to text.
@param value: the rdata type value
@type value: int
@raises ValueError: the rdata type value is not >= 0 and <= 65535
@rtype: string"""
if value < 0 or value > 65535:
raise ValueError("type must be between >= 0 and <= 65535")
text = _by_value.get(value)
if text is None:
text = 'TYPE' + `value`
return text
def is_metatype(rdtype):
"""True if the type is a metatype.
@param rdtype: the type
@type rdtype: int
@rtype: bool"""
if rdtype >= TKEY and rdtype <= ANY or _metatypes.has_key(rdtype):
return True
return False
def is_singleton(rdtype):
"""True if the type is a singleton.
@param rdtype: the type
@type rdtype: int
@rtype: bool"""
if _singletons.has_key(rdtype):
return True
return False
|
|
#!/usr/bin/python
# import petsc4py
# import sys
# petsc4py.init(sys.argv)
# from petsc4py import PETSc
# from MatrixOperations import *
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import os
import scipy.io
from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos
from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
def SaveEpertaMatrix(A,name):
from PyTrilinos import EpetraExt
from numpy import array,loadtxt
import scipy.sparse as sps
import scipy.io
test ="".join([name,".txt"])
EpetraExt.RowMatrixToMatlabFile(test,A)
data = loadtxt(test)
col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
Asparse = sps.csr_matrix((values, (row, col)))
testmat ="".join([name,".mat"])
scipy.io.savemat( testmat, {name: Asparse},oned_as='row')
def NullSpace(A,name):
from PyTrilinos import EpetraExt, Epetra
from numpy import array,loadtxt
import scipy.sparse as sps
import scipy.io
import matplotlib.pylab as plt
test ="".join([name,".txt"])
EpetraExt.RowMatrixToMatlabFile(test,A)
data = loadtxt(test)
col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
Asparse = sps.csr_matrix((values, (row, col)))
(Nb,Mb) = Asparse.shape
Aublas1 = Asparse[0:Nb-1,0:Mb-1]
# plt.spy(Aublas1)
# if (Nb < 1000):
# plt.show()
comm = Epetra.PyComm()
Ap = scipy_csr_matrix2CrsMatrix(Aublas1, comm)
return Ap
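# Hedged note on the helpers above: both round-trip the Epetra operator through a
# Matlab-format text file. SaveEpertaMatrix keeps the assembled matrix as <name>.mat
# for offline inspection; NullSpace reloads it as a scipy CSR matrix, drops the last
# row and column (presumably to pin the constant-pressure / Lagrange-multiplier
# degree of freedom) and converts the result back to a Trilinos CrsMatrix.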
#MO.SwapBackend('epetra')
#os.system("echo $PATH")
m = 2
errL2u = np.zeros((m-1,1))
errL2p = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'No'
Saving = 'no'
case = 1
parameters['linear_algebra_backend'] = ''
for xx in xrange(1,m):
print xx
nn = 2**(xx+4)
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
# nn = 32
# mesh = UnitSquareMesh(16,16)
# mesh = UnitSquareMesh(nn, nn)
mesh = RectangleMesh(-1, -1, 1, 1, nn, nn,'right')
# tic()
parameters['reorder_dofs_serial'] = False
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
parameters['reorder_dofs_serial'] = False
# print 'time to create function spaces', toc(),'\n\n'
W = V*Q
Vdim[xx-1] = V.dim()
Qdim[xx-1] = Q.dim()
Wdim[xx-1] = W.dim()
print "\n\nV: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n"
def boundary(x, on_boundary):
return on_boundary
# u0 = Expression(("sin(pi*x[1])","sin(pi*x[0])"))
# u0 = Expression(("pow(x[1],2)-1","pow(x[0],2)-1"))
if case == 1:
u0 = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
p0 = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)")
elif case == 2:
u0 = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
p0 = Expression("x[1]+x[0]-1")
elif case == 3:
u0 = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
p0 = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
bc = DirichletBC(W.sub(0),u0, boundary)
bc1 = DirichletBC(W.sub(1), p0, boundary)
bcs = [bc]
# v, u = TestFunction(V), TrialFunction(V)
# q, p = TestFunction(Q), TrialFunction(Q)
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
# f = Expression(("-pi*pi*sin(pi*x[1])+2*x[1]","-pi*pi*sin(pi*x[0])"))
if case == 1:
f = -Expression(("120*x[0]*x[1]*(1-mu)+ 400*x[0]*pow(x[1],6)+(5*pow(x[0],4)-5*pow(x[1],4))*60*x[0]*x[1]*x[1]","60*(pow(x[0],2)-pow(x[1],2))*(1-mu)+400*pow(x[0],4)*pow(x[1],3)-(5*pow(x[0],4)-5*pow(x[1],4))*20*x[1]*x[1]*x[1]"), mu = 1e0)
elif case == 2:
f = -Expression(("-1","-1"))
elif case == 3:
f = -Expression(("8*pi*pi*cos(2*pi*x[1])*sin(2*pi*x[0]) + 2*pi*cos(2*pi*x[0])*sin(2*pi*x[1])","2*pi*cos(2*pi*x[0])*sin(2*pi*x[1]) - 8*pi*pi*cos(2*pi*x[0])*sin(2*pi*x[1])"))
u_k = Function(V)
mu = Constant(1e0)
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg =avg(h)
d = 0
u_k = Function(V)
a11 = -mu*inner(grad(v), grad(u))*dx - inner(dot(u_k, grad(u)), v)*dx
a12 = div(v)*p*dx
a21 = div(u)*q*dx
L1 = inner(v, f)*dx
a = a11+a12+a21
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-5 # tolerance
iter = 0 # iteration counter
maxiter = 100 # max no of iterations allowed
# i = p*q*dx
# AA = assemble(a11)
while eps > tol and iter < maxiter:
iter += 1
x = Function(W)
uu = Function(W)
tic()
AA, bb = assemble_system(a, L1, bcs)
print toc()
tic()
A_epetra = as_backend_type(AA).mat()
A_epetra =NullSpace(A_epetra,"A_epetra")
# As = AA.sparray()[0:-1,0:-1]
# print toc()
# tic()
# A = PETSc.Mat().createAIJ(size=As.shape,csr=(As.indptr, As.indices, As.data))
print toc()
# pause  # debug halt (undefined name); disabled so the Picard loop can run
# PP, btmp = assemble_system(i+a11, L1, bcs)
DoF = V.dim() + Q.dim()
x_epetra = Epetra.Vector(0*bb.array())
A_epetra = as_backend_type(AA).mat()
# P_epetra = down_cast(PP).mat()
b_epetra = as_backend_type(bb).vec()
# x_epetra = down_cast(uu.vector()).vec()
A_epetra =NullSpace(A_epetra,"A_epetra")
# P_epetra =NullSpace(P_epetra,"P_epetra")
print toc()
bbb =bb.array()
Nb = bbb.shape
b =bbb[0:Nb[0]-1]
b_epetra = Epetra.Vector(b)
xxx = x.vector().array()
x =xxx[0:Nb[0]-1]
x_epetra = Epetra.Vector(x)
# pause()  # debug halt (undefined name); disabled so the Picard loop can run
# mlList = {"max levels" : 200,
# "output" : 10,
# "smoother: type" : "symmetric Gauss-Seidel",
# "aggregation: type" : "Uncoupled"
# }
# prec = ML.MultiLevelPreconditioner(P_epetra, False)
# prec.SetParameterList(mlList)
# prec.ComputePreconditioner()
# solver = AztecOO.AztecOO(A_epetra, x_epetra, b_epetra)
# solver.SetPrecOperator(prec)
# solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_gmres);
# solver.SetAztecOption(AztecOO.AZ_output, 100);
# err = solver.Iterate(20000, 1e-10)
tic()
problem = Epetra.LinearProblem(A_epetra,x_epetra,b_epetra)
print '\n\n\n\n\n\n'
factory = Amesos.Factory()
solver = factory.Create("Amesos_Umfpack", problem)
# solver = factory.Create("MUMPS", problem)
amesosList = {"PrintTiming" : True, "PrintStatus" : True }
solver.SetParameters(amesosList)
solver.SymbolicFactorization()
solver.NumericFactorization()
solver.Solve()
soln = problem.GetLHS()
print "||x_computed||_2 =", soln.Norm2()
# solver.PrintTiming()
print '\n\n\n\n\n\n'
uu = x_epetra[0:Vdim[xx-1][0]]
# time = time+toc()
u1 = Function(V)
u1.vector()[:] = u1.vector()[:] + uu.array
diff = u1.vector().array() - u_k.vector().array()
eps = np.linalg.norm(diff, ord=np.Inf)
print '\n\n\niter=%d: norm=%g' % (iter, eps)
# u_k.assign(uu) # update for next iteration
u_k.assign(u1)
#
if case == 1:
ue = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
pe = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)+5")
elif case == 2:
ue = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
pe = Expression("x[1]+x[0]-1")
elif case == 3:
ue = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
pe = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
# pp = x_epetra[Vdim[xx-1][0]:]
# pa = Function(Q)
# pa1 = Function(Q)
# pa2 = Function(Q)
# pa1.vector()[:] = pp.array
# pa2.vector()[:] = 0*pp.array+1
# pa2.vector().array()
# pa.vector()[:] = pp.array + assemble(pa1*dx)/assemble(pa2*dx)
# uu = x_epetra[0:Vdim[xx-1][0]]
# ua = Function(V)
# ua.vector()[:] = uu.array
u = interpolate(ue,V)
p = interpolate(pe,Q)
Nv = u.vector().array().shape
x = x_epetra[0:Nv[0]]
ua = Function(V)
ua.vector()[:] = x.array
pp = x_epetra[Nv[0]:]
pp = pp.array
n = pp.shape
pp = np.insert(pp,n,0)
pa = Function(Q)
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(Q)
ones.vector()[:]=(0*pp+1)
pp = Function(Q)
pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(pe,Q)
pe = Function(Q)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
errL2u[xx-1] = errornorm(ue,ua,norm_type="L2", degree_rise=4,mesh=mesh)
errL2p[xx-1] = errornorm(pe,pp,norm_type="L2", degree_rise=4,mesh=mesh)
print errL2u[xx-1]
print errL2p[xx-1]
del solver
# scipy.io.savemat('Vdim.mat', {'VDoF':Vdim})
# scipy.io.savemat('DoF.mat', {'DoF':DoF})
plt.loglog(NN,errL2u)
plt.title('Error plot for CG2 elements - Velocity L2 convergence = %f' % np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.figure()
plt.loglog(NN,errL2p)
plt.title('Error plot for CG1 elements - Pressure L2 convergence = %f' % np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
# plt.show()
print "Velocity Elements rate of convergence ", np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1])))
print "Pressure Elements rate of convergence ", np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1])))
print "\n\n"
import prettytable
table = prettytable.PrettyTable(["DoF","V-L2","P-L2"])
for x in xrange(1,m):
table.add_row([Wdim[x-1][0],errL2u[x-1][0],errL2p[x-1][0]])
print table
# plt.loglog(N,erru)
# plt.title('Error plot for P2 elements - convergence = %f' % np.log2(np.average((erru[0:m-2]/erru[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.figure()
# plt.loglog(N,errp)
# plt.title('Error plot for P1 elements - convergence = %f' % np.log2(np.average((errp[0:m-2]/errp[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
plot(ua)
# plot(interpolate(ue,V))
plot(pp)
# plot(interpolate(pe,Q))
interactive()
# plt.show()
|
|
#!/usr/bin/python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts an interactive shell which allows to create statistic entities.
Usage is simple:
In order to seed all available statistics, just type:
>>> seed_all()
In order to seed one statistic:
>>> seed_one(link_id)
where link_id identifies the desired statistic
In order to change program in scope:
>>> set_program(key_name)
where key_name represents a new program
In order to terminate the script:
>>> exit()
"""
import sys
import interactive
interactive.setup()
from django.utils import simplejson
from soc.logic import dicts
from soc.modules.gsoc.logic.models.program import logic as program_logic
from soc.modules.statistic.logic.models.statistic import logic as \
statistic_logic
from soc.modules.statistic.models.statistic import Statistic
SUCCESS_MSG_FMT = 'Statistic %s has been successfully added.'
FAILURE_MSG_FMT = 'An error occurred while adding the %s statistic.'
DOES_NOT_EXISTS_MSG_FMT = 'Statistic %s does not exist.'
VISUALIZATION_SETS = {
"cumulative_standard": [
"Table",
"BarChart",
"ColumnChart",
"ImageChartBar",
],
"cumulative_countries": [
"Table"
],
"single_standard": [
"Table",
"BarChart",
"ColumnChart",
"ImageChartBar",
"ImageChartP",
"ImageChartP3",
"PieChart",
"ScatterChart"
],
"single_countries": [
"Table",
"GeoMap"
]
}
STATISTIC_PROPERTIES = {
"applications_per_program": (
"Applications Per Program",
{
"params": {"fields": ["program", "__key__"]},
"type": "per_field",
"model": "gsoc_student_proposal",
"choice_instructions":
{
"model": "gsoc_program",
},
"transformer": "pretty_names",
},
{
"description": [("program", "string", "Program"),
("number", "number", "Number")],
"options": {
'Applications Per Program': {
"visualizations": VISUALIZATION_SETS['single_standard']
}
}
},
"host"),
"applications_per_student": (
"Applications Per Student",
{
"type": "per_field",
"model": "gsoc_student_proposal",
"choice_instructions":
{
"program_field": "student",
"model": "gsoc_student",
"filter": "property_filter",
"property_conditions": {
"status": ['active', 'inactive']
},
},
"transformer": "enumerate",
"params": {
"fields": ["scope", "__key__"],
"program_field": "program",
}
},
{
"description": [("app_number", "string", "Number of Applications"),
("student_number", "number", "Number of Students")],
"options": {
'Applications Per Student': {
"visualizations": VISUALIZATION_SETS['single_standard']
}
}
},
"host"),
"mentors_per_continent": (
"Mentors Per Continent",
{
"type": "per_field",
"field": "continent",
"model": "gsoc_mentor",
"subsets": {"all":{}, "referenced":{}, "no-referenced":{}},
"filter": "property_filter",
"params": {
"ref_logic": "gsoc_student_project",
"ref_field": "mentor",
"program_field": "program",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("continent", "string", "Continent"),
("all_mentors", "number", "Mentors"),
("pro_mentors", "number", "Mentors with projects"),
("nop_mentors", "number",
"Mentors without projects")],
"options": {
'Mentors Per Continent (cumulative)': {
"visualizations": VISUALIZATION_SETS['cumulative_standard'],
"columns": [0, 1, 2]
},
'Mentors Per Continent (all)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [0]
},
'Mentors Per Continent (with projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [1]
},
'Mentors Per Continent (without projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [2]
}
}
},
"org_admin"),
"mentors_per_country": (
"Mentors Per Country",
{
"type": "per_field",
"field": "country",
"model": "gsoc_mentor",
"subsets": {"all":{}, "referenced":{}, "no-referenced":{}},
"filter": "property_filter",
"transformer": "get-vis-names",
"params": {
"fields": ["res_country"],
"ref_logic": "gsoc_student_project",
"ref_field": "mentor",
"program_field": "program",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("country", "string", "Country"),
("all_mentors", "number", "Mentors"),
("pro_mentors", "number", "Mentors with projects"),
("nop_mentors", "number",
"Mentors without projects")],
"options": {
'Mentors Per Country (cumulative)': {
"visualizations": VISUALIZATION_SETS['cumulative_countries'],
"columns": [0, 1, 2]
},
'Mentors Per Country (all)': {
"visualizations": VISUALIZATION_SETS['single_countries'],
"columns": [0]
},
'Mentors Per Country (with projects)': {
"visualizations": VISUALIZATION_SETS['single_countries'],
"columns": [1]
},
'Mentors Per Country (without projects)': {
"visualizations": VISUALIZATION_SETS['single_countries'],
"columns": [2]
}
}
},
"org_admin"),
"mentors_per_organization": (
"Mentors Per Organization",
{
"type": "per_field",
"model": "gsoc_mentor",
"choice_instructions": {
"program_field": "scope",
"model": "gsoc_organization",
"filter": "property_filter",
"property_conditions": {
"status": ['new', 'active', 'inactive']
},
},
"transformer": "pretty_names",
"filter": "property_filter",
"params": {
"fields": ["scope", "__key__"],
"program_field": "program",
"property_conditions": {
"status": ['active', 'inactive']
},
}
},
{
"description": [("org_name", "string", "Organization"),
("student_number", "number", "Number of Mentors")],
"options": {
'Mentors Per Organization': {
"visualizations": ["Table"]
}
}
},
"host"),
"organizations_per_program": (
"Organizations Per Program",
{
"type": "per_field",
"model": "gsoc_organization",
"choice_instructions": {
"model": "gsoc_program",
},
"transformer": "pretty_names",
"filter": "property_filter",
"params": {
"fields": ["scope", "__key__"],
"property_conditions": {
"status": ['new', 'active', 'inactive']
},
}
},
{
"description": [("program", "string", "Program"),
("number", "number", "Number")],
"options": {
'Organizations Per Program': {
"visualizations": VISUALIZATION_SETS['single_standard']
}
}
},
"host"),
"organization_admins_per_age": (# strange visualizations
"Organization Admins Per Age",
{
"type": "per_field",
"field": "age",
"model": "gsoc_org_admin",
"transformer": "remove-out-of-range",
"filter": "property_filter",
"params": {
"program_field": "program",
"property_conditions": {
"status": ['active', 'inactive']
},
}
},
{
"description": [("age", "number", "Age"),
("number", "number", "Number")],
"options": {
'Organization Admins Per Age': {
"visualizations": VISUALIZATION_SETS['single_standard']
}
}
},
"host"),
"student_projects_per_continent": (
"Student Projects Per Continent",
{
"type": "per_field",
"field": "continent",
"model": "gsoc_student_project",
"filter": "property_filter",
"params": {
"fields": ["student"],
"property_conditions": {
"status": ["accepted", "completed", "failed"]
},
"program_field": "program",
}
},
{
"description": [("continent", "string", "Continent"),
("number", "number", "Number")],
"options": {
'Student Projects Per Continent': {
"visualizations": VISUALIZATION_SETS['single_standard']
}
}
},
"host"),
"student_projects_per_country": (
"Student Projects Per Country",
{
"type": "per_field",
"model": "gsoc_student_project",
"field": "country",
"transformer": "get-vis-names",
"filter": "property_filter",
"params": {
"fields": ["student", "res_country"],
"property_conditions": {
"status": ["accepted", "completed", "failed"]
},
"program_field": "program",
}
},
{
"description": [("country", "string", "Country"),
("number", "number", "Number")],
"options": {
'Student Projects Per Country': {
"visualizations": VISUALIZATION_SETS['single_countries']
}
}
},
"host"),
"student_projects_per_organization": (
"Student Projects Per Organization",
{
"type": "per_field",
"filter": "property_filter",
"model": "gsoc_student_project",
"transformer": "pretty_names",
"subsets": [
('all', {}),
('within_range', {'constraints': [
{'field': 'passed_evaluations',
'type': 'size',
'min_value': 1,
'max_value': 2}
]}),
('within_range', {'constraints': [
{'field': 'passed_evaluations',
'type': 'size',
'min_value': 2,
'max_value': 2}
]})
],
"choice_instructions": {
"program_field": "scope",
"model": "gsoc_organization",
"filter": "property_filter",
"property_conditions": {
"status": ['new', 'active', 'inactive']
},
},
"params": {
"fields": ["scope", "__key__"],
"program_field": "program",
"property_conditions": {
"status": ["accepted", "completed", "failed"]
},
}
},
{
"description": [("organization", "string", "Organization"),
("accepted_projects", "number", "Accepted"),
("midterm_projects", "number", "Midterm Passed"),
("passed_projects", "number", "Final Passed")],
"options": {
'Student Projects Per Organization (cumulative)': {
"visualizations": ['Table'],
"columns": [0, 1, 2]
},
'Accepted Student Projects Per Organization': {
"visualizations": ["Table", "ColumnChart"],
"columns": [0]
},
'Midterm-Passed Student Projects Per Organization': {
"visualizations": ["Table", "ColumnChart"],
"columns": [1]
},
'Final-Passed Student Projects Per Organization': {
"visualizations": ["Table", "ColumnChart"],
"columns": [2]
},
}
},
"host"),
"student_proposals_per_continent": (
"Student Proposals Per Continent",
{
"type": "per_field",
"field": "continent",
"model": "gsoc_student_proposal",
"params": {
"fields": ["scope"],
"program_field": "program",
}
},
{
"description": [("continent", "string", "Continent"),
("number", "number", "Number")],
"options": {
'Student Proposals Per Continent': {
"visualizations": VISUALIZATION_SETS['single_standard']
}
}
},
"host"),
"student_proposals_per_country": (
"Student Proposals Per Country",
{
"type": "per_field",
"field": "country",
"model": "gsoc_student_proposal",
"transformer": "get-vis-names",
"params": {
"fields": ["scope", "res_country"],
"program_field": "program",
}
},
{
"description": [("country", "string", "Country"),
("number", "number", "Number")],
"options": {
'Student Proposals Per Country': {
"visualizations": VISUALIZATION_SETS['single_countries']
}
}
},
"host"),
"student_proposals_per_organization": (
"Student Proposals Per Organization",
{
"type": "per_field",
"model": "gsoc_student_proposal",
"choice_instructions": {
"program_field": "scope",
"model": "gsoc_organization",
"filter": "property_filter",
"property_conditions": {
"status": ['new', 'active', 'inactive']
},
},
"transformer": "pretty_names",
"params": {
"fields": ["org", "__key__"],
"program_field": "program",
}
},
{
"description": [("organization", "string", "Organization"),
("number", "number", "Number")],
"options": {
'Student Proposals Per Organization': {
"visualizations": ["Table", "ColumnChart"]
}
}
},
"host"),
"students_per_age": (
"Students Per Age",
{
"type": "per_field",
"field": "age",
"filter": "property_filter",
"model": "gsoc_student",
"subsets": {"all":{}, "referenced":{}, "no-referenced":{}},
"transformer": "remove-out-of-range",
"params": {
"ref_logic": "gsoc_student_project",
"ref_field": "student",
"program_field": "scope",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("age", "string", "Age"),
("all_students", "number", "Students"),
("pro_students", "number",
"Students with projects"),
("nop_students", "number",
"Students without projects")],
"options": {
'Students Per Age (cumulative)': {
"visualizations": VISUALIZATION_SETS['cumulative_standard'],
"columns": [0, 1, 2]
},
'Students Per Age (all)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [0]
},
'Students Per Age (with projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [1]
},
'Students Per Age (without projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [2]
}
}
},
"host"),
"students_per_continent": (
"Students Per Continent",
{
"type": "per_field",
"field": "continent",
"filter": "property_filter",
"model": "gsoc_student",
"subsets": {"all":{}, "referenced":{}, "no-referenced":{}},
"params": {
"ref_logic": "gsoc_student_project",
"ref_field": "student",
"program_field": "scope",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("age", "string", "Continent"),
("all_students", "number", "Students"),
("pro_students", "number",
"Students with projects"),
("nop_students", "number",
"Students without projects")],
"options": {
'Students Per Continent (cumulative)': {
"visualizations": [
"Table",
"BarChart",
"ColumnChart",
"ImageChartBar",
],
"columns": [0, 1, 2]
},
'Students Per Continent (all)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [0]
},
'Students Per Continent (with projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [1]
},
'Students Per Continent (without projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [2]
}
},
},
"host"),
"students_per_country": (
"Students Per Country",
{
"type": "per_field",
"field": "country",
"filter": "property_filter",
"model": "gsoc_student",
"subsets": {"all":{}, "referenced":{}, "no-referenced":{}},
"transformer": "get-vis-names",
"params": {
"fields": ["res_country"],
"ref_logic": "gsoc_student_project",
"ref_field": "student",
"program_field": "scope",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("country", "string", "Country"),
("all_students", "number", "Students"),
("pro_students", "number",
"Students with projects"),
("nop_students", "number",
"Students without projects")],
"options": {
'Students Per Country (cumulative)': {
"visualizations": VISUALIZATION_SETS['cumulative_countries'],
"columns": [0, 1, 2]
},
'Students Per Country (all)': {
"visualizations": VISUALIZATION_SETS['single_countries'],
"columns": [0]
},
'Students Per Country (with projects)': {
"visualizations": VISUALIZATION_SETS['single_countries'],
"columns": [1]
},
'Students Per Country (without projects)': {
"visualizations": VISUALIZATION_SETS['single_countries'],
"columns": [2]
}
},
},
"host"),
"students_per_degree": (
"Students Per Degree",
{
"type": "per_field",
"field": "degree",
"filter": "property_filter",
"model": "gsoc_student",
"subsets": [("all", {}), ("referenced", {}), ("no-referenced", {})],
"params": {
"fields": ["degree"],
"ref_logic": "gsoc_student_project",
"ref_field": "student",
"program_field": "scope",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("degree", "string", "Degree"),
("all_students", "number", "Students"),
("pro_students", "number",
"Students with projects"),
("nop_students", "number",
"Students without projects")],
"options": {
'Students Per Degree (cumulative)': {
"visualizations": VISUALIZATION_SETS['cumulative_standard'],
"columns": [0, 1, 2]
},
'Students Per Degree (all)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [0]
},
'Students Per Degree (with projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [1]
},
'Students Per Degree (without projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [2]
}
}
},
"host"),
"students_per_graduation_year": (
"Students Per Graduation Year",
{
"type": "per_field",
"field": "expected_graduation",
"filter": "property_filter",
"model": "gsoc_student",
"subsets": [("all", {}), ("referenced", {}), ("no-referenced", {})],
"transformer": "remove-out-of-range",
"params": {
"fields": ["expected_graduation"],
"ref_logic": "gsoc_student_project",
"ref_field": "student",
"program_field": "scope",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("graduation_year", "string", "Graduation Year"),
("all_students", "number", "Students"),
("pro_students", "number",
"Students with projects"),
("nop_students", "number",
"Students without projects")],
"options": {
'Students Per Graduation Year (cumulative)': {
"visualizations": VISUALIZATION_SETS['cumulative_standard'],
"columns": [0, 1, 2]
},
'Students Per Graduation Year (all)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [0]
},
'Students Per Graduation Year (with projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [1]
},
'Students Per Graduation Year (without projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [2]
}
}
},
"host"),
"students_per_tshirt_style": (
"Students Per T-Shirt Style",
{
"type": "per_field",
"field": "tshirt_style",
"filter": "property_filter",
"model": "gsoc_student",
"subsets": [
("all", {}),
("referenced", {}),
("no-referenced", {})],
"params": {
"fields": ["tshirt_style"],
"ref_logic": "gsoc_student_project",
"ref_field": "student",
"program_field": "scope",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("style", "string", "Style"),
("all_students", "number", "Students"),
("pro_students", "number",
"Students with projects"),
("nop_students", "number",
"Students without projects")],
"options": {
'Students Per T-Shirt Style (cumulative)': {
"visualizations": [
"Table",
"BarChart",
"ColumnChart",
"ImageChartBar",
],
"columns": [0, 1, 2]
},
'Students Per T-Shirt Style (all)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [0]
},
'Students Per T-Shirt Style (with projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [1]
},
'Students Per T-Shirt Style (without projects)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [2]
}
},
},
"host"),
"gsoc2010_overall": (
"GSoC2010 Overall",
{
"type": "overall",
"items": (
{
"name": "Number of Students",
"type": "number",
"model": "gsoc_student",
"program_field": "scope",
"filter": "property_filter",
"params": {
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"name": "Number of Mentors",
"type": "number",
"model": "gsoc_mentor",
"program_field": "program",
"filter": "property_filter",
"params": {
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"name": "Number of Student Proposals",
"type": "number",
"model": "gsoc_student_proposal",
"program_field": "program",
},
{
"name": "Number of Student Projects",
"type": "number",
"model": "gsoc_student_project",
"program_field": "program",
"filter": "property_filter",
"params": {
"property_conditions": {
"status": ["accepted", "completed", "failed"]
},
}
},
{
"name": "Number of Organization Admins",
"type": "number",
"model": "gsoc_org_admin",
"program_field": "program",
"filter": "property_filter",
"params": {
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"name": "Number of Mentors With Projects",
"type": "number",
"model": "gsoc_student_project",
"program_field": "program",
"fields": ["mentor"],
"filter": "property_filter",
"params": {
"property_conditions": {
"status": ["accepted", "completed", "failed"]
},
}
},
{
"name": "Number of Students With Projects",
"type": "number",
"model": "gsoc_student_project",
"program_field": "program",
"fields": ["student"],
"filter": "property_filter",
"params": {
"property_conditions": {
"status": ["accepted", "completed", "failed"]
},
}
},
{
"name": "Number of Students With Proposals",
"type": "number",
"model": "gsoc_student_proposal",
"program_field": "program",
"fields": ["scope"]
},
{
"name": "Average Number of Projects Per Mentor",
"type": "average",
"model": "gsoc_mentor",
"program_field": "program",
"ref_logic": "gsoc_student_project",
"ref_field": "mentor"
},
{
"name": "Average Number of Proposals Per Student",
"type": "average",
"model": "gsoc_student",
"program_field": "scope",
"ref_logic": "gsoc_student_proposal",
"ref_field": "scope"
},
)
},
{
"description": [("stat_name", "string", "Statistic Name"),
("value", "number", "Value")],
"options": {
'Google Summer of Code 2010 (overall)': {
'visualizations': [
'Table'
]
}
}
},
"host"),
}
STATISTICS_LIST = [k for k in STATISTIC_PROPERTIES]
NAMES_DICT = dict((k, v) for k, (v, _, _, _)
in STATISTIC_PROPERTIES.iteritems())
INSTRUCTIONS_DICT = dict((k, v) for k, (_, v, _, _)
in STATISTIC_PROPERTIES.iteritems())
CHARTS_DICT = dict((k, v) for k, (_, _, v, _)
in STATISTIC_PROPERTIES.iteritems())
ACCESS_DICT = dict((k, v) for k, (_, _, _, v)
in STATISTIC_PROPERTIES.iteritems())
program_keyname = 'google/gsoc2010'
def _getCommonProperties():
"""Returns properties that are common for all statistic entities.
"""
program = program_logic.getFromKeyName(program_keyname)
properties = {
'access_for_other_programs': 'invisible',
'scope': program,
'scope_path': program_keyname,
}
return properties
def _getSpecificProperties(link_id):
"""Returns properties that are specific to a particular statistic.
"""
properties = {
'link_id': link_id,
'name': NAMES_DICT[link_id],
'chart_json': simplejson.dumps(CHARTS_DICT[link_id]),
'instructions_json': simplejson.dumps(INSTRUCTIONS_DICT[link_id]),
'read_access': ACCESS_DICT[link_id]
}
return properties
def _seedStatistic(properties):
"""Saves a new statistic entity, described by properties, in data store.
"""
entity = statistic_logic.updateOrCreateFromFields(properties, silent=True)
if entity:
print SUCCESS_MSG_FMT % properties['link_id']
else:
print FAILURE_MSG_FMT % properties['link_id']
def exit():
"""Terminates the script.
"""
sys.exit(0)
def seedOne(link_id):
"""Seeds a single statistic to the data store.
Args:
link_id: link_id of the statistic that should be added.
"""
if link_id not in STATISTICS_LIST:
print DOES_NOT_EXISTS_MSG_FMT % link_id
else:
properties = _getCommonProperties()
new_properties = _getSpecificProperties(link_id)
properties.update(new_properties)
_seedStatistic(properties)
def seedAll():
"""Seeds all available statistics to the data store.
"""
properties = _getCommonProperties()
for statistic in STATISTICS_LIST:
new_properties = _getSpecificProperties(statistic)
properties.update(new_properties)
_seedStatistic(properties)
def setProgram(keyname):
"""Sets program key name.
"""
global program_keyname
program_keyname = keyname
def main(args):
context = {
'exit': exit,
'seed_all': seedAll,
'seed_one': seedOne,
'statistics_list': STATISTICS_LIST,
'set_program': setProgram,
}
interactive.remote(args, context)
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: %s app_id [host]" % (sys.argv[0],)
sys.exit(1)
main(sys.argv[1:])
|
|
""" Class for testing an IPTT response (translating raw html into processed IPTT data)
for testing IPTT view response data"""
import json
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from indicators.models import Indicator
from indicators.views.views_reports import IPTT_Mixin
from factories.indicators_models import IndicatorFactory, ResultFactory
from factories.workflow_models import ProgramFactory
from factories import UserFactory, TolaUserFactory
from django import test
def process_nav(navsoup):
"""get useful values from the form section of an IPTT report, store as python dict"""
selects = []
for select in navsoup.find_all('select'):
this_select = select.extract()
options = []
for option in this_select.find_all('option'):
options.append({
'text': option.get_text(),
'value': option.get('value')
})
selects.append({
'name': this_select.get('name'),
'options': options
})
return selects
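# For illustration (hypothetical markup), a <select name="timeperiods"> holding
# a single <option value="1">Annual</option> would be captured as:
#   [{'name': 'timeperiods', 'options': [{'text': 'Annual', 'value': '1'}]}]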
def get_ranges(header_row):
"""translates a group of cells with dates and labels (i.e. quarter 3: Jan 1, 2015 - Jul 2, 2015) to python dict"""
ranges = []
for td in header_row.find_all('td'):
daterange = td.small.extract().get_text().strip() if td.small is not None else None
ranges.append({
'range': daterange,
'start_date': datetime.strptime(
daterange.split("-")[0].strip(), "%b %d, %Y"
).strftime('%Y-%m-%d') if daterange is not None else None,
'end_date': datetime.strptime(
daterange.split("-")[1].strip(), "%b %d, %Y"
).strftime('%Y-%m-%d') if daterange is not None else None,
'name': td.get_text().strip()
})
td.extract()
return ranges
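# Sketch of the translation (hypothetical cell): a header cell rendered as
# "Quarter 3 <small>Jan 1, 2015 - Jul 2, 2015</small>" becomes
#   {'name': 'Quarter 3', 'range': 'Jan 1, 2015 - Jul 2, 2015',
#    'start_date': '2015-01-01', 'end_date': '2015-07-02'}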
def nonestr(string):
return string if string != "" else None
def process_table(mainsoup, timeperiods=False):
"""takes the entire table portion of an IPTT page and returns the indicators/values and info gleaned from it"""
info = {}
indicators = []
info['date_range'] = mainsoup.find(id='id_span_iptt_date_range').extract().h4.get_text()
table = mainsoup.table.extract()
header_row = table.tr.extract()
info['program_name'] = header_row.find(id='id_td_iptt_program_name').extract().strong.get_text()
ranges = get_ranges(header_row)
header_row = table.tr.extract()
indicator_rows = []
for row in table.find_all('tr'):
indicator_rows.append(row.extract())
indicators.append({'ranges': []})
for _ in range(9):
key = header_row.th.extract().get_text().strip()
for k, indic_row in enumerate(indicator_rows):
value = indic_row.td.extract().get_text().strip()
if key:
indicators[k][key] = value
for c, daterange in enumerate(ranges):
for k, indic_row in enumerate(indicator_rows):
indicator_range = {
'name': daterange['name'],
'dates': daterange['range'],
'start_date': daterange['start_date'],
'end_date': daterange['end_date']
}
#'target': target, 'actual': actual, 'met': met}
for key in ['actual'] if (timeperiods and c > 0) else ['target', 'actual', 'met']:
indicator_range[key] = nonestr(indic_row.td.extract().get_text().strip())
if indicator_range[key] == u'\u2014':
indicator_range[key] = None
indicators[k]['ranges'].append(indicator_range)
return {
'info': info,
'indicators': indicators,
}
class IPTTResponse(object):
"""object for holding a processed IPTT response from the server (raw HTML) and testing it for indicator content"""
def __init__(self, html, timeperiods=False):
self.timeperiods = timeperiods
self.rawhtml = html
self.info = None
self.components = {}
self.indicators = []
self.process()
def __str__(self):
return "\n".join([json.dumps(self.info, indent=4), json.dumps(self.indicators, indent=4)])
def process(self):
soup = BeautifulSoup(self.rawhtml, 'html.parser')
self.components['head'] = soup.head.extract()
self.components['menu'] = soup.nav.extract()
self.components['nav'] = process_nav(soup.nav.extract())
main = process_table(soup.main.extract(), self.timeperiods)
self.indicators = main['indicators']
self.info = main['info']
for script in soup.find_all("script"):
script.extract()
self.leftoversoup = soup
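# Minimal usage sketch (assuming `html` holds a rendered IPTT report page):
#   parsed = IPTTResponse(html, timeperiods=True)
#   parsed.info['program_name']          # program name scraped from the header
#   parsed.indicators[0]['ranges'][0]    # first period's target/actual/met values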
class TestIPTTTargetPeriodsReportResponseBase(test.TestCase):
indicator_frequency = Indicator.LOP
def setUp(self):
self.user = UserFactory(first_name="FN", last_name="LN", username="iptt_tester", is_superuser=True)
self.user.set_password('password')
self.user.save()
self.tola_user = TolaUserFactory(user=self.user)
self.tola_user.save()
self.client = test.Client(enforce_csrf_checks=False)
self.client.login(username='iptt_tester', password='password')
self.response = None
startdate = datetime.strptime('2017-02-04', '%Y-%m-%d')
enddate = datetime.strptime('2019-10-01', '%Y-%m-%d')
self.program = ProgramFactory(reporting_period_start=startdate,
reporting_period_end=enddate)
def tearDown(self):
Indicator.objects.all().delete()
self.response = None
def get_indicator_for_program(self, **kwargs):
make_kwargs = {'program': self.program}
make_kwargs.update(kwargs)
indicator = IndicatorFactory(**make_kwargs)
return indicator
def get_indicator_by_frequency(self, frequency, **kwargs):
kwargs['target_frequency'] = frequency
return self.get_indicator_for_program(**kwargs)
def get_response(self, target_frequency=None, reporttype=IPTT_Mixin.REPORT_TYPE_TARGETPERIODS):
target_frequency = self.indicator_frequency if target_frequency is None else target_frequency
response = self.client.post('/indicators/iptt_report/{program}/{reporttype}/'.format(
program=self.program.id, reporttype=reporttype),
{'targetperiods': target_frequency,
'csrfmiddlewaretoken': 'asfd',
'program': self.program.id},
follow=True)
self.assertEqual(response.status_code, 200,
"response gave status code {0} instead of 200".format(response.status_code))
self.response = IPTTResponse(response.content)
return self.response
def format_assert_message(self, msg):
return "{0}:\n{1}".format(self.response, msg)
class TestIPTTTimePeriodsReportResponseBase(test.TestCase):
timeperiods = Indicator.ANNUAL
def setUp(self):
self.user = UserFactory(first_name="FN", last_name="LN", username="iptt_tester", is_superuser=True)
self.user.set_password('password')
self.user.save()
self.tola_user = TolaUserFactory(user=self.user)
self.tola_user.save()
self.client = test.Client(enforce_csrf_checks=False)
self.client.login(username='iptt_tester', password='password')
self.response = None
startdate = datetime.strptime('2017-02-04', '%Y-%m-%d')
enddate = datetime.strptime('2019-10-01', '%Y-%m-%d')
self.program = ProgramFactory(reporting_period_start=startdate,
reporting_period_end=enddate)
self.request_params = {
'csrfmiddlewaretoken': 'asdf',
'program': self.program.id
}
def tearDown(self):
Indicator.objects.all().delete()
self.response = None
def set_dates(self, start, end):
self.program.reporting_period_start = datetime.strptime(start, '%Y-%m-%d')
self.program.reporting_period_end = datetime.strptime(end, '%Y-%m-%d')
self.program.save()
def get_indicator_for_program(self, **kwargs):
make_kwargs = {'program': self.program}
make_kwargs.update(kwargs)
indicator = IndicatorFactory(**make_kwargs)
return indicator
def add_indicator(self, frequency=Indicator.ANNUAL, **kwargs):
kwargs['target_frequency'] = frequency
return self.get_indicator_for_program(**kwargs)
def add_indicator_with_data(self, frequency, values):
indicator = self.add_indicator(frequency=frequency)
collect_date = self.program.reporting_period_start + timedelta(days=1)
for value in values:
_ = ResultFactory(indicator=indicator, date_collected=collect_date, achieved=value)
if frequency == Indicator.ANNUAL:
collect_date = datetime(collect_date.year + 1, collect_date.month, collect_date.day)
elif frequency == Indicator.SEMI_ANNUAL:
collect_date = datetime(collect_date.year if collect_date.month < 7 else collect_date.year + 1,
collect_date.month + 6 if collect_date.month < 7 else collect_date.month - 6,
collect_date.day)
elif frequency == Indicator.TRI_ANNUAL:
collect_date = datetime(collect_date.year if collect_date.month < 9 else collect_date.year + 1,
collect_date.month + 4 if collect_date.month < 9 else collect_date.month - 8,
collect_date.day)
elif frequency == Indicator.QUARTERLY:
collect_date = datetime(collect_date.year if collect_date.month < 10 else collect_date.year + 1,
collect_date.month + 3 if collect_date.month < 10 else collect_date.month - 9,
collect_date.day)
elif frequency == Indicator.MONTHLY:
collect_date = datetime(collect_date.year if collect_date.month < 12 else collect_date.year + 1,
collect_date.month + 1 if collect_date.month < 12 else collect_date.month - 11,
collect_date.day)
def get_showall_response(self):
self.request_params['timeframe'] = 1
self.request_params['numrecentperiods'] = None
return self.get_response()
def get_recent_periods(self, numrecent):
self.request_params['timeframe'] = 2
self.request_params['numrecentperiods'] = numrecent
return self.get_response()
def get_date_range_periods(self, start, end):
self.request_params['start_period'] = start.strftime('%Y-%m-%d') if isinstance(start, datetime) else start
self.request_params['end_period'] = end.strftime('%Y-%m-%d') if isinstance(end, datetime) else end
return self.get_response()
def get_response(self, reporttype=IPTT_Mixin.REPORT_TYPE_TIMEPERIODS):
self.request_params['timeperiods'] = self.timeperiods
response = self.client.post('/indicators/iptt_report/{program}/{reporttype}/'.format(
program=self.program.id, reporttype=reporttype),
self.request_params,
follow=True)
self.assertEqual(response.status_code, 200,
"response gave status code {0} instead of 200".format(response.status_code))
self.response = IPTTResponse(response.content, timeperiods=True)
return self.response
def get_indicator_results(self, response, indicator_row=0):
indicator = response.indicators[indicator_row]['ranges']
return indicator[0], indicator[1:]
def format_assert_message(self, msg):
return "{0}:\n{1} timeperiods, {2}".format(self.response,
{k:v for k, v in Indicator.TARGET_FREQUENCIES}[self.timeperiods],
msg)
def number_of_ranges_test(self, start, end, expected_ranges):
self.set_dates(start, end)
self.add_indicator()
response = self.get_showall_response()
ranges = response.indicators[0]['ranges'][1:]
self.assertEqual(len(ranges), expected_ranges,
self.format_assert_message("expected {0} ranges for {1} to {2}, got {3}".format(
expected_ranges, start, end, len(ranges)
)))
|
|
import uuid
import pytest
from collections import deque
from itertools import zip_longest
from bluesky import Msg
from bluesky.preprocessors import (msg_mutator, stub_wrapper,
plan_mutator, pchain, single_gen as
single_message_gen,
finalize_wrapper)
from bluesky.utils import ensure_generator
class EchoException(Exception):
...
def EchoRE(plan, *, debug=False, msg_list=None):
'''An 'echo' RunEngine for testing.
Always sends the message back into the plan as the result.
Parameters
----------
plan : iterable
The plan to run through
debug : bool, optional (False)
print the messages on the way by
msg_list : mutable sequence, optional
If not None, mutate this object by appending messages.
This is the easiest way to capture messages if the plan
raises.
Returns
-------
msg_list : list
List of all the messages seen by the RE
'''
if msg_list is None:
msg_list = deque()
ret = None
plan = ensure_generator(plan)
while True:
try:
msg = plan.send(ret)
if debug:
print(msg)
msg_list.append(msg)
if msg.command == 'FAIL':
plan.throw(EchoException(msg))
ret = msg
except StopIteration:
break
return list(msg_list)
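# A short sketch of the echo behaviour (not part of the original tests): every
# yielded Msg is sent straight back into the plan as its own result, so
#   msgs = EchoRE(echo_plan(num=2))
# returns two Msg objects whose .command is 'echo' and whose .obj values were
# generated (and asserted against) inside echo_plan itself.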
def echo_plan(*, command='echo', num=4):
'''Testing plan which expects to get back a message with an equal object
'''
seed = str(uuid.uuid4())[:6]
for ch in map(lambda x: chr(97 + x), range(num)):
sent = '{}_{}'.format(seed, ch)
ret = yield Msg(command, sent)
assert ret.obj == sent
def test_mutator_exceptions():
handled = False
def base_plan():
yield Msg('foo')
yield Msg('bar')
def failing_plan():
nonlocal handled
handled = False
yield Msg('pre')
try:
yield Msg('FAIL')
except EchoException:
handled = True
raise
def test_mutator(msg):
if msg.command == 'bar':
return (
failing_plan(),
single_message_gen(Msg('foo'))
)
return None, None
# check generator exit behavior
plan = plan_mutator(base_plan(), test_mutator)
next(plan)
plan.close()
# check exception fall through
plan = plan_mutator(base_plan(), test_mutator)
with pytest.raises(EchoException):
EchoRE(plan, debug=True)
assert handled
def _verify_msg_seq(msgs, *,
cmd_sq=None,
obj_sq=None,
args_sq=None,
kwargs_sq=None,
m_len=None):
def _verify_cmpt(msgs, seq, cmpt):
for m, s in zip_longest(msgs, seq):
assert getattr(m, cmpt) == s
if m_len is not None:
assert len(msgs) == m_len
if cmd_sq is not None:
_verify_cmpt(msgs, cmd_sq, 'command')
if obj_sq is not None:
_verify_cmpt(msgs, obj_sq, 'obj')
if args_sq is not None:
_verify_cmpt(msgs, args_sq, 'args')
if kwargs_sq is not None:
_verify_cmpt(msgs, kwargs_sq, 'kwargs')
def test_smoke_test():
num = 10
cmd = 'smoke'
msgs = EchoRE(echo_plan(command=cmd, num=num))
_verify_msg_seq(msgs, m_len=num,
cmd_sq=[cmd]*num,
args_sq=[()]*num,
kwargs_sq=[{}]*num)
def test_simple_replace():
new_cmd = 'replaced'
def change_command(msg):
return msg._replace(command=new_cmd)
num = 10
msgs = EchoRE(msg_mutator(echo_plan(num=num),
change_command))
_verify_msg_seq(msgs, m_len=num,
cmd_sq=[new_cmd]*num,
args_sq=[()]*num,
kwargs_sq=[{}]*num)
def test_simple_mutator():
_mut_active = True
pre_count = 3
post_count = 5
pre_cmd = 'pre'
post_cmd = 'post'
def test_mutator(msg):
nonlocal _mut_active
if _mut_active:
_mut_active = False
return (pchain(echo_plan(num=pre_count, command=pre_cmd),
single_message_gen(msg)),
echo_plan(num=post_count, command=post_cmd))
return None, None
num = 5
cmd = 'echo'
plan = plan_mutator(echo_plan(command=cmd, num=num), test_mutator)
msgs = EchoRE(plan)
total = num + pre_count + post_count
cmd_sq = ([pre_cmd]*pre_count +
[cmd] +
[post_cmd]*post_count +
[cmd]*(num-1))
_verify_msg_seq(msgs, m_len=total,
cmd_sq=cmd_sq,
args_sq=[()]*total,
kwargs_sq=[{}]*total)
def test_finalize_fail():
fail_cmd = 'fail_next'
def erroring_plan():
yield Msg(fail_cmd, None)
raise RuntimeError('saw this coming')
num = 5
cmd = 'echo'
plan = finalize_wrapper(erroring_plan(),
echo_plan(command=cmd, num=num))
msgs = list()
try:
EchoRE(plan, msg_list=msgs)
except RuntimeError:
pass
total = num + 1
_verify_msg_seq(msgs, m_len=total,
cmd_sq=[fail_cmd] + [cmd]*num,
args_sq=[()]*total,
kwargs_sq=[{}]*total)
def test_finalize_pause():
fail_cmd = 'fail_next'
def erroring_plan():
yield Msg(fail_cmd, None)
raise RuntimeError('saw this coming')
num = 5
cmd = 'echo'
plan = finalize_wrapper(erroring_plan(),
echo_plan(command=cmd, num=num),
pause_for_debug=True)
msgs = list()
try:
EchoRE(plan, msg_list=msgs)
except RuntimeError:
pass
total = num + 2
_verify_msg_seq(msgs, m_len=total,
cmd_sq=[fail_cmd, 'pause'] + [cmd]*num,
args_sq=[()]*total,
kwargs_sq=[{}, {'defer': False}] + [{}]*num)
def test_finalize_success():
suc_cmd = 'it_works'
num = 5
cmd = 'echo'
plan = finalize_wrapper(single_message_gen(Msg(suc_cmd, None)),
echo_plan(command=cmd, num=num))
msgs = list()
try:
EchoRE(plan, msg_list=msgs)
except RuntimeError:
pass
total = num + 1
_verify_msg_seq(msgs, m_len=total,
cmd_sq=[suc_cmd] + [cmd]*num,
args_sq=[()]*total,
kwargs_sq=[{}]*total)
def test_plan_mutator_exception_propagation():
class ExpectedException(Exception):
pass
num = 5
cmd1 = 'echo1'
cmd2 = 'echo2'
def bad_tail():
yield Msg('one_tail', None)
raise ExpectedException('this is a test')
def sarfing_plan():
try:
yield from echo_plan(command=cmd1, num=num)
except ExpectedException:
print('CAUGHT IT')
_mut_active = True
def test_mutator(msg):
nonlocal _mut_active
if _mut_active:
_mut_active = False
return (pchain(echo_plan(num=2, command=cmd2),
single_message_gen(msg)),
bad_tail())
return None, None
plan = plan_mutator(sarfing_plan(), test_mutator)
EchoRE(plan, debug=True)
def test_exception_in_pre_with_tail():
class SnowFlake(Exception):
...
def bad_pre():
yield Msg('pre_bad', None)
raise SnowFlake('this one')
def good_post():
yield Msg('good_post', None)
def test_mutator(msg):
if msg.command == 'TARGET':
return bad_pre(), good_post()
return None, None
def testing_plan():
yield Msg('a', None)
yield Msg('b', None)
try:
yield Msg('TARGET', None)
except SnowFlake:
pass
yield Msg('b', None)
yield Msg('a', None)
plan = plan_mutator(testing_plan(), test_mutator)
msgs = EchoRE(plan, debug=True)
_verify_msg_seq(msgs, m_len=5,
cmd_sq=['a', 'b', 'pre_bad', 'b', 'a'],
args_sq=[()]*5,
kwargs_sq=[{}]*5)
def test_plan_mutator_returns():
def testing_plan():
yield Msg('a', None)
yield Msg('TARGET', None)
yield Msg('b', None)
return 'foobar'
def outer_plan(pln):
ret = (yield from pln)
assert ret == 'foobar'
return ret
def tail_plan():
yield Msg('A', None)
return 'baz'
def test_mutator(msg):
def pre_plan():
yield Msg('pre', None)
yield msg
if msg.command == 'TARGET':
return pre_plan(), tail_plan()
return None, None
plan = plan_mutator(testing_plan(), test_mutator)
msgs = EchoRE(plan)
_verify_msg_seq(msgs, m_len=5,
cmd_sq=['a', 'pre', 'TARGET', 'A', 'b'],
args_sq=[()]*5,
kwargs_sq=[{}]*5)
def test_insert_before():
def target():
yield Msg('a', None)
ret = yield Msg('TARGET', None)
yield Msg('b', None)
assert ret.command == 'TARGET'
return ret
def insert_before(msg):
if msg.command == 'TARGET':
def pre():
yield Msg('pre', None)
ret = yield msg
assert ret is not None
assert ret.command == 'TARGET'
return ret
return pre(), None
else:
return None, None
EchoRE(plan_mutator(target(), insert_before))
def test_insert_after():
def target():
yield Msg('a', None)
ret = yield Msg('TARGET', None)
yield Msg('b', None)
assert ret is not None
assert ret.command == 'TARGET'
return ret
def insert_after(msg):
if msg.command == 'TARGET':
def post():
yield Msg('post', None)
return None, post()
else:
return None, None
EchoRE(plan_mutator(target(), insert_after))
def test_base_exception():
class SnowFlake(Exception):
...
def null_mutator(msg):
return None, None
def test_plan():
yield Msg('a', None)
raise SnowFlake('this one')
pln = plan_mutator(test_plan(), null_mutator)
try:
EchoRE(pln)
except SnowFlake as ex:
assert ex.args[0] == 'this one'
def test_msg_mutator_skip():
def skipper(msg):
if msg.command == 'SKIP':
return None
return msg
def skip_plan():
for c in 'abcd':
yield Msg(c, None)
yield Msg('SKIP', None)
pln = msg_mutator(skip_plan(), skipper)
msgs = EchoRE(pln)
_verify_msg_seq(msgs, m_len=4,
cmd_sq='abcd',
args_sq=[()]*4,
kwargs_sq=[{}]*4)
def test_stub_wrapper():
def plan():
yield Msg('open_run')
yield Msg('stage')
yield Msg('read')
yield Msg('unstage')
yield Msg('close_run')
stub_plan = list(stub_wrapper(plan()))
assert len(stub_plan) == 1
assert stub_plan[0].command == 'read'
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import distro
import re
class TranslationRule(object):
pass
class SingleRule(TranslationRule):
"""
Translate a given module name
mod: the python module name (usually the pypi name)
pkg: the unversioned translated package name
py2pkg: the python2 versioned translated package name
py3pkg: the python3 versioned translated package name
"""
def __init__(self, mod, pkg, py2pkg=None, py3pkg=None, distmap=None):
self.mod = mod
self.pkg = pkg
self.py2pkg = py2pkg if py2pkg else pkg
self.py3pkg = py3pkg if py3pkg else pkg
self.distmap = distmap
def __call__(self, mod, dist):
if mod != self.mod:
return None
if self.distmap and dist:
for distrex in self.distmap:
if re.match(distrex, dist):
return self.distmap[distrex]
return (self.pkg, self.py2pkg, self.py3pkg)
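# Illustrative use of a SingleRule (mirrors entries in RDO_PKG_MAP below):
#   rule = SingleRule('PyYAML', 'PyYAML', py3pkg='python3-PyYAML')
#   rule('PyYAML', 'fedora')    # -> ('PyYAML', 'PyYAML', 'python3-PyYAML')
#   rule('requests', 'fedora')  # -> None, the rule does not match this module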
class MultiRule(TranslationRule):
def __init__(self, mods, pkgfun):
self.mods = mods
self.pkgfun = pkgfun
def __call__(self, mod, dist):
if mod in self.mods:
pkg, py2pkg, py3pkg = self.pkgfun(mod)
return (pkg, py2pkg, py3pkg)
return None
class RegexRule(TranslationRule):
def __init__(self, pattern, pkgfun):
self.pattern = pattern
self.pkgfun = pkgfun
def __call__(self, mod, dist):
if re.match(self.pattern, mod):
pkg, py2pkg, py3pkg = self.pkgfun(mod)
return (pkg, py2pkg, py3pkg)
return None
def default_rdo_tr(mod):
"""
Default translation function for Fedora/RDO based systems
"""
pkg = mod.rsplit('-python')[0]
pkg = pkg.replace('_', '-').replace('.', '-').lower()
if not pkg.startswith('python-'):
pkg = 'python-' + pkg
py2pkg = pkg
py3pkg = re.sub('python', 'python3', pkg)
return (pkg, py2pkg, py3pkg)
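# For example, default_rdo_tr('oslo.config') would produce
# ('python-oslo-config', 'python-oslo-config', 'python3-oslo-config').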
def default_ubuntu_tr(mod):
"""
Default translation function for Ubuntu based systems
"""
pkg = 'python-%s' % mod.lower()
py2pkg = pkg
py3pkg = 'python3-%s' % mod.lower()
return (pkg, py2pkg, py3pkg)
def default_suse_tr(mod):
"""
Default translation function for openSUSE, SLES, and other
SUSE based systems
Returns a tuple of 3 elements - the unversioned name, the python2 versioned
name and the python3 versioned name.
"""
pkg = 'python-%s' % mod
py2pkg = 'python2-%s' % mod
py3pkg = 'python3-%s' % mod
return (pkg, py2pkg, py3pkg)
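# For example, default_suse_tr('requests') would produce
# ('python-requests', 'python2-requests', 'python3-requests').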
def openstack_prefix_tr(mod):
pkg = 'openstack-' + mod.lower()
return (pkg, '', '')
def rdo_horizon_plugins_tr(mod):
mod = mod.replace('dashboard', 'ui')
pkg = 'openstack-' + mod
return (pkg, '', '')
def suse_horizon_plugins_tr(mod):
mod = mod.replace('dashboard', 'ui')
pkg = 'openstack-horizon-plugin-' + mod
return (pkg, '', '')
def rdo_xstatic_tr(mod):
mod = mod.replace('_', '-').replace('.', '-')
pkg = 'python-' + mod
py3pkg = 'python3-' + mod
return (pkg, pkg, py3pkg)
def same_name_python_subst_python3(mod):
py3pkg = re.sub('python', 'python3', mod)
return (mod, mod, py3pkg)
def subst_python2_python3(mod):
pkg = mod
py2pkg = re.sub('python', 'python2', mod)
py3pkg = re.sub('python', 'python3', mod)
return (pkg, py2pkg, py3pkg)
def rdo_tempest_plugins_tr(mod):
mod = mod.replace('tempest-plugin', 'tests-tempest')
pkg = 'python-' + mod
py2pkg = pkg
py3pkg = 'python3-' + mod
return (pkg, py2pkg, py3pkg)
# keep lists in alphabetic order
SERVICES_MAP = [
'Tempest', 'aodh', 'barbican', 'ceilometer', 'cinder',
'cloudkitty', 'cyborg', 'designate', 'ec2-api', 'freezer', 'freezer-api',
'freezer-dr', 'glance', 'heat', 'heat-templates', 'ironic',
'ironic-discoverd', 'ironic-inspector', 'ironic-python-agent',
'keystone', 'magnum', 'manila', 'masakari', 'masakari-monitors',
'mistral', 'monasca-agent', 'monasca-api', 'monasca-ceilometer',
'monasca-log-api', 'monasca-notification', 'monasca-persister',
'monasca-transform', 'murano', 'neutron', 'neutron-fwaas',
'neutron-lbaas', 'neutron-vpnaas', 'nova', 'octavia', 'placement',
'rally', 'sahara', 'swift', 'tempest', 'tripleo-common', 'trove', 'tuskar',
'vitrage', 'watcher', 'zaqar', 'zun']
RDO_PKG_MAP = [
# This demonstrates per-dist filter
# SingleRule('sphinx', 'python-sphinx',
# distmap={'epel-6': 'python-sphinx10'}),
SingleRule('ansible', 'ansible'),
SingleRule('ansible-runner', 'python-ansible-runner',
py3pkg='python3-ansible-runner'),
SingleRule('APScheduler', 'python-APScheduler',
py3pkg='python3-APScheduler'),
SingleRule('Babel', 'python-babel', py3pkg='python3-babel'),
SingleRule('bandit', 'bandit'),
SingleRule('distribute', 'python-setuptools', py3pkg='python3-setuptools'),
SingleRule('dnspython', 'python-dns', py3pkg='python3-dns'),
SingleRule('google-api-python-client', 'python-google-api-client',
py3pkg='python3-google-api-client'),
SingleRule('GitPython', 'GitPython', py3pkg='python3-GitPython'),
SingleRule('heat-agents', 'openstack-heat-agents',
py3pkg='openstack-heat-agents'),
SingleRule('IPy', 'python-IPy', py3pkg='python-IPy-python3'),
SingleRule('pycrypto', 'python-crypto', py3pkg='python3-crypto'),
SingleRule('pyzmq', 'python-zmq', py3pkg='python3-zmq'),
SingleRule('mysql-python', 'MySQL-python', py3pkg='python3-mysql'),
SingleRule('PyMySQL', 'python-PyMySQL', py3pkg='python3-PyMySQL'),
SingleRule('PyJWT', 'python-jwt', py3pkg='python3-jwt'),
SingleRule('MySQL-python', 'MySQL-python', py3pkg='python3-mysql'),
SingleRule('PasteDeploy', 'python-paste-deploy',
py3pkg='python3-paste-deploy'),
SingleRule('sqlalchemy-migrate', 'python-migrate',
py3pkg='python3-migrate'),
SingleRule('qpid-python', 'python-qpid'),
SingleRule('nosexcover', 'python-nose-xcover',
py3pkg='python3-nose-xcover'),
SingleRule('posix_ipc', 'python-posix_ipc', py3pkg='python3-posix_ipc'),
SingleRule('prometheus-client', 'python-prometheus_client',
py3pkg='python3-prometheus_client'),
SingleRule('sysv_ipc', 'python-sysv_ipc', py3pkg='python3-sysv_ipc'),
SingleRule('oslosphinx', 'python-oslo-sphinx',
py3pkg='python3-oslo-sphinx'),
SingleRule('ovs', 'python-openvswitch', py3pkg='python3-openvswitch'),
SingleRule('pyinotify', 'python-inotify', py3pkg='python3-inotify'),
SingleRule('pyScss', 'python-scss', py3pkg='python3-scss'),
SingleRule('tripleo-incubator', 'openstack-tripleo'),
SingleRule('pika-pool', 'python-pika_pool', py3pkg='python3-pika_pool'),
SingleRule('suds-community', 'python-suds', py3pkg='python3-suds'),
SingleRule('suds-jurko', 'python-suds', py3pkg='python3-suds'),
SingleRule('supervisor', 'supervisor', py3pkg='python3-supervisor'),
SingleRule('wsgi_intercept', 'python-wsgi_intercept',
py3pkg='python3-wsgi_intercept'),
SingleRule('Sphinx', 'python-sphinx', py3pkg='python3-sphinx'),
SingleRule('sphinx_rtd_theme', 'python-sphinx_rtd_theme',
py3pkg='python3-sphinx_rtd_theme'),
SingleRule('xattr', 'pyxattr', py3pkg='python3-pyxattr'),
SingleRule('XStatic-term.js', 'python-XStatic-termjs',
py3pkg='python3-XStatic-termjs'),
SingleRule('heat-cfntools', 'heat-cfntools'),
SingleRule('horizon', 'openstack-dashboard'),
SingleRule('openstack-placement', 'openstack-placement'),
SingleRule('networking-vsphere', 'openstack-neutron-vsphere'),
SingleRule('networking-l2gw', 'openstack-neutron-l2gw'),
SingleRule('neutron-dynamic-routing', 'openstack-neutron-dynamic-routing'),
SingleRule('m2crypto', 'm2crypto'),
SingleRule('libvirt-python', 'libvirt-python', py3pkg='libvirt-python3'),
SingleRule('tempest-horizon', 'python-horizon-tests-tempest'),
SingleRule('rtslib-fb', 'python-rtslib', py3pkg='python3-rtslib'),
SingleRule('PyYAML', 'PyYAML', py3pkg='python3-PyYAML'),
SingleRule('pyOpenSSL', 'python-pyOpenSSL', py3pkg='python3-pyOpenSSL'),
SingleRule('semantic_version', 'python-semantic_version',
py3pkg='python3-semantic_version'),
SingleRule('sphinxcontrib-svg2pdfconverter',
'python-sphinxcontrib-rsvgconverter',
py3pkg='python3-sphinxcontrib-rsvgconverter'),
# simple direct mapping no name change
MultiRule(
mods=['numpy', 'pyflakes', 'pylint',
'dib-utils',
'diskimage-builder',
'graphviz',
'instack-undercloud',
'os-apply-config',
'os-collect-config',
'os-net-config',
'os-refresh-config',
'pexpect',
'protobuf',
'sympy',
'systemd-python',
'watchdog',
'pystache', 'pysendfile'],
pkgfun=lambda mod: ((mod, mod, 'python3-' + mod))),
# OpenStack services
MultiRule(mods=SERVICES_MAP, pkgfun=openstack_prefix_tr),
# XStatic projects (name is python-pypi_name, no lowercase conversion)
RegexRule(pattern=r'^XStatic.*', pkgfun=rdo_xstatic_tr),
# Horizon plugins (normalized to openstack-<project>-ui)
RegexRule(pattern=r'^(neutron-)?\w+-(dashboard|ui)',
pkgfun=rdo_horizon_plugins_tr),
# Tempest plugins (normalized to python-<project>-tests-tempest)
RegexRule(pattern=r'\w+-tempest-plugin', pkgfun=rdo_tempest_plugins_tr)
]
SUSE_PKG_MAP = [
# not following SUSE naming policy
SingleRule('ansible', 'ansible'),
SingleRule('ansible-runner', 'ansible-runner'),
SingleRule('python-ldap', 'python-ldap'),
# OpenStack services
MultiRule(mods=SERVICES_MAP, pkgfun=openstack_prefix_tr),
# OpenStack clients
MultiRule(
mods=['python-%sclient' % c for c in (
'barbican', 'ceilometer', 'cinder', 'cloudkitty',
'congress', 'cue', 'cyborg', 'designate', 'distil', 'drac', 'fuel',
'freezer', 'heat', 'glance', 'glare', 'ironic',
'ironic-inspector-', 'k8s', 'keystone',
'magnum', 'manila', 'masakari', 'mistral', 'monasca',
'murano', 'nimble', 'neutron', 'nova', 'octavia', 'oneview',
'openstack', 'sahara', 'scci', 'senlin',
'smaug', 'solum', 'swift', 'tacker', 'tripleo', 'trove',
'vitrage', 'watcher', 'zaqar', 'zun')],
pkgfun=subst_python2_python3),
SingleRule('devel', 'python-devel', py3pkg='python3-devel'),
SingleRule('openstack-placement', 'openstack-placement'),
# ui components
SingleRule('designate-dashboard', 'openstack-horizon-plugin-designate-ui'),
SingleRule('freezer-web-ui', 'openstack-horizon-plugin-freezer-ui'),
SingleRule('group-based-policy-ui', 'openstack-horizon-plugin-gbp-ui'),
SingleRule('heat-agents', 'openstack-heat-agents',
py3pkg='openstack-heat-agents'),
SingleRule('horizon', 'openstack-dashboard'),
SingleRule('ironic-ui', 'openstack-horizon-plugin-ironic-ui'),
SingleRule('magnum-ui', 'openstack-horizon-plugin-magnum-ui'),
SingleRule('manila-ui', 'openstack-horizon-plugin-manila-ui'),
SingleRule('monasca-ui', 'openstack-horizon-plugin-monasca-ui'),
SingleRule('murano-dashboard', 'openstack-horizon-plugin-murano-ui'),
SingleRule('networking-vsphere', 'openstack-neutron-vsphere'),
SingleRule('networking-l2gw', 'openstack-neutron-l2gw'),
SingleRule('neutron-dynamic-routing', 'openstack-neutron-dynamic-routing'),
RegexRule(pattern=r'^(neutron-)?\w+-(dashboard|ui)',
pkgfun=suse_horizon_plugins_tr),
]
UBUNTU_PKG_MAP = [
SingleRule('glance_store', 'python-glance-store'),
SingleRule('GitPython', 'python-git'),
SingleRule('libvirt-python', 'python-libvirt'),
SingleRule('PyMySQL', 'python-mysql'),
SingleRule('pyOpenSSL', 'python-openssl'),
SingleRule('PyYAML', 'python-yaml'),
SingleRule('sqlalchemy-migrate', 'python-migrate'),
SingleRule('suds-jurko', 'python-suds'),
# Openstack clients
MultiRule(
mods=['python-%sclient' % c for c in (
'barbican', 'ceilometer', 'cinder', 'cloudkitty', 'congress',
'designate', 'fuel', 'heat', 'glance', 'ironic',
'keystone',
'magnum', 'manila', 'masakari', 'mistral', 'monasca',
'murano', 'neutron', 'nova', 'octavia',
'openstack', 'sahara',
'senlin', 'swift',
'trove', 'zaqar', 'zun')],
pkgfun=same_name_python_subst_python3),
]
OPENSTACK_UPSTREAM_PKG_MAP = [
SingleRule('openstack-placement', 'placement'),
SingleRule('gnocchiclient', 'python-gnocchiclient'),
SingleRule('aodhclient', 'python-aodhclient'),
SingleRule('keystoneauth1', 'keystoneauth'),
SingleRule('microversion_parse', 'microversion-parse'),
SingleRule('XStatic-smart-table', 'xstatic-angular-smart-table'),
]
def get_pkg_map(dist):
d_lower = dist.lower()
if 'suse' in d_lower or 'sles' in d_lower:
return SUSE_PKG_MAP
if 'ubuntu' in d_lower:
return UBUNTU_PKG_MAP
return RDO_PKG_MAP
def get_default_tr_func(dist):
d_lower = dist.lower()
if 'suse' in d_lower or 'sles' in d_lower:
return default_suse_tr
if 'ubuntu' in d_lower:
return default_ubuntu_tr
return default_rdo_tr
def module2package(mod, dist, pkg_map=None, py_vers=('py',)):
"""Return a corresponding package name for a python module.
mod: python module name
dist: a linux distribution as returned by
`distro.LinuxDistribution().id().partition(' ')[0]`
pkg_map: a custom package mapping. None means autodetected based on the
given dist parameter
py_vers: a list of python versions the function should return. Default is
'py' which is the unversioned translation. Possible values are
'py', 'py2' and 'py3'
"""
if not pkg_map:
pkg_map = get_pkg_map(dist)
for rule in pkg_map:
pkglist = rule(mod, dist)
if pkglist:
break
else:
tr_func = get_default_tr_func(dist)
pkglist = tr_func(mod)
output = []
for v in py_vers:
if v == 'py':
output.append(pkglist[0])
elif v == 'py2':
output.append(pkglist[1])
elif v == 'py3':
output.append(pkglist[2])
else:
raise Exception('Invalid version "%s"' % (v))
if len(output) == 1:
# just return a single value (backwards compatible)
return output[0]
else:
return output
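# Illustrative usage sketch (module and package names below are hypothetical;
# the exact result depends on the mapping rules and default translators above):
#
#   >>> module2package('oslo.config', 'fedora', py_vers=['py3'])
#   'python3-oslo-config'                               # one version -> string
#   >>> module2package('oslo.config', 'fedora', py_vers=['py', 'py3'])
#   ['python-oslo-config', 'python3-oslo-config']       # several versions -> list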
def module2upstream(mod):
"""Return a corresponding OpenStack upstream name for a python module.
mod -- python module name
"""
for rule in OPENSTACK_UPSTREAM_PKG_MAP:
pkglist = rule(mod, dist=None)
if pkglist:
return pkglist[0]
return mod
def main():
"""for resolving names from command line"""
    parser = argparse.ArgumentParser(description='Python module name to '
                                     'package name')
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--dist', help='distribution style (default: %(default)s)',
default=distro.LinuxDistribution().id().partition(' ')[0])
group.add_argument('--upstream', help='map to OpenStack project name',
action='store_true')
parser.add_argument('--pyver', help='Python versions to return. "py" is '
'the unversioned name',
action='append', choices=['py', 'py2', 'py3'],
default=[])
parser.add_argument('modulename', help='python module name')
args = vars(parser.parse_args())
pyversions = args['pyver'] if args['pyver'] else ['py']
if args['upstream']:
print(module2upstream(args['modulename']))
else:
pylist = module2package(args['modulename'], args['dist'],
py_vers=pyversions)
# When only 1 version is requested, it will be returned as a string,
# for backwards compatibility. Else, it will be a list.
if type(pylist) is list:
print(' '.join(pylist))
else:
print(pylist)
# for debugging to call the file directly
if __name__ == "__main__":
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubsOperations(object):
"""VirtualHubsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
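    # Typical access pattern (sketch, not part of the generated code): this
    # operations group is normally reached through the versioned service
    # client rather than instantiated directly, e.g. assuming azure-identity
    # for credentials:
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network.v2019_08_01 import NetworkManagementClient
    #
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   hub = client.virtual_hubs.get("my-resource-group", "my-virtual-hub")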
def get(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
"""Retrieves the details of a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHub, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.VirtualHub
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.VirtualHub"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'VirtualHub')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.VirtualHub"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualHub"]
"""Creates a VirtualHub resource if it doesn't exist else updates the existing VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to create or update VirtualHub.
:type virtual_hub_parameters: ~azure.mgmt.network.v2019_08_01.models.VirtualHub
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHub or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.VirtualHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
virtual_hub_parameters=virtual_hub_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
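    # LRO usage sketch (illustrative names): begin_* methods return an
    # LROPoller; poller.result() blocks until the operation completes and
    # yields the deserialized VirtualHub.
    #
    #   poller = client.virtual_hubs.begin_create_or_update(
    #       "my-resource-group", "my-virtual-hub", hub_parameters)
    #   hub = poller.result()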
def _update_tags_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualHub"]
"""Updates VirtualHub tags.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to update VirtualHub tags.
:type virtual_hub_parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHub or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.VirtualHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
virtual_hub_parameters=virtual_hub_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubsResult"]
"""Lists all the VirtualHubs in a resource group.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubsResult"]
"""Lists all the VirtualHubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualHubs'} # type: ignore
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import stat
from cryptography import fernet
from oslo_log import log
import keystone.conf
from keystone.i18n import _LE, _LW, _LI
LOG = log.getLogger(__name__)
CONF = keystone.conf.CONF
def validate_key_repository(requires_write=False):
"""Validate permissions on the key repository directory."""
# NOTE(lbragstad): We shouldn't need to check if the directory was passed
# in as None because we don't set allow_no_values to True.
# ensure current user has sufficient access to the key repository
is_valid = (os.access(CONF.fernet_tokens.key_repository, os.R_OK) and
os.access(CONF.fernet_tokens.key_repository, os.X_OK))
if requires_write:
is_valid = (is_valid and
os.access(CONF.fernet_tokens.key_repository, os.W_OK))
if not is_valid:
LOG.error(
_LE('Either [fernet_tokens] key_repository does not exist or '
'Keystone does not have sufficient permission to access it: '
'%s'), CONF.fernet_tokens.key_repository)
else:
# ensure the key repository isn't world-readable
stat_info = os.stat(CONF.fernet_tokens.key_repository)
        if (stat_info.st_mode & stat.S_IROTH or
stat_info.st_mode & stat.S_IXOTH):
LOG.warning(_LW(
'[fernet_tokens] key_repository is world readable: %s'),
CONF.fernet_tokens.key_repository)
return is_valid
def _convert_to_integers(id_value):
"""Cast user and group system identifiers to integers."""
# NOTE(lbragstad) os.chown() will raise a TypeError here if
# keystone_user_id and keystone_group_id are not integers. Let's
# cast them to integers if we can because it's possible to pass non-integer
# values into the fernet_setup utility.
try:
id_int = int(id_value)
except ValueError as e:
msg = _LE('Unable to convert Keystone user or group ID. Error: %s')
LOG.error(msg, e)
raise
return id_int
def create_key_directory(keystone_user_id=None, keystone_group_id=None):
"""If the configured key directory does not exist, attempt to create it."""
if not os.access(CONF.fernet_tokens.key_repository, os.F_OK):
LOG.info(_LI(
'[fernet_tokens] key_repository does not appear to exist; '
'attempting to create it'))
try:
os.makedirs(CONF.fernet_tokens.key_repository, 0o700)
except OSError:
LOG.error(_LE(
'Failed to create [fernet_tokens] key_repository: either it '
'already exists or you don\'t have sufficient permissions to '
'create it'))
if keystone_user_id and keystone_group_id:
os.chown(
CONF.fernet_tokens.key_repository,
keystone_user_id,
keystone_group_id)
elif keystone_user_id or keystone_group_id:
LOG.warning(_LW(
'Unable to change the ownership of [fernet_tokens] '
'key_repository without a keystone user ID and keystone group '
'ID both being provided: %s') %
CONF.fernet_tokens.key_repository)
def _create_new_key(keystone_user_id, keystone_group_id):
"""Securely create a new encryption key.
Create a new key that is readable by the Keystone group and Keystone user.
"""
key = fernet.Fernet.generate_key() # key is bytes
# This ensures the key created is not world-readable
old_umask = os.umask(0o177)
if keystone_user_id and keystone_group_id:
old_egid = os.getegid()
old_euid = os.geteuid()
os.setegid(keystone_group_id)
os.seteuid(keystone_user_id)
elif keystone_user_id or keystone_group_id:
LOG.warning(_LW(
'Unable to change the ownership of the new key without a keystone '
'user ID and keystone group ID both being provided: %s') %
CONF.fernet_tokens.key_repository)
# Determine the file name of the new key
key_file = os.path.join(CONF.fernet_tokens.key_repository, '0')
try:
with open(key_file, 'w') as f:
f.write(key.decode('utf-8')) # convert key to str for the file.
finally:
        # After writing the key, set the umask back to its original value. Do
# the same with group and user identifiers if a Keystone group or user
# was supplied.
os.umask(old_umask)
if keystone_user_id and keystone_group_id:
os.seteuid(old_euid)
os.setegid(old_egid)
LOG.info(_LI('Created a new key: %s'), key_file)
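# Permission sketch: with the temporary umask of 0o177 set above, a key file
# created via open(..., 'w') (default creation mode 0o666) ends up with mode
# 0o666 & ~0o177 == 0o600, i.e. readable and writable only by its owner (the
# Keystone user when seteuid()/setegid() were applied).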
def initialize_key_repository(keystone_user_id=None, keystone_group_id=None):
"""Create a key repository and bootstrap it with a key.
:param keystone_user_id: User ID of the Keystone user.
:param keystone_group_id: Group ID of the Keystone user.
"""
# make sure we have work to do before proceeding
if os.access(os.path.join(CONF.fernet_tokens.key_repository, '0'),
os.F_OK):
LOG.info(_LI('Key repository is already initialized; aborting.'))
return
# bootstrap an existing key
_create_new_key(keystone_user_id, keystone_group_id)
# ensure that we end up with a primary and secondary key
rotate_keys(keystone_user_id, keystone_group_id)
def rotate_keys(keystone_user_id=None, keystone_group_id=None):
"""Create a new primary key and revoke excess active keys.
:param keystone_user_id: User ID of the Keystone user.
:param keystone_group_id: Group ID of the Keystone user.
Key rotation utilizes the following behaviors:
- The highest key number is used as the primary key (used for encryption).
- All keys can be used for decryption.
- New keys are always created as key "0," which serves as a placeholder
before promoting it to be the primary key.
This strategy allows you to safely perform rotation on one node in a
cluster, before syncing the results of the rotation to all other nodes
(during both key rotation and synchronization, all nodes must recognize all
primary keys).
"""
# read the list of key files
key_files = dict()
for filename in os.listdir(CONF.fernet_tokens.key_repository):
path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
if os.path.isfile(path):
try:
key_id = int(filename)
except ValueError: # nosec : name isn't a number, ignore the file.
pass
else:
key_files[key_id] = path
LOG.info(_LI('Starting key rotation with %(count)s key files: %(list)s'), {
'count': len(key_files),
'list': list(key_files.values())})
# determine the number of the new primary key
current_primary_key = max(key_files.keys())
LOG.info(_LI('Current primary key is: %s'), current_primary_key)
new_primary_key = current_primary_key + 1
LOG.info(_LI('Next primary key will be: %s'), new_primary_key)
# promote the next primary key to be the primary
os.rename(
os.path.join(CONF.fernet_tokens.key_repository, '0'),
os.path.join(CONF.fernet_tokens.key_repository, str(new_primary_key)))
key_files.pop(0)
key_files[new_primary_key] = os.path.join(
CONF.fernet_tokens.key_repository,
str(new_primary_key))
LOG.info(_LI('Promoted key 0 to be the primary: %s'), new_primary_key)
# add a new key to the rotation, which will be the *next* primary
_create_new_key(keystone_user_id, keystone_group_id)
max_active_keys = CONF.fernet_tokens.max_active_keys
# purge excess keys
# Note that key_files doesn't contain the new active key that was created,
# only the old active keys.
keys = sorted(key_files.keys(), reverse=True)
while len(keys) > (max_active_keys - 1):
index_to_purge = keys.pop()
key_to_purge = key_files[index_to_purge]
LOG.info(_LI('Excess key to purge: %s'), key_to_purge)
os.remove(key_to_purge)
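# Worked example of a single rotation with max_active_keys = 3 and existing
# key files {0, 1, 2} (2 being the current primary):
#   1. the staged key "0" is renamed to "3" and becomes the new primary,
#   2. a fresh staged key "0" is created for the next rotation,
#   3. the oldest secondary key "1" is purged, leaving {0, 2, 3} on disk.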
def load_keys():
"""Load keys from disk into a list.
The first key in the list is the primary key used for encryption. All
other keys are active secondary keys that can be used for decrypting
tokens.
"""
if not validate_key_repository():
return []
# build a dictionary of key_number:encryption_key pairs
keys = dict()
for filename in os.listdir(CONF.fernet_tokens.key_repository):
path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
if os.path.isfile(path):
with open(path, 'r') as key_file:
try:
key_id = int(filename)
except ValueError: # nosec : filename isn't a number, ignore
# this file since it's not a key.
pass
else:
keys[key_id] = key_file.read()
if len(keys) != CONF.fernet_tokens.max_active_keys:
# If there haven't been enough key rotations to reach max_active_keys,
# or if the configured value of max_active_keys has changed since the
# last rotation, then reporting the discrepancy might be useful. Once
# the number of keys matches max_active_keys, this log entry is too
# repetitive to be useful.
LOG.info(_LI(
'Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: '
'%(dir)s'), {
'count': len(keys),
'max': CONF.fernet_tokens.max_active_keys,
'dir': CONF.fernet_tokens.key_repository})
# return the encryption_keys, sorted by key number, descending
return [keys[x] for x in sorted(keys.keys(), reverse=True)]
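# Ordering sketch: with key files {0: staged, 1: old, 2: primary}, load_keys()
# returns [primary, old, staged]; the highest-numbered (primary) key is always
# first, so callers encrypt with index 0 and may decrypt with any entry.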
|
|
# -*- coding: utf-8 -*-
import collections
import datetime
import io
import json
import os
import re
import traceback
import six
try:
import configobj
except ImportError:
configobj = None
try:
import json5
except ImportError:
json5 = None
try:
import toml
except ImportError:
toml = None
try:
import xmltodict
except ImportError:
xmltodict = None
try:
import yaml
except ImportError:
yaml = None
__all__ = ['AnyMarkupError', 'parse', 'parse_file', 'serialize', 'serialize_file']
__version__ = '0.8.1'
fmt_to_exts = {'ini': ['ini'],
'json': ['json'],
'json5': ['json5'],
'toml': ['toml'],
'xml': ['xml'],
'yaml': ['yaml', 'yml']}
fmt_to_lib = {'ini': (configobj, 'configobj'),
'json': (json, 'json'),
'json5': (json5, 'json5'),
'toml': (toml, 'toml'),
'xml': (xmltodict, 'xmltodict'),
'yaml': (yaml, 'PyYAML')}
def _is_utf8(enc_str):
return enc_str.lower() in ['utf8', 'utf-8']
class AnyMarkupError(Exception):
def __init__(self, cause, original_tb=''):
"""Wrapper for all errors that occur during anymarkup calls.
Args:
cause: either a reraised exception or a string with cause
"""
super(AnyMarkupError, self).__init__()
self.cause = cause
self.original_tb = original_tb
def __str__(self):
cause = str(self.cause)
if isinstance(self.cause, Exception):
cause = 'caught {0}: {1}'.format(type(self.cause), cause)
msg = 'AnyMarkupError: {0}'.format(cause)
if self.original_tb:
msg += '\nOriginal traceback:\n{0}'.format(self.original_tb)
return msg
def parse(inp, format=None, encoding='utf-8', force_types=True, interpolate=True):
"""Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
interpolate: turn on interpolation for INI files (defaults to True)
Returns:
parsed input (dict or list) containing unicode values
Raises:
        AnyMarkupError if a problem occurs while parsing `inp`
"""
proper_inp = inp
if hasattr(inp, 'read'):
proper_inp = inp.read()
# if proper_inp is unicode, encode it
if isinstance(proper_inp, six.text_type):
proper_inp = proper_inp.encode(encoding)
# try to guess markup type
fname = None
if hasattr(inp, 'name'):
fname = inp.name
fmt = _get_format(format, fname, proper_inp)
# make it look like file-like bytes-yielding object
proper_inp = six.BytesIO(proper_inp)
try:
res = _do_parse(proper_inp, fmt, encoding, force_types, interpolate)
except Exception as e:
# I wish there was only Python 3 and I could just use "raise ... from e"
raise AnyMarkupError(e, traceback.format_exc())
if res is None:
res = {}
return res
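# Usage sketch (illustrative input; the format is guessed as yaml here):
#
#   >>> parse(b'a: 1\nb: true\n')
#   {'a': 1, 'b': True}
#   >>> parse(b'a: 1\nb: true\n', force_types=False)
#   {'a': '1', 'b': 'True'}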
def parse_file(path, format=None, encoding='utf-8', force_types=True, interpolate=True):
"""A convenience wrapper of parse, which accepts path of file to parse.
Args:
path: path to file to parse
format: explicitly override the guessed `inp` markup format
encoding: file encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
interpolate: turn on interpolation for INI files (defaults to True)
Returns:
parsed `inp` (dict or list) containing unicode values
Raises:
AnyMarkupError if a problem occurs while parsing
"""
try:
with open(path, 'rb') as f:
return parse(f, format, encoding, force_types, interpolate)
except EnvironmentError as e:
raise AnyMarkupError(e, traceback.format_exc())
def serialize(struct, format, target=None, encoding='utf-8'):
"""Serialize given structure and return it as encoded string or write it to file-like object.
Args:
struct: structure (dict or list) with unicode members to serialize; note that list
can only be serialized to json
format: specify markup format to serialize structure as
target: binary-opened file-like object to serialize to; if None (default),
the result will be returned instead of writing to `target`
encoding: encoding to use when serializing, defaults to utf-8
Returns:
bytestring with serialized structure if `target` is None; return value of
`target.write` otherwise
Raises:
AnyMarkupError if a problem occurs while serializing
"""
# raise if "unicode-opened"
if hasattr(target, 'encoding') and target.encoding:
        raise AnyMarkupError('Target file must be opened in binary mode')
fname = None
if hasattr(target, 'name'):
fname = target.name
fmt = _get_format(format, fname)
try:
serialized = _do_serialize(struct, fmt, encoding)
if target is None:
return serialized
else:
return target.write(serialized)
except Exception as e:
raise AnyMarkupError(e, traceback.format_exc())
def serialize_file(struct, path, format=None, encoding='utf-8'):
"""A convenience wrapper of serialize, which accepts path of file to serialize to.
Args:
struct: structure (dict or list) with unicode members to serialize; note that list
can only be serialized to json
path: path of the file to serialize to
format: override markup format to serialize structure as (taken from filename
by default)
encoding: encoding to use when serializing, defaults to utf-8
Returns:
number of bytes written
Raises:
AnyMarkupError if a problem occurs while serializing
"""
try:
with open(path, 'wb') as f:
return serialize(struct, format, f, encoding)
except EnvironmentError as e:
raise AnyMarkupError(e, traceback.format_exc())
def _check_lib_installed(fmt, action):
if fmt_to_lib[fmt][0] is None:
raise ImportError('Can\'t {action} {fmt}: {name} not installed'.
format(action=action, fmt=fmt, name=fmt_to_lib[fmt][1]))
def _do_parse(inp, fmt, encoding, force_types, interpolate):
"""Actually parse input.
Args:
inp: bytes yielding file-like object
fmt: format to use for parsing
encoding: encoding of `inp`
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
interpolate: turn on interpolation for INI files
Returns:
parsed `inp` (dict or list) containing unicode values
Raises:
various sorts of errors raised by used libraries while parsing
"""
res = {}
_check_lib_installed(fmt, 'parse')
if fmt == 'ini':
cfg = configobj.ConfigObj(inp, encoding=encoding, interpolation=interpolate)
res = cfg.dict()
elif fmt == 'json':
if six.PY3:
# python 3 json only reads from unicode objects
inp = io.TextIOWrapper(inp, encoding=encoding)
res = json.load(inp)
else:
res = json.load(inp, encoding=encoding)
elif fmt == 'json5':
if six.PY3:
inp = io.TextIOWrapper(inp, encoding=encoding)
res = json5.load(inp, encoding=encoding)
elif fmt == 'toml':
if not _is_utf8(encoding):
raise AnyMarkupError('toml is always utf-8 encoded according to specification')
if six.PY3:
# python 3 toml prefers unicode objects
inp = io.TextIOWrapper(inp, encoding=encoding)
res = toml.load(inp)
elif fmt == 'xml':
res = xmltodict.parse(inp, encoding=encoding)
elif fmt == 'yaml':
# guesses encoding by its own, there seems to be no way to pass
# it explicitly
res = yaml.safe_load(inp)
else:
        raise AnyMarkupError('unknown format: {0}'.format(fmt))
# make sure it's all unicode and all int/float values were parsed correctly
# the unicode part is here because of yaml on PY2 and also as workaround for
# https://github.com/DiffSK/configobj/issues/18#issuecomment-76391689
return _ensure_proper_types(res, encoding, force_types)
def _do_serialize(struct, fmt, encoding):
"""Actually serialize input.
Args:
struct: structure to serialize to
fmt: format to serialize to
encoding: encoding to use while serializing
Returns:
encoded serialized structure
Raises:
various sorts of errors raised by libraries while serializing
"""
res = None
_check_lib_installed(fmt, 'serialize')
if fmt == 'ini':
config = configobj.ConfigObj(encoding=encoding)
for k, v in struct.items():
config[k] = v
res = b'\n'.join(config.write())
elif fmt in ['json', 'json5']:
# specify separators to get rid of trailing whitespace
        # specify ensure_ascii=False so that non-ASCII characters are kept as
        # real characters (encoded below via .encode(encoding)) instead of
        # being escaped as \u sequences
res = (json if fmt == 'json' else json5).dumps(struct,
indent=2,
separators=(',', ': '),
ensure_ascii=False).encode(encoding)
elif fmt == 'toml':
if not _is_utf8(encoding):
raise AnyMarkupError('toml must always be utf-8 encoded according to specification')
res = toml.dumps(struct).encode(encoding)
elif fmt == 'xml':
# passing encoding argument doesn't encode, just sets the xml property
res = xmltodict.unparse(struct, pretty=True, encoding='utf-8').encode('utf-8')
elif fmt == 'yaml':
res = yaml.safe_dump(struct, encoding='utf-8', default_flow_style=False)
else:
        raise AnyMarkupError('unknown format: {0}'.format(fmt))
return res
def _ensure_proper_types(struct, encoding, force_types):
"""A convenience function that recursively makes sure the given structure
contains proper types according to value of `force_types`.
Args:
struct: a structure to check and fix
encoding: encoding to use on found bytestrings
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, unmodified `struct` is returned
Returns:
a fully decoded copy of given structure
"""
if force_types is None:
return struct
# if it's an empty value
res = None
if isinstance(struct, (dict, collections.OrderedDict)):
res = type(struct)()
for k, v in struct.items():
res[_ensure_proper_types(k, encoding, force_types)] = \
_ensure_proper_types(v, encoding, force_types)
elif isinstance(struct, list):
res = []
for i in struct:
res.append(_ensure_proper_types(i, encoding, force_types))
elif isinstance(struct, six.binary_type):
res = struct.decode(encoding)
elif isinstance(struct, (six.text_type, type(None), type(True), six.integer_types, float)):
res = struct
elif isinstance(struct, datetime.datetime):
# toml can parse datetime natively
res = struct
else:
raise AnyMarkupError('internal error - unexpected type {0} in parsed markup'.
format(type(struct)))
if force_types and isinstance(res, six.text_type):
res = _recognize_basic_types(res)
elif not (force_types or
isinstance(res, (dict, collections.OrderedDict, list, six.text_type))):
res = six.text_type(res)
return res
def _recognize_basic_types(s):
"""If value of given string `s` is an integer (or long), float or boolean, convert it
to a proper type and return it.
"""
tps = [int, float]
if not six.PY3: # compat for older versions of six that don't have PY2
tps.append(long)
for tp in tps:
try:
return tp(s)
except ValueError:
pass
if s.lower() == 'true':
return True
if s.lower() == 'false':
return False
if s.lower() in ['none', 'null']:
return None
return s
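# Conversion sketch for _recognize_basic_types:
#   '42'    -> 42
#   '3.14'  -> 3.14
#   'TRUE'  -> True
#   'null'  -> None
#   'hello' -> 'hello'  (returned unchanged)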
def _get_format(format, fname, inp=None):
"""Try to guess markup format of given input.
Args:
format: explicit format override to use
fname: name of file, if a file was used to read `inp`
inp: optional bytestring to guess format of (can be None, if markup
format is to be guessed only from `format` and `fname`)
Returns:
guessed format (a key of fmt_to_exts dict)
Raises:
AnyMarkupError if explicit format override has unsupported value
or if it's impossible to guess the format
"""
fmt = None
err = True
if format is not None:
if format in fmt_to_exts:
fmt = format
err = False
elif fname:
# get file extension without leading dot
file_ext = os.path.splitext(fname)[1][len(os.path.extsep):]
for fmt_name, exts in fmt_to_exts.items():
if file_ext in exts:
fmt = fmt_name
err = False
if fmt is None:
if inp is not None:
fmt = _guess_fmt_from_bytes(inp)
err = False
if err:
err_string = 'Failed to guess markup format based on: '
what = []
for k, v in {format: 'specified format argument',
fname: 'filename', inp: 'input string'}.items():
if k:
what.append(v)
if not what:
what.append('nothing to guess format from!')
err_string += ', '.join(what)
raise AnyMarkupError(err_string)
return fmt
def _guess_fmt_from_bytes(inp):
"""Try to guess format of given bytestring.
Args:
inp: byte string to guess format of
Returns:
guessed format
"""
stripped = inp.strip()
fmt = None
ini_section_header_re = re.compile(br'^\[([\w-]+)\]')
if len(stripped) == 0:
# this can be anything, so choose yaml, for example
fmt = 'yaml'
else:
if stripped.startswith(b'<'):
fmt = 'xml'
else:
for l in stripped.splitlines():
line = l.strip()
# there are C-style comments in json5, but we don't auto-detect it,
# so it doesn't matter here
if not line.startswith(b'#') and line:
break
# json, ini or yaml => skip comments and then determine type
if ini_section_header_re.match(line):
fmt = 'ini'
else:
# we assume that yaml is superset of json
# TODO: how do we figure out it's not yaml?
fmt = 'yaml'
return fmt
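# Guessing sketch for _guess_fmt_from_bytes:
#   b''                 -> 'yaml'  (empty input defaults to yaml)
#   b'<config/>'        -> 'xml'
#   b'# note\n[main]'   -> 'ini'   (leading comment lines are skipped)
#   b'{"a": 1}'         -> 'yaml'  (json is treated as a subset of yaml)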
# following code makes it possible to use OrderedDict with PyYAML
# based on https://bitbucket.org/xi/pyyaml/issue/13
def construct_ordereddict(loader, node):
try:
omap = loader.construct_yaml_omap(node)
return collections.OrderedDict(*omap)
except yaml.constructor.ConstructorError:
return loader.construct_yaml_seq(node)
def represent_ordereddict(dumper, data):
# NOTE: For block style this uses the compact omap notation, but for flow style
# it does not.
values = []
node = yaml.SequenceNode(u'tag:yaml.org,2002:omap', values, flow_style=False)
if dumper.alias_key is not None:
dumper.represented_objects[dumper.alias_key] = node
for key, value in data.items():
key_item = dumper.represent_data(key)
value_item = dumper.represent_data(value)
node_item = yaml.MappingNode(u'tag:yaml.org,2002:map', [(key_item, value_item)],
flow_style=False)
values.append(node_item)
return node
def represent_str(dumper, data):
# borrowed from http://stackoverflow.com/a/33300001
if len(data.splitlines()) > 1:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
if yaml is not None:
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:omap', construct_ordereddict)
yaml.SafeDumper.add_representer(collections.OrderedDict, represent_ordereddict)
yaml.SafeDumper.add_representer(str, represent_str)
if six.PY2:
yaml.SafeDumper.add_representer(unicode, represent_str)
|
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
During "collection", pytest finds all the tests it supports. These are
called "items". The process is top-down, mostly tracing down through
the file system. Aside from its own machinery, pytest supports hooks
that find tests. Effectively, pytest starts with a set of "collectors";
objects that can provide a list of tests and sub-collectors. All
collectors in the resulting tree are visited and the tests aggregated.
For the most part, each test's (and collector's) parent is identified
as the collector that collected it.
Collectors and items are collectively identified as "nodes". The pytest
API relies on collector and item objects providing specific methods and
attributes. In addition to corresponding base classes, pytest provides
a number of concrete implementations.
The following are the known pytest node types:
Node
Collector
FSCollector
Session (the top-level collector)
File
Module
Package
DoctestTextfile
DoctestModule
PyCollector
(Module)
(...)
Class
UnitTestCase
Instance
Item
Function
TestCaseFunction
DoctestItem
Here are the unique attrs for those classes:
Node
name
nodeid (readonly)
config
session
(parent) - the parent node
(fspath) - the file from which the node was collected
----
    own_markers - explicit markers (e.g. with @pytest.mark())
keywords
extra_keyword_matches
Item
location - where the actual test source code is: (relfspath, lno, fullname)
user_properties
PyCollector
module
class
instance
obj
Function
module
class
instance
obj
function
(callspec)
(fixturenames)
funcargs
originalname - w/o decorations, e.g. [...] for parameterized
DoctestItem
dtest
obj
When parsing an item, we make use of the following attributes:
* name
* nodeid
* __class__
+ __name__
* fspath
* location
* function
+ __name__
+ __code__
+ __closure__
* own_markers
"""
from __future__ import absolute_import, print_function
import sys
import pytest
import _pytest.doctest
import _pytest.unittest
from ..info import SingleTestInfo, SingleTestPath
from ..util import fix_fileid, PATH_SEP, NORMCASE
def should_never_reach_here(item, **extra):
"""Indicates a code path we should never reach."""
print("The Python extension has run into an unexpected situation")
print("while processing a pytest node during test discovery. Please")
print("Please open an issue at:")
print(" https://github.com/microsoft/vscode-python/issues")
print("and paste the following output there.")
print()
for field, info in _summarize_item(item):
print("{}: {}".format(field, info))
if extra:
print()
print("extra info:")
for name, info in extra.items():
print("{:10}".format(name + ":"), end="")
if isinstance(info, str):
print(info)
else:
try:
print(*info)
except TypeError:
print(info)
print()
print("traceback:")
import traceback
traceback.print_stack()
msg = "Unexpected pytest node (see printed output)."
exc = NotImplementedError(msg)
exc.item = item
return exc
def parse_item(
item,
# *,
_get_item_kind=(lambda *a: _get_item_kind(*a)),
_parse_node_id=(lambda *a: _parse_node_id(*a)),
_split_fspath=(lambda *a: _split_fspath(*a)),
_get_location=(lambda *a: _get_location(*a)),
):
"""Return (TestInfo, [suite ID]) for the given item.
The suite IDs, if any, are in parent order with the item's direct
parent at the beginning. The parent of the last suite ID (or of
the test if there are no suites) is the file ID, which corresponds
to TestInfo.path.
"""
# _debug_item(item, showsummary=True)
kind, _ = _get_item_kind(item)
# Skip plugin generated tests
if kind is None:
return None, None
(nodeid, parents, fileid, testfunc, parameterized) = _parse_node_id(
item.nodeid, kind
)
# Note: testfunc does not necessarily match item.function.__name__.
# This can result from importing a test function from another module.
# Figure out the file.
testroot, relfile = _split_fspath(str(item.fspath), fileid, item)
location, fullname = _get_location(item, testroot, relfile)
if kind == "function":
if testfunc and fullname != testfunc + parameterized:
raise should_never_reach_here(
item,
fullname=fullname,
testfunc=testfunc,
parameterized=parameterized,
# ...
)
elif kind == "doctest":
if testfunc and fullname != testfunc and fullname != "[doctest] " + testfunc:
raise should_never_reach_here(
item,
fullname=fullname,
testfunc=testfunc,
# ...
)
testfunc = None
# Sort out the parent.
if parents:
parentid, _, _ = parents[0]
else:
parentid = None
# Sort out markers.
# See: https://docs.pytest.org/en/latest/reference.html#marks
markers = set()
for marker in getattr(item, "own_markers", []):
if marker.name == "parameterize":
# We've already covered these.
continue
elif marker.name == "skip":
markers.add("skip")
elif marker.name == "skipif":
markers.add("skip-if")
elif marker.name == "xfail":
markers.add("expected-failure")
        # We can add support for other markers as we need them.
test = SingleTestInfo(
id=nodeid,
name=item.name,
path=SingleTestPath(
root=testroot,
relfile=relfile,
func=testfunc,
sub=[parameterized] if parameterized else None,
),
source=location,
markers=sorted(markers) if markers else None,
parentid=parentid,
)
if parents and parents[-1] == (".", None, "folder"): # This should always be true?
parents[-1] = (".", testroot, "folder")
return test, parents
def _split_fspath(
fspath,
fileid,
item,
# *,
_normcase=NORMCASE,
):
"""Return (testroot, relfile) for the given fspath.
"relfile" will match "fileid".
"""
# "fileid" comes from nodeid and is always relative to the testroot
# (with a "./" prefix). There are no guarantees about casing, so we
    # normcase just to be sure.
relsuffix = fileid[1:] # Drop (only) the "." prefix.
if not _normcase(fspath).endswith(_normcase(relsuffix)):
raise should_never_reach_here(
item,
fspath=fspath,
fileid=fileid,
# ...
)
testroot = fspath[: -len(fileid) + 1] # Ignore the "./" prefix.
relfile = "." + fspath[-len(fileid) + 1 :] # Keep the pathsep.
return testroot, relfile
def _get_location(
item,
testroot,
relfile,
# *,
_matches_relfile=(lambda *a: _matches_relfile(*a)),
_is_legacy_wrapper=(lambda *a: _is_legacy_wrapper(*a)),
_unwrap_decorator=(lambda *a: _unwrap_decorator(*a)),
_pathsep=PATH_SEP,
):
"""Return (loc str, fullname) for the given item."""
# When it comes to normcase, we favor relfile (from item.fspath)
# over item.location in this function.
srcfile, lineno, fullname = item.location
if _matches_relfile(srcfile, testroot, relfile):
srcfile = relfile
else:
# pytest supports discovery of tests imported from other
# modules. This is reflected by a different filename
# in item.location.
if _is_legacy_wrapper(srcfile):
srcfile = relfile
unwrapped = _unwrap_decorator(item.function)
if unwrapped is None:
# It was an invalid legacy wrapper so we just say
# "somewhere in relfile".
lineno = None
else:
_srcfile, lineno = unwrapped
if not _matches_relfile(_srcfile, testroot, relfile):
# For legacy wrappers we really expect the wrapped
# function to be in relfile. So here we ignore any
# other file and just say "somewhere in relfile".
lineno = None
elif _matches_relfile(srcfile, testroot, relfile):
srcfile = relfile
# Otherwise we just return the info from item.location as-is.
if not srcfile.startswith("." + _pathsep):
srcfile = "." + _pathsep + srcfile
if lineno is None:
lineno = -1 # i.e. "unknown"
# from pytest, line numbers are 0-based
location = "{}:{}".format(srcfile, int(lineno) + 1)
return location, fullname
def _matches_relfile(
srcfile,
testroot,
relfile,
# *,
_normcase=NORMCASE,
_pathsep=PATH_SEP,
):
"""Return True if "srcfile" matches the given relfile."""
testroot = _normcase(testroot)
srcfile = _normcase(srcfile)
relfile = _normcase(relfile)
if srcfile == relfile:
return True
elif srcfile == relfile[len(_pathsep) + 1 :]:
return True
elif srcfile == testroot + relfile[1:]:
return True
else:
return False
def _is_legacy_wrapper(
srcfile,
# *,
_pathsep=PATH_SEP,
_pyversion=sys.version_info,
):
"""Return True if the test might be wrapped.
In Python 2 unittest's decorators (e.g. unittest.skip) do not wrap
properly, so we must manually unwrap them.
"""
if _pyversion > (3,):
return False
if (_pathsep + "unittest" + _pathsep + "case.py") not in srcfile:
return False
return True
def _unwrap_decorator(func):
"""Return (filename, lineno) for the func the given func wraps.
If the wrapped func cannot be identified then return None. Likewise
for the wrapped filename. "lineno" is None if it cannot be found
but the filename could.
"""
try:
func = func.__closure__[0].cell_contents
except (IndexError, AttributeError):
return None
else:
if not callable(func):
return None
try:
filename = func.__code__.co_filename
except AttributeError:
return None
else:
try:
lineno = func.__code__.co_firstlineno - 1
except AttributeError:
return (filename, None)
else:
return filename, lineno
def _parse_node_id(
testid,
kind,
# *,
_iter_nodes=(lambda *a: _iter_nodes(*a)),
):
"""Return the components of the given node ID, in heirarchical order."""
nodes = iter(_iter_nodes(testid, kind))
testid, name, kind = next(nodes)
parents = []
parameterized = None
if kind == "doctest":
parents = list(nodes)
fileid, _, _ = parents[0]
return testid, parents, fileid, name, parameterized
elif kind is None:
fullname = None
else:
if kind == "subtest":
node = next(nodes)
parents.append(node)
funcid, funcname, _ = node
parameterized = testid[len(funcid) :]
elif kind == "function":
funcname = name
else:
raise should_never_reach_here(
testid,
kind=kind,
# ...
)
fullname = funcname
for node in nodes:
parents.append(node)
parentid, name, kind = node
if kind == "file":
fileid = parentid
break
elif fullname is None:
# We don't guess how to interpret the node ID for these tests.
continue
elif kind == "suite":
fullname = name + "." + fullname
else:
raise should_never_reach_here(
testid,
node=node,
# ...
)
else:
fileid = None
parents.extend(nodes) # Add the rest in as-is.
return (
testid,
parents,
fileid,
fullname,
parameterized or "",
)
def _iter_nodes(
testid,
kind,
# *,
_normalize_test_id=(lambda *a: _normalize_test_id(*a)),
_normcase=NORMCASE,
_pathsep=PATH_SEP,
):
"""Yield (nodeid, name, kind) for the given node ID and its parents."""
nodeid, testid = _normalize_test_id(testid, kind)
if len(nodeid) > len(testid):
testid = "." + _pathsep + testid
if kind == "function" and nodeid.endswith("]"):
funcid, sep, parameterized = nodeid.partition("[")
if not sep:
raise should_never_reach_here(
nodeid,
# ...
)
yield (nodeid, sep + parameterized, "subtest")
nodeid = funcid
parentid, _, name = nodeid.rpartition("::")
if not parentid:
if kind is None:
# This assumes that plugins can generate nodes that do not
# have a parent. All the builtin nodes have one.
yield (nodeid, name, kind)
return
# We expect at least a filename and a name.
raise should_never_reach_here(
nodeid,
# ...
)
yield (nodeid, name, kind)
# Extract the suites.
while "::" in parentid:
suiteid = parentid
parentid, _, name = parentid.rpartition("::")
yield (suiteid, name, "suite")
# Extract the file and folders.
fileid = parentid
raw = testid[: len(fileid)]
_parentid, _, filename = _normcase(fileid).rpartition(_pathsep)
parentid = fileid[: len(_parentid)]
raw, name = raw[: len(_parentid)], raw[-len(filename) :]
yield (fileid, name, "file")
# We're guaranteed at least one (the test root).
while _pathsep in _normcase(parentid):
folderid = parentid
_parentid, _, foldername = _normcase(folderid).rpartition(_pathsep)
parentid = folderid[: len(_parentid)]
raw, name = raw[: len(parentid)], raw[-len(foldername) :]
yield (folderid, name, "folder")
# We set the actual test root later at the bottom of parse_item().
testroot = None
yield (parentid, testroot, "folder")
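# Illustration (derived from the logic above; hypothetical node ID, and it is
# assumed that fix_fileid leaves an already "./"-prefixed POSIX path unchanged):
# for kind "function" and testid "./tests/test_spam.py::TestEggs::test_ham[1-2]",
# _iter_nodes() yields, in order:
#   ("./tests/test_spam.py::TestEggs::test_ham[1-2]", "[1-2]", "subtest")
#   ("./tests/test_spam.py::TestEggs::test_ham", "test_ham", "function")
#   ("./tests/test_spam.py::TestEggs", "TestEggs", "suite")
#   ("./tests/test_spam.py", "test_spam.py", "file")
#   ("./tests", "tests", "folder")
#   (".", None, "folder")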
def _normalize_test_id(
testid,
kind,
# *,
_fix_fileid=fix_fileid,
_pathsep=PATH_SEP,
):
"""Return the canonical form for the given node ID."""
while "::()::" in testid:
testid = testid.replace("::()::", "::")
if kind is None:
return testid, testid
orig = testid
# We need to keep the testid as-is, or else pytest won't recognize
# it when we try to use it later (e.g. to run a test). The only
# exception is that we add a "./" prefix for relative paths.
# Note that pytest always uses "/" as the path separator in IDs.
fileid, sep, remainder = testid.partition("::")
fileid = _fix_fileid(fileid)
if not fileid.startswith("./"): # Absolute "paths" not expected.
raise should_never_reach_here(
testid,
fileid=fileid,
# ...
)
testid = fileid + sep + remainder
return testid, orig
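# Illustration (hypothetical ID): for kind "function" and the raw pytest ID
# "tests/test_spam.py::TestEggs::()::test_ham", _normalize_test_id() collapses
# the "::()::" emitted by older pytest versions for Instance nodes and then
# prefixes the file portion with "./", returning roughly:
#   ("./tests/test_spam.py::TestEggs::test_ham",   # normalized
#    "tests/test_spam.py::TestEggs::test_ham")     # pre-"./" form, kept as "orig"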
def _get_item_kind(item):
"""Return (kind, isunittest) for the given item."""
if isinstance(item, _pytest.doctest.DoctestItem):
return "doctest", False
elif isinstance(item, _pytest.unittest.TestCaseFunction):
return "function", True
elif isinstance(item, pytest.Function):
# We *could* be more specific, e.g. "method", "subtest".
return "function", False
else:
return None, False
#############################
# useful for debugging
_FIELDS = [
"nodeid",
"kind",
"class",
"name",
"fspath",
"location",
"function",
"markers",
"user_properties",
"attrnames",
]
def _summarize_item(item):
if not hasattr(item, "nodeid"):
yield "nodeid", item
return
for field in _FIELDS:
try:
if field == "kind":
yield field, _get_item_kind(item)
elif field == "class":
yield field, item.__class__.__name__
elif field == "markers":
yield field, item.own_markers
# yield field, list(item.iter_markers())
elif field == "attrnames":
yield field, dir(item)
else:
yield field, getattr(item, field, "<???>")
except Exception as exc:
yield field, "<error {!r}>".format(exc)
def _debug_item(item, showsummary=False):
item._debugging = True
try:
summary = dict(_summarize_item(item))
finally:
item._debugging = False
if showsummary:
print(item.nodeid)
for key in (
"kind",
"class",
"name",
"fspath",
"location",
"func",
"markers",
"props",
):
print(" {:12} {}".format(key, summary[key]))
print()
return summary
|
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Proximal Policy Optimization agent.
Based on John Schulman's implementation in Python and Theano:
https://github.com/joschu/modular_rl/blob/master/modular_rl/ppo.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from agents import parts
from agents import tools
from agents.algorithms.ppo import utility
class PPO(object):
"""A vectorized implementation of the PPO algorithm by John Schulman."""
def __init__(self, batch_env, step, is_training, should_log, config):
"""Create an instance of the PPO algorithm.
Args:
batch_env: In-graph batch environment.
step: Integer tensor holding the current training step.
is_training: Boolean tensor for whether the algorithm should train.
should_log: Boolean tensor for whether summaries should be returned.
config: Object containing the agent configuration as attributes.
"""
self._batch_env = batch_env
self._step = step
self._is_training = is_training
self._should_log = should_log
self._config = config
self._observ_filter = parts.StreamingNormalize(
self._batch_env.observ[0], center=True, scale=True, clip=5,
name='normalize_observ')
self._reward_filter = parts.StreamingNormalize(
self._batch_env.reward[0], center=False, scale=True, clip=10,
name='normalize_reward')
self._use_gpu = self._config.use_gpu and utility.available_gpus()
policy_params, state = self._initialize_policy()
self._initialize_memory(policy_params)
# Initialize the optimizer and penalty.
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
self._optimizer = self._config.optimizer(self._config.learning_rate)
self._penalty = tf.Variable(
self._config.kl_init_penalty, False, dtype=tf.float32)
# If the policy is stateful, allocate space to store its state.
with tf.variable_scope('ppo_temporary'):
with tf.device('/gpu:0'):
if state is None:
self._last_state = None
else:
var_like = lambda x: tf.Variable(lambda: tf.zeros_like(x), False)
self._last_state = tools.nested.map(var_like, state)
# Remember the action and policy parameters to write into the memory.
with tf.variable_scope('ppo_temporary'):
self._last_action = tf.Variable(
tf.zeros_like(self._batch_env.action), False, name='last_action')
self._last_policy = tools.nested.map(
          lambda x: tf.Variable(tf.zeros_like(x[:, 0]), False), policy_params)
def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(
self._last_state, agent_indices)
reset_buffer = self._current_episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
def perform(self, agent_indices, observ):
"""Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor.
"""
with tf.name_scope('perform/'):
observ = self._observ_filter.transform(observ)
if self._last_state is None:
state = None
else:
state = tools.nested.map(
lambda x: tf.gather(x, agent_indices), self._last_state)
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
output = self._network(
observ[:, None], tf.ones(observ.shape[0]), state)
action = tf.cond(
self._is_training, output.policy.sample, output.policy.mode)
logprob = output.policy.log_prob(action)[:, 0]
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
tf.summary.histogram('mode', output.policy.mode()[:, 0]),
tf.summary.histogram('action', action[:, 0]),
tf.summary.histogram('logprob', logprob)]), str)
# Remember current policy to append to memory in the experience callback.
if self._last_state is None:
assign_state = tf.no_op()
else:
assign_state = utility.assign_nested_vars(
self._last_state, output.state, agent_indices)
remember_last_action = tf.scatter_update(
self._last_action, agent_indices, action[:, 0])
policy_params = tools.nested.filter(
lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
assert policy_params, 'Policy has no parameters to store.'
remember_last_policy = tools.nested.map(
lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
self._last_policy, policy_params, flatten=True)
with tf.control_dependencies((
assign_state, remember_last_action) + remember_last_policy):
return action[:, 0], tf.identity(summary)
def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
"""
with tf.name_scope('experience/'):
return tf.cond(
self._is_training,
# pylint: disable=g-long-lambda
lambda: self._define_experience(
agent_indices, observ, action, reward), str)
def _define_experience(self, agent_indices, observ, action, reward):
"""Implement the branch of experience() entered during training."""
update_filters = tf.summary.merge([
self._observ_filter.update(observ),
self._reward_filter.update(reward)])
with tf.control_dependencies([update_filters]):
if self._config.train_on_agent_action:
# NOTE: Doesn't seem to change much.
action = self._last_action
policy = tools.nested.map(
lambda x: tf.gather(x, agent_indices), self._last_policy)
batch = (observ, action, policy, reward)
append = self._current_episodes.append(batch, agent_indices)
with tf.control_dependencies([append]):
norm_observ = self._observ_filter.transform(observ)
norm_reward = tf.reduce_mean(self._reward_filter.transform(reward))
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
update_filters,
self._observ_filter.summary(),
self._reward_filter.summary(),
tf.summary.scalar('memory_size', self._num_finished_episodes),
tf.summary.histogram('normalized_observ', norm_observ),
tf.summary.histogram('action', self._last_action),
tf.summary.scalar('normalized_reward', norm_reward)]), str)
return summary
def end_episode(self, agent_indices):
"""Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('end_episode/'):
return tf.cond(
self._is_training,
lambda: self._define_end_episode(agent_indices), str)
def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
network = functools.partial(
self._config.network, self._config, self._batch_env.action_space)
self._network = tf.make_template('network', network)
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
if output.policy.event_shape != self._batch_env.action.shape[1:]:
message = 'Policy event shape {} does not match action shape {}.'
message = message.format(
output.policy.event_shape, self._batch_env.action.shape[1:])
raise ValueError(message)
self._policy_type = type(output.policy)
is_tensor = lambda x: isinstance(x, tf.Tensor)
policy_params = tools.nested.filter(is_tensor, output.policy.parameters)
set_batch_dim = lambda x: utility.set_dimension(x, 0, len(self._batch_env))
tools.nested.map(set_batch_dim, policy_params)
if output.state is not None:
tools.nested.map(set_batch_dim, output.state)
return policy_params, output.state
def _initialize_memory(self, policy_params):
"""Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in parallel. Finished episodes
are copied into the next free slot of the second memory. The memory index
points to the next free slot.
"""
# We store observation, action, policy parameters, and reward.
template = (
self._batch_env.observ[0],
self._batch_env.action[0],
tools.nested.map(lambda x: x[0, 0], policy_params),
self._batch_env.reward[0])
with tf.variable_scope('ppo_temporary'):
self._current_episodes = parts.EpisodeMemory(
template, len(self._batch_env), self._config.max_length, 'episodes')
self._finished_episodes = parts.EpisodeMemory(
template, self._config.update_every, self._config.max_length, 'memory')
self._num_finished_episodes = tf.Variable(0, False)
def _define_end_episode(self, agent_indices):
"""Implement the branch of end_episode() entered during training."""
episodes, length = self._current_episodes.data(agent_indices)
space_left = self._config.update_every - self._num_finished_episodes
use_episodes = tf.range(tf.minimum(
tf.shape(agent_indices)[0], space_left))
episodes = tools.nested.map(lambda x: tf.gather(x, use_episodes), episodes)
append = self._finished_episodes.replace(
episodes, tf.gather(length, use_episodes),
use_episodes + self._num_finished_episodes)
with tf.control_dependencies([append]):
increment_index = self._num_finished_episodes.assign_add(
tf.shape(use_episodes)[0])
with tf.control_dependencies([increment_index]):
memory_full = self._num_finished_episodes >= self._config.update_every
return tf.cond(memory_full, self._training, str)
def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
    Trains on the episodes collected in the memory and resets the memory
    afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
with tf.name_scope('training'):
assert_full = tf.assert_equal(
self._num_finished_episodes, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._finished_episodes.data()
(observ, action, old_policy_params, reward), length = data
# We set padding frames of the parameters to ones to prevent Gaussians
# with zero variance. This would result in an infinite KL divergence,
# which, even if masked out, would result in NaN gradients.
old_policy_params = tools.nested.map(
lambda param: self._mask(param, length, 1), old_policy_params)
with tf.control_dependencies([tf.assert_greater(length, 0)]):
length = tf.identity(length)
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(
observ, action, old_policy_params, reward, length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(
observ, old_policy_params, length)
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(
self._finished_episodes.clear(),
self._num_finished_episodes.assign(0))
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(
tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([
update_summary, penalty_summary, weight_summary])
def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
    iterations. Only one iteration's summary can be returned, so we choose the
    one produced after half of the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
return_ = utility.discounted_return(
reward, length, self._config.discount)
value = self._network(observ, length).value
if self._config.gae_lambda:
advantage = utility.lambda_advantage(
reward, value, length, self._config.discount,
self._config.gae_lambda)
else:
advantage = return_ - value
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
advantage = tf.Print(
advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)],
'return and value: ')
advantage = tf.Print(
advantage, [tf.reduce_mean(advantage)],
'normalized advantage: ')
episodes = (observ, action, old_policy_params, reward, advantage)
value_loss, policy_loss, summary = parts.iterate_sequences(
self._update_step, [0., 0., ''], episodes, length,
self._config.chunk_length,
self._config.batch_size,
self._config.update_epochs,
padding_value=1)
print_losses = tf.group(
tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
with tf.control_dependencies([value_loss, policy_loss, print_losses]):
return summary[self._config.update_epochs // 2]
def _update_step(self, sequence):
"""Compute the current combined loss and perform a gradient update step.
    The sequence must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of episodes or chunks of episodes.
Returns:
Tuple of value loss, policy loss, and summary tensor.
"""
observ, action, old_policy_params, reward, advantage = sequence['sequence']
length = sequence['length']
old_policy = self._policy_type(**old_policy_params)
value_loss, value_summary = self._value_loss(observ, reward, length)
network = self._network(observ, length)
policy_loss, policy_summary = self._policy_loss(
old_policy, network.policy, action, advantage, length)
network_loss = network.get('loss', 0.0)
loss = policy_loss + value_loss + tf.reduce_mean(network_loss)
gradients, variables = (
zip(*self._optimizer.compute_gradients(loss)))
optimize = self._optimizer.apply_gradients(
zip(gradients, variables))
summary = tf.summary.merge([
value_summary, policy_summary,
tf.summary.histogram('network_loss', network_loss),
tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)),
tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
utility.gradient_summaries(zip(gradients, variables))])
with tf.control_dependencies([optimize]):
return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
    over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
reward: Sequences of reward.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('value_loss'):
value = self._network(observ, length).value
return_ = utility.discounted_return(
reward, length, self._config.discount)
advantage = return_ - value
value_loss = 0.5 * self._mask(advantage ** 2, length)
summary = tf.summary.merge([
tf.summary.histogram('value_loss', value_loss),
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
value_loss = tf.reduce_mean(value_loss)
return tf.check_numerics(value_loss, 'value_loss'), summary
def _policy_loss(
self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('policy_loss'):
kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
# Infinite values in the KL, even for padding frames that we mask out,
# cause NaN gradients since TensorFlow computes gradients with respect to
# the whole input tensor.
kl = tf.check_numerics(kl, 'kl')
kl = tf.reduce_mean(self._mask(kl, length), 1)
policy_gradient = tf.exp(
policy.log_prob(action) - old_policy.log_prob(action))
surrogate_loss = -tf.reduce_mean(self._mask(
policy_gradient * tf.stop_gradient(advantage), length), 1)
surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
kl_penalty = self._penalty * kl
cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
cutoff_count = tf.reduce_sum(
tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies([tf.cond(
cutoff_count > 0,
lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = (
self._config.kl_cutoff_coef *
tf.cast(kl > cutoff_threshold, tf.float32) *
(kl - cutoff_threshold) ** 2)
policy_loss = surrogate_loss + kl_penalty + kl_cutoff
entropy = tf.reduce_mean(policy.entropy(), axis=1)
if self._config.entropy_regularization:
policy_loss -= self._config.entropy_regularization * entropy
summary = tf.summary.merge([
tf.summary.histogram('entropy', entropy),
tf.summary.histogram('kl', kl),
tf.summary.histogram('surrogate_loss', surrogate_loss),
tf.summary.histogram('kl_penalty', kl_penalty),
tf.summary.histogram('kl_cutoff', kl_cutoff),
tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
tf.summary.histogram('policy_loss', policy_loss),
tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
policy_loss = tf.reduce_mean(policy_loss, 0)
return tf.check_numerics(policy_loss, 'policy_loss'), summary
def _adjust_penalty(self, observ, old_policy_params, length):
"""Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observations.
old_policy_params: Parameters of the behavioral policy.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
old_policy = self._policy_type(**old_policy_params)
with tf.name_scope('adjust_penalty'):
network = self._network(observ, length)
print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')
with tf.control_dependencies([print_penalty]):
kl_change = tf.reduce_mean(self._mask(
tf.contrib.distributions.kl_divergence(old_policy, network.policy),
length))
kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')
maybe_increase = tf.cond(
kl_change > 1.3 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty * 1.5), [0], 'increase penalty '),
float)
maybe_decrease = tf.cond(
kl_change < 0.7 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty / 1.5), [0], 'decrease penalty '),
float)
with tf.control_dependencies([maybe_increase, maybe_decrease]):
return tf.summary.merge([
tf.summary.scalar('kl_change', kl_change),
tf.summary.scalar('penalty', self._penalty)])
def _mask(self, tensor, length, padding_value=0):
"""Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences.
"""
with tf.name_scope('mask'):
range_ = tf.range(tensor.shape[1].value)
mask = range_[None, :] < length[:, None]
if tensor.shape.ndims > 2:
for _ in range(tensor.shape.ndims - 2):
mask = mask[..., None]
mask = tf.tile(mask, [1, 1] + tensor.shape[2:].as_list())
masked = tf.where(mask, tensor, padding_value * tf.ones_like(tensor))
return tf.check_numerics(masked, 'masked')
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import estimator_export
# The default learning rates are a historical artifact of the initial
# implementation.
_DNN_LEARNING_RATE = 0.001
_LINEAR_LEARNING_RATE = 0.005
def _check_no_sync_replicas_optimizer(optimizer):
if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
raise ValueError(
'SyncReplicasOptimizer does not support multi optimizers case. '
'Therefore, it is not supported in DNNLinearCombined model. '
'If you want to use this optimizer, please use either DNN or Linear '
'model.')
def _linear_learning_rate(num_linear_feature_columns):
"""Returns the default learning rate of the linear model.
The calculation is a historical artifact of this initial implementation, but
has proven a reasonable choice.
Args:
num_linear_feature_columns: The number of feature columns of the linear
model.
Returns:
A float.
"""
default_learning_rate = 1. / math.sqrt(num_linear_feature_columns)
return min(_LINEAR_LEARNING_RATE, default_learning_rate)
def _add_layer_summary(value, tag):
summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))
summary.histogram('%s/activation' % tag, value)
def _dnn_linear_combined_model_fn(features,
labels,
mode,
head,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
input_layer_partitioner=None,
config=None):
"""Deep Neural Net and Linear combined model_fn.
Args:
features: dict of `Tensor`.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
`int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `Head` instance.
linear_feature_columns: An iterable containing all the feature columns used
by the Linear model.
linear_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the Linear model. Defaults to the Ftrl
optimizer.
dnn_feature_columns: An iterable containing all the feature columns used by
the DNN model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN model. Defaults to the Adagrad
optimizer.
dnn_hidden_units: List of hidden units per DNN layer.
dnn_activation_fn: Activation function applied to each DNN layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability we will drop out a given DNN
coordinate.
input_layer_partitioner: Partitioner for input layer.
config: `RunConfig` object to configure the runtime settings.
Returns:
An `EstimatorSpec` instance.
Raises:
    ValueError: If both `linear_feature_columns` and `dnn_feature_columns`
are empty at the same time, or `input_layer_partitioner` is missing,
or features has the wrong type.
"""
if not isinstance(features, dict):
raise ValueError('features should be a dictionary of `Tensor`s. '
'Given type: {}'.format(type(features)))
if not linear_feature_columns and not dnn_feature_columns:
raise ValueError(
'Either linear_feature_columns or dnn_feature_columns must be defined.')
num_ps_replicas = config.num_ps_replicas if config else 0
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
# Build DNN Logits.
dnn_parent_scope = 'dnn'
if not dnn_feature_columns:
dnn_logits = None
else:
dnn_optimizer = optimizers.get_optimizer_instance(
dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)
_check_no_sync_replicas_optimizer(dnn_optimizer)
if not dnn_hidden_units:
raise ValueError(
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified.')
dnn_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas))
with variable_scope.variable_scope(
dnn_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=dnn_partitioner):
dnn_logit_fn = dnn._dnn_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
hidden_units=dnn_hidden_units,
feature_columns=dnn_feature_columns,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner)
dnn_logits = dnn_logit_fn(features=features, mode=mode)
linear_parent_scope = 'linear'
if not linear_feature_columns:
linear_logits = None
else:
linear_optimizer = optimizers.get_optimizer_instance(
linear_optimizer,
learning_rate=_linear_learning_rate(len(linear_feature_columns)))
_check_no_sync_replicas_optimizer(linear_optimizer)
with variable_scope.variable_scope(
linear_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner) as scope:
logit_fn = linear._linear_logit_fn_builder( # pylint: disable=protected-access
units=head.logits_dimension,
feature_columns=linear_feature_columns)
linear_logits = logit_fn(features=features)
_add_layer_summary(linear_logits, scope.name)
# Combine logits and build full model.
if dnn_logits is not None and linear_logits is not None:
logits = dnn_logits + linear_logits
elif dnn_logits is not None:
logits = dnn_logits
else:
logits = linear_logits
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
train_ops = []
global_step = training_util.get_global_step()
if dnn_logits is not None:
train_ops.append(
dnn_optimizer.minimize(
loss,
var_list=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES,
scope=dnn_parent_scope)))
if linear_logits is not None:
train_ops.append(
linear_optimizer.minimize(
loss,
var_list=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES,
scope=linear_parent_scope)))
train_op = control_flow_ops.group(*train_ops)
with ops.control_dependencies([train_op]):
return distribute_lib.increment_var(global_step)
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
@estimator_export('estimator.DNNLinearCombinedClassifier')
class DNNLinearCombinedClassifier(estimator.Estimator):
"""An estimator for TensorFlow Linear and DNN joined classification models.
Note: This estimator is also known as wide-n-deep.
Example:
```python
numeric_feature = numeric_column(...)
categorical_column_a = categorical_column_with_hash_bucket(...)
categorical_column_b = categorical_column_with_hash_bucket(...)
categorical_feature_a_x_categorical_feature_b = crossed_column(...)
categorical_feature_a_emb = embedding_column(
categorical_column=categorical_feature_a, ...)
categorical_feature_b_emb = embedding_column(
categorical_id_column=categorical_feature_b, ...)
estimator = DNNLinearCombinedClassifier(
# wide settings
linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[
categorical_feature_a_emb, categorical_feature_b_emb,
numeric_feature],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
# warm-start settings
warm_start_from="/path/to/checkpoint/dir")
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
  # It is the same for FtrlOptimizer.
# Input builders
  def input_fn_train():  # returns x, y
    pass
  estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval():  # returns x, y
    pass
  metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict():  # returns x, None
    pass
  predictions = estimator.predict(input_fn=input_fn_predict)
```
  Input of `train` and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
* for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using softmax cross entropy.
@compatibility(eager)
Estimators can be used while eager execution is enabled. Note that `input_fn`
and all hooks are executed inside a graph context, so they have to be written
to be compatible with graph mode. Note that `input_fn` code using `tf.data`
generally works in both graph and eager modes.
@end_compatibility
"""
def __init__(self,
model_dir=None,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
input_layer_partitioner=None,
config=None,
warm_start_from=None,
loss_reduction=losses.Reduction.SUM):
"""Initializes a DNNLinearCombinedClassifier instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Defaults to FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Defaults to Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
n_classes: Number of label classes. Defaults to 2, namely binary
classification. Must be > 1.
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to down-weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
      label_vocabulary: A list of strings representing possible label values. If
        given, labels must be of string type and take values from
        `label_vocabulary`. If it is not given, labels must already be encoded as
        integers or floats within [0, 1] for `n_classes=2`, or as integer values
        in {0, 1, ..., n_classes-1} for `n_classes` > 2. Errors will be raised if
        the vocabulary is not provided and the labels are strings.
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
warm_start_from: A string filepath to a checkpoint to warm-start from, or
a `WarmStartSettings` object to fully configure warm-starting. If the
string filepath is provided instead of a `WarmStartSettings`, then all
weights are warm-started, and it is assumed that vocabularies and Tensor
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = (
list(linear_feature_columns) + list(dnn_feature_columns))
if not self._feature_columns:
raise ValueError('Either linear_feature_columns or dnn_feature_columns '
'must be defined.')
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
def _model_fn(features, labels, mode, config):
"""Call the _dnn_linear_combined_model_fn."""
return _dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNLinearCombinedClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
warm_start_from=warm_start_from)
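# Illustrative sketch, not part of the original module: a concrete stand-in for the
# `input_fn_train` stub in the class docstring, using a single numeric feature.
# The feature name 'x', the toy data, and the hidden-unit sizes are hypothetical.
def _classifier_usage_sketch():
  import numpy as np
  import tensorflow as tf  # assumes a TF 1.x environment
  features = {'x': np.random.rand(256, 4).astype(np.float32)}
  labels = np.random.randint(0, 2, size=256)
  input_fn = tf.estimator.inputs.numpy_input_fn(
      x=features, y=labels, batch_size=32, num_epochs=None, shuffle=True)
  column = tf.feature_column.numeric_column('x', shape=[4])
  est = DNNLinearCombinedClassifier(
      linear_feature_columns=[column],
      dnn_feature_columns=[column],
      dnn_hidden_units=[16, 8])
  est.train(input_fn=input_fn, steps=10)
  return est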
@estimator_export('estimator.DNNLinearCombinedRegressor')
class DNNLinearCombinedRegressor(estimator.Estimator):
"""An estimator for TensorFlow Linear and DNN joined models for regression.
Note: This estimator is also known as wide-n-deep.
Example:
```python
numeric_feature = numeric_column(...)
categorical_column_a = categorical_column_with_hash_bucket(...)
categorical_column_b = categorical_column_with_hash_bucket(...)
categorical_feature_a_x_categorical_feature_b = crossed_column(...)
  categorical_feature_a_emb = embedding_column(
      categorical_column=categorical_column_a, ...)
  categorical_feature_b_emb = embedding_column(
      categorical_column=categorical_column_b, ...)
estimator = DNNLinearCombinedRegressor(
# wide settings
linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[
categorical_feature_a_emb, categorical_feature_b_emb,
numeric_feature],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
# warm-start settings
warm_start_from="/path/to/checkpoint/dir")
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
  # The same applies to FtrlOptimizer.
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
  Input of `train` and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
* for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using mean squared error.
@compatibility(eager)
Estimators can be used while eager execution is enabled. Note that `input_fn`
and all hooks are executed inside a graph context, so they have to be written
to be compatible with graph mode. Note that `input_fn` code using `tf.data`
generally works in both graph and eager modes.
@end_compatibility
"""
def __init__(self,
model_dir=None,
linear_feature_columns=None,
linear_optimizer='Ftrl',
dnn_feature_columns=None,
dnn_optimizer='Adagrad',
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
label_dimension=1,
weight_column=None,
input_layer_partitioner=None,
config=None,
warm_start_from=None,
loss_reduction=losses.Reduction.SUM):
"""Initializes a DNNLinearCombinedRegressor instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Defaults to FTRL optimizer.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Defaults to Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to down-weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
input_layer_partitioner: Partitioner for input layer. Defaults to
`min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: RunConfig object to configure the runtime settings.
warm_start_from: A string filepath to a checkpoint to warm-start from, or
a `WarmStartSettings` object to fully configure warm-starting. If the
string filepath is provided instead of a `WarmStartSettings`, then all
weights are warm-started, and it is assumed that vocabularies and Tensor
names are unchanged.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM`.
Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = (
list(linear_feature_columns) + list(dnn_feature_columns))
if not self._feature_columns:
raise ValueError('Either linear_feature_columns or dnn_feature_columns '
'must be defined.')
def _model_fn(features, labels, mode, config):
"""Call the _dnn_linear_combined_model_fn."""
return _dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head_lib._regression_head( # pylint: disable=protected-access
label_dimension=label_dimension, weight_column=weight_column,
loss_reduction=loss_reduction),
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNLinearCombinedRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
warm_start_from=warm_start_from)
|
|
##########################################################################
#
# Copyright (c) 2011-2013, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
class TabbedContainer( GafferUI.ContainerWidget ) :
__DragState = IECore.Enum.create( "None", "Waiting", "Active" )
__palette = None
def __init__( self, cornerWidget=None, **kw ) :
GafferUI.ContainerWidget.__init__( self, _TabWidget(), **kw )
self.__tabBar = GafferUI.Widget( QtWidgets.QTabBar() )
self.__tabBar._qtWidget().setDrawBase( False )
self.__tabBar._qtWidget().tabMoved.connect( Gaffer.WeakMethod( self.__moveWidget ) )
self.__tabBar.dragEnterSignal().connect( Gaffer.WeakMethod( self.__tabBarDragEnter ), scoped = False )
self.__tabBar.dragMoveSignal().connect( Gaffer.WeakMethod( self.__tabBarDragMove ), scoped = False )
self.__tabBar.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__tabBarDragLeave ), scoped = False )
self.__tabBarDragState = self.__DragState.None
# See comments in Button.py
if TabbedContainer.__palette is None :
TabbedContainer.__palette = QtGui.QPalette( QtWidgets.QApplication.instance().palette( self.__tabBar._qtWidget() ) )
TabbedContainer.__palette.setColor( QtGui.QPalette.Disabled, QtGui.QPalette.Light, QtGui.QColor( 0, 0, 0, 0 ) )
self.__tabBar._qtWidget().setPalette( TabbedContainer.__palette )
self._qtWidget().setTabBar( self.__tabBar._qtWidget() )
self._qtWidget().setUsesScrollButtons( False )
self._qtWidget().setElideMode( QtCore.Qt.ElideNone )
self.__widgets = []
self.__cornerWidget = None
self.setCornerWidget( cornerWidget )
self.__currentChangedSignal = GafferUI.WidgetEventSignal()
self._qtWidget().currentChanged.connect( Gaffer.WeakMethod( self.__currentChanged ) )
def append( self, child, label="" ) :
oldParent = child.parent()
if oldParent is not None :
oldParent.removeChild( child )
self.__widgets.append( child )
self._qtWidget().addTab( child._qtWidget(), label )
# note that we are deliberately not calling child._applyVisibility(),
# because the tabbed container operates by managing the visibility
# of the children itself - interfering with that would cause all manner
# of graphical glitches.
def remove( self, child ) :
self.removeChild( child )
def insert( self, index, child, label="" ) :
l = len( self.__widgets )
if index > l :
index = l
oldParent = child.parent()
if oldParent is not None :
oldParent.removeChild( child )
self.__widgets.insert( index, child )
self._qtWidget().insertTab( index, child._qtWidget(), label )
def setLabel( self, child, labelText ) :
self._qtWidget().setTabText( self.__widgets.index( child ), labelText )
def getLabel( self, child ) :
return str( self._qtWidget().tabText( self.__widgets.index( child ) ) )
def setCurrent( self, child ) :
self._qtWidget().setCurrentIndex( self.__widgets.index( child ) )
def getCurrent( self ) :
if not self.__widgets :
return None
return self.__widgets[ self._qtWidget().currentIndex() ]
def __moveWidget( self, fromIndex, toIndex ) :
w = self.__widgets[ fromIndex ]
del self.__widgets[ fromIndex ]
self.__widgets.insert( toIndex, w )
def __getitem__( self, index ) :
return self.__widgets[index]
def __delitem__( self, index ) :
if isinstance( index, slice ) :
indices = range( *(index.indices( len( self ) ) ) )
for i in indices :
self._qtWidget().removeTab( self._qtWidget().indexOf( self[i]._qtWidget() ) )
self[i]._qtWidget().setParent( None )
self[i]._applyVisibility()
del self.__widgets[index]
else :
self.removeChild( self.__widgets[index] )
def __len__( self ) :
return len( self.__widgets )
def index( self, child ) :
return self.__widgets.index( child )
def addChild( self, child, label="" ) :
self.append( child, label )
def removeChild( self, child ) :
assert( child is self.__cornerWidget or child in self.__widgets )
if child is self.__cornerWidget :
self._qtWidget().setCornerWidget( None )
self.__cornerWidget = None
else :
# We must remove the child from __widgets before the tab, otherwise
			# currentChangedSignal will be emitted with the old widget.
removalIndex = self.__widgets.index( child )
self.__widgets.remove( child )
self._qtWidget().removeTab( removalIndex )
child._qtWidget().setParent( None )
child._applyVisibility()
def setCornerWidget( self, cornerWidget ) :
if self.__cornerWidget is not None :
self.removeChild( self.__cornerWidget )
if cornerWidget is not None :
oldParent = cornerWidget.parent()
if oldParent is not None :
oldParent.removeChild( cornerWidget )
self._qtWidget().setCornerWidget( cornerWidget._qtWidget() )
cornerWidget._applyVisibility()
assert( cornerWidget._qtWidget().parent() is self._qtWidget() )
else :
self._qtWidget().setCornerWidget( None )
self.__cornerWidget = cornerWidget
def getCornerWidget( self ) :
return self.__cornerWidget
## If the tabs are hidden, then the corner widget will
# also be hidden.
def setTabsVisible( self, visible ) :
self._qtWidget().tabBar().setVisible( visible )
if self.__cornerWidget is not None :
self.__cornerWidget.setVisible( visible )
def getTabsVisible( self ) :
return not self._qtWidget().tabBar().isHidden()
def currentChangedSignal( self ) :
return self.__currentChangedSignal
def _revealDescendant( self, descendant ) :
child = None
while descendant is not None :
parent = descendant.parent()
if parent is self :
child = descendant
break
descendant = parent
if child is not None :
self.setCurrent( child )
def __currentChanged( self, index ) :
current = self[index] if len(self) else None
self.__currentChangedSignal( self, current )
def __tabBarDragEnter( self, widget, event ) :
if isinstance( event.data, IECore.NullObject ) :
return False
# we delay the tab switch a little to make sure that the user isn't just passing through
self.__tabBarDragState = self.__DragState.Waiting
QtCore.QTimer.singleShot( QtWidgets.QApplication.doubleClickInterval(), self.__tabBarDragActivate )
return True
def __tabBarDragMove( self, widget, event ) :
if self.__tabBarDragState == self.__DragState.Active :
self.__switchToTabUnderCursor()
def __tabBarDragLeave( self, widget, event ) :
self.__tabBarDragState = self.__DragState.None
return True
def __tabBarDragActivate( self ) :
if self.__tabBarDragState == self.__DragState.Waiting :
self.__tabBarDragState = self.__DragState.Active
self.__switchToTabUnderCursor()
def __switchToTabUnderCursor( self ) :
p = self.__tabBar._qtWidget().mapFromGlobal( QtGui.QCursor.pos() )
tab = self.__tabBar._qtWidget().tabAt( p )
if tab >= 0 :
self._qtWidget().setCurrentIndex( tab )
# Private implementation - a QTabWidget with custom size behaviour.
class _TabWidget( QtWidgets.QTabWidget ) :
def __init__( self, parent = None ) :
QtWidgets.QTabWidget.__init__( self, parent )
# Reimplemented so that the tabs aren't taken into
# account when they're not visible.
def sizeHint( self ) :
result = QtWidgets.QTabWidget.sizeHint( self )
if self.tabBar().isHidden() :
if self.tabPosition() in ( self.North, self.South ) :
result.setHeight( result.height() - self.tabBar().sizeHint().height() )
else :
result.setWidth( result.width() - self.tabBar().sizeHint().width() )
return result
# Reimplemented so that the tabs aren't taken into
# account when they're not visible.
def minimumSizeHint( self ) :
result = QtWidgets.QTabWidget.minimumSizeHint( self )
if self.tabBar().isHidden() :
if self.tabPosition() in ( self.North, self.South ) :
result.setHeight( result.height() - self.tabBar().minimumSizeHint().height() )
else :
result.setWidth( result.width() - self.tabBar().minimumSizeHint().width() )
return result
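# Illustrative sketch, not part of the original module : basic use of the
# TabbedContainer defined above. It assumes it is called from within a running
# Gaffer session and uses only stock GafferUI widgets ( Label, Window ).
def _tabbedContainerSketch() :

	window = GafferUI.Window( "TabbedContainer sketch" )

	tabs = TabbedContainer()
	tabs.append( GafferUI.Label( "First tab" ), label = "A" )
	tabs.append( GafferUI.Label( "Second tab" ), label = "B" )
	tabs.setCurrent( tabs[1] )

	window.setChild( tabs )
	window.setVisible( True )

	return window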
|
|
"""
Histogram-related functions
"""
from __future__ import division, absolute_import, print_function
import functools
import operator
import warnings
import numpy as np
from numpy.compat.py3k import basestring
from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _hist_bin_sqrt(x, range):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / np.sqrt(x.size)
def _hist_bin_sturges(x, range):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x, range):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x, range):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
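# Illustrative sketch, not part of the original module: the four simple width
# rules above applied to one draw of normal data. The seed and sample size are
# arbitrary; the point is that each rule returns a bin *width*, which the caller
# later converts to a bin count by dividing the data range by it.
def _demo_simple_width_rules():
    x = np.random.RandomState(0).normal(size=1000)
    span = (x.min(), x.max())
    return {
        'sqrt': _hist_bin_sqrt(x, span),
        'sturges': _hist_bin_sturges(x, span),
        'rice': _hist_bin_rice(x, span),
        'scott': _hist_bin_scott(x, span),
    }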
def _hist_bin_stone(x, range):
"""
Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
    This paper by Stone appears to be the origin of this rule.
http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
range : (float, float)
The lower and upper range of the bins.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
n = x.size
ptp_x = np.ptp(x)
if n <= 1 or ptp_x == 0:
return 0
def jhat(nbins):
hh = ptp_x / nbins
p_k = np.histogram(x, bins=nbins, range=range)[0] / n
return (2 - (n + 1) * p_k.dot(p_k)) / hh
nbins_upper_bound = max(100, int(np.sqrt(n)))
nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
if nbins == nbins_upper_bound:
warnings.warn("The number of bins estimated may be suboptimal.", RuntimeWarning, stacklevel=2)
return ptp_x / nbins
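# Illustrative sketch, not part of the original module: Stone's rule selects the
# bin count that minimizes the cross-validation objective `jhat` above, and the
# returned width is simply the data range divided by that count.
def _demo_stone_rule():
    x = np.random.RandomState(2).normal(size=500)
    width = _hist_bin_stone(x, (x.min(), x.max()))
    return width, int(np.ceil(np.ptp(x) / width))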
def _hist_bin_doane(x, range):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
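# Illustrative sketch, not part of the original module: a single extreme outlier
# inflates the Scott width (driven by the standard deviation) far more than the
# FD width (driven by the IQR), which is the robustness described above.
def _demo_fd_vs_scott_outlier():
    x = np.r_[np.random.RandomState(4).normal(size=1000), 1e3]
    span = (x.min(), x.max())
    return {'fd': _hist_bin_fd(x, span), 'scott': _hist_bin_scott(x, span)}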
def _hist_bin_auto(x, range):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
and the Sturges estimator if the FD bandwidth is 0.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
    and is the default in the R language. This method gives good off-the-shelf
    behaviour.
.. versionchanged:: 1.15.0
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
    If the IQR is 0, it's unlikely any variance-based estimators will be of
    use, so we revert to the Sturges estimator, which only uses the size of the
    dataset in its calculation.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
fd_bw = _hist_bin_fd(x, range)
sturges_bw = _hist_bin_sturges(x, range)
del range # unused
if fd_bw:
return min(fd_bw, sturges_bw)
else:
# limited variance, so we return a len dependent bw estimator
return sturges_bw
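# Illustrative sketch, not part of the original module: the fallback described
# above. Data whose IQR is zero gives an FD width of zero, so the Sturges width
# is returned; otherwise the smaller of the two widths wins.
def _demo_auto_fallback():
    spiky = np.r_[np.zeros(100), 1.0]                    # IQR is 0 here
    spread = np.random.RandomState(1).normal(size=1000)  # IQR is non-zero
    return (_hist_bin_auto(spiky, (spiky.min(), spiky.max())),
            _hist_bin_auto(spread, (spread.min(), spread.max())))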
# Private dict initialized at module load time
_hist_bin_selectors = {'stone': _hist_bin_stone,
'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
# Ensure that the array is a "subtractable" dtype
if a.dtype == np.bool_:
warnings.warn("Converting input from {} to {} for compatibility."
.format(a.dtype, np.uint8),
RuntimeWarning, stacklevel=2)
a = a.astype(np.uint8)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
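# Illustrative sketch, not part of the original module: constant data gets an
# artificial +/- 0.5 expansion and empty data falls back to (0, 1), so callers
# never end up dividing by a zero-width range.
def _demo_get_outer_edges():
    constant = _get_outer_edges(np.array([2.0, 2.0, 2.0]), None)   # (1.5, 2.5)
    empty = _get_outer_edges(np.array([]), None)                   # (0, 1)
    explicit = _get_outer_edges(np.array([5.0]), (0, 10))          # (0, 10)
    return constant, empty, explicit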
def _unsigned_subtract(a, b):
"""
Subtract two values where a >= b, and produce an unsigned result
This is needed when finding the difference between the upper and lower
bound of an int16 histogram
"""
# coerce to a single type
signed_to_unsigned = {
np.byte: np.ubyte,
np.short: np.ushort,
np.intc: np.uintc,
np.int_: np.uint,
np.longlong: np.ulonglong
}
dt = np.result_type(a, b)
try:
dt = signed_to_unsigned[dt.type]
except KeyError:
return np.subtract(a, b, dtype=dt)
else:
# we know the inputs are integers, and we are deliberately casting
# signed to unsigned
return np.subtract(a, b, casting='unsafe', dtype=dt)
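# Illustrative sketch, not part of the original module: why the unsigned subtract
# is needed. With np.int8 inputs the plain difference wraps around, while the
# unsigned variant returns the true span of the data as a np.uint8.
def _demo_unsigned_subtract():
    hi, lo = np.int8(127), np.int8(-128)
    wrapped = np.subtract(hi, lo)      # wraps to -1 (may emit an overflow warning)
    span = _unsigned_subtract(hi, lo)  # 255
    return wrapped, span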
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
Parameters
==========
a : ndarray
Ravelled data array
bins, range
Forwarded arguments from `histogram`.
weights : ndarray, optional
Ravelled weights array, or None
Returns
=======
bin_edges : ndarray
Array of bin edges
    uniform_bins : (Number, Number, int)
        The lower bound, upper bound, and number of bins, used in the optimized
        implementation of `histogram` that works on uniform bins.
"""
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
if isinstance(bins, basestring):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
"{!r} is not a valid estimator for `bins`".format(bin_name))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
first_edge, last_edge = _get_outer_edges(a, range)
# truncate the range if needed
if range is not None:
keep = (a >= first_edge)
keep &= (a <= last_edge)
if not np.logical_and.reduce(keep):
a = a[keep]
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
if width:
n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif np.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError:
raise TypeError(
'`bins` must be an integer, a string, or an array')
if n_equal_bins < 1:
raise ValueError('`bins` must be positive, when an integer')
first_edge, last_edge = _get_outer_edges(a, range)
elif np.ndim(bins) == 1:
bin_edges = np.asarray(bins)
if np.any(bin_edges[:-1] > bin_edges[1:]):
raise ValueError(
'`bins` must increase monotonically, when an array')
else:
raise ValueError('`bins` must be 1d, when an array')
if n_equal_bins is not None:
# gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, a)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# bin edges must be computed
bin_edges = np.linspace(
first_edge, last_edge, n_equal_bins + 1,
endpoint=True, dtype=bin_type)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return bin_edges, None
def _search_sorted_inclusive(a, v):
"""
Like `searchsorted`, but where the last item in `v` is placed on the right.
In the context of a histogram, this makes the last bin edge inclusive
"""
return np.concatenate((
a.searchsorted(v[:-1], 'left'),
a.searchsorted(v[-1:], 'right')
))
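# Illustrative sketch, not part of the original module: a value equal to the last
# bin edge is counted in the final bin. Differencing the cumulative counts gives
# per-bin counts of [2, 1, 2] for the data and edges below.
def _demo_search_sorted_inclusive():
    data = np.array([0.0, 0.5, 1.0, 2.0, 3.0])    # already sorted
    edges = np.array([0.0, 1.0, 2.0, 3.0])
    cum = _search_sorted_inclusive(data, edges)   # array([0, 2, 3, 5])
    return np.diff(cum)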
def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
Function to calculate only the edges of the bins used by the `histogram` function.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
            Less robust estimator that takes into account data
variability and data size.
'stone'
Estimator based on leave-one-out cross-validation estimate of
the integrated squared error. Can be regarded as a generalization
of Scott's rule.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). This is currently not used by any of the bin estimators,
but may be in the future.
Returns
-------
bin_edges : array of dtype float
The edges to pass into `histogram`
See Also
--------
histogram
Notes
-----
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
>>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> np.histogram_bin_edges(arr, bins=2)
array([0. , 2.5, 5. ])
For consistency with histogram, an array of pre-computed bins is
passed through unmodified:
>>> np.histogram_bin_edges(arr, [1, 2])
array([1, 2])
This function allows one set of bins to be computed, and reused across
multiple histograms:
>>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
>>> shared_bins
array([0., 1., 2., 3., 4., 5.])
>>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
>>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
>>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
>>> hist_0; hist_1
array([1, 1, 0, 1, 0])
array([2, 0, 1, 1, 2])
Which gives more easily comparable results than using separate bins for
each histogram:
>>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
>>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
    >>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
array([0., 1., 2., 3.])
array([0. , 1.25, 2.5 , 3.75, 5. ])
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, _ = _get_bin_edges(a, bins, range, weights)
return bin_edges
def _histogram_dispatcher(
a, bins=None, range=None, normed=None, weights=None, density=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, normed=None, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
.. deprecated:: 1.6.0
This is equivalent to the `density` argument, but produces incorrect
results for unequal bin widths. It should not be used.
.. versionchanged:: 1.15.0
DeprecationWarnings are actually emitted.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
# The fast path uses bincount, but that only works for certain types
# of weight
simple_weights = (
weights is None or
np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, complex)
)
if uniform_bins is not None and simple_weights:
# Fast algorithm for equal bins
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
first_edge, last_edge, n_equal_bins = uniform_bins
# Initialize empty histogram
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= first_edge)
keep &= (tmp_a <= last_edge)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
# This cast ensures no type promotions occur below, which gh-10322
# make unpredictable. Getting it wrong leads to precision errors
# like gh-8123.
tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = ((tmp_a >= bin_edges[indices + 1])
& (indices != n_equal_bins - 1))
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real,
minlength=n_equal_bins)
n.imag += np.bincount(indices, weights=tmp_w.imag,
minlength=n_equal_bins)
else:
n += np.bincount(indices, weights=tmp_w,
minlength=n_equal_bins).astype(ntype)
else:
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
for i in _range(0, len(a), BLOCK):
sa = np.sort(a[i:i+BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate((zero, sw.cumsum()))
bin_index = _search_sorted_inclusive(sa, bin_edges)
cum_n += cw[bin_index]
n = np.diff(cum_n)
# density overrides the normed keyword
if density is not None:
if normed is not None:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"The normed argument is ignored when density is provided. "
"In future passing both will result in an error.",
DeprecationWarning, stacklevel=2)
normed = None
if density:
db = np.array(np.diff(bin_edges), float)
return n/db/n.sum(), bin_edges
elif normed:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"Passing `normed=True` on non-uniform bins has always been "
"broken, and computes neither the probability density "
"function nor the probability mass function. "
"The result is only correct if the bins are uniform, when "
"density=True will produce the same result anyway. "
"The argument will be removed in a future version of "
"numpy.",
np.VisibleDeprecationWarning, stacklevel=2)
# this normalization is incorrect, but
db = np.array(np.diff(bin_edges), float)
return n/(n*db).sum(), bin_edges
else:
if normed is not None:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"Passing normed=False is deprecated, and has no effect. "
"Consider passing the density argument instead.",
DeprecationWarning, stacklevel=2)
return n, bin_edges
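# Illustrative sketch, not part of the original module: the uniform-bin fast path
# (integer `bins`) and the general searchsorted path (explicit edge array) should
# produce the same counts for the same edges, up to rounding at the bin edges.
def _demo_histogram_paths_agree():
    x = np.random.RandomState(3).normal(size=10000)
    fast, edges = histogram(x, bins=32)
    slow, _ = histogram(x, bins=edges)   # explicit edges take the general path
    return np.array_equal(fast, slow)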
def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
weights=None, density=None):
return (sample, bins, weights)
@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : (N, D) array, or (D, N) array_like
The data to be histogrammed.
Note the unusual interpretation of sample when an array_like:
* When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
        * When an array_like, each element is the list of values for a single
          coordinate - such as ``histogramdd((X, Y, Z))``.
The first form should be preferred.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the monotonically increasing bin
edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of length D, each an optional (lower, upper) tuple giving
the outer bin edges to be used if the edges are not given explicitly in
`bins`.
An entry of None in the sequence results in the minimum and maximum
values being used for the corresponding dimension.
The default, None, is equivalent to passing a tuple of D None values.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_volume``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = np.asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# normalize the range argument
if range is None:
range = (None,) * D
elif len(range) != D:
raise ValueError('range argument must have one entry per dimension')
# Create edge arrays
for i in _range(D):
if np.ndim(bins[i]) == 0:
if bins[i] < 1:
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
edges[i] = np.linspace(smin, smax, bins[i] + 1)
elif np.ndim(bins[i]) == 1:
edges[i] = np.asarray(bins[i])
if np.any(edges[i][:-1] > edges[i][1:]):
raise ValueError(
'`bins[{}]` must be monotonically increasing, when an array'
.format(i))
else:
raise ValueError(
'`bins[{}]` must be a scalar or 1d array'.format(i))
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
dedges[i] = np.diff(edges[i])
# Compute the bin number each sample falls into.
Ncount = tuple(
# avoid np.digitize to work around gh-11022
np.searchsorted(edges[i], sample[:, i], side='right')
for i in _range(D)
)
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
# Find which points are on the rightmost edge.
on_edge = (sample[:, i] == edges[i][-1])
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
xy = np.ravel_multi_index(Ncount, nbin)
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
hist = np.bincount(xy, weights, minlength=nbin.prod())
# Shape into a proper matrix
hist = hist.reshape(nbin)
# This preserves the (bad) behavior observed in gh-7845, for now.
hist = hist.astype(float, casting='safe')
# Remove outliers (indices 0 and -1 for each dimension).
core = D*(slice(1, -1),)
hist = hist[core]
# handle the aliasing normed argument
if normed is None:
if density is None:
density = False
elif density is None:
# an explicit normed argument was passed, alias it to the new name
density = normed
else:
raise TypeError("Cannot specify both 'normed' and 'density'")
if density:
# calculate the probability density function
s = hist.sum()
for i in _range(D):
shape = np.ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
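# Illustrative sketch, not part of the original module: a 2-D case small enough to
# check by hand. Each quadrant of the unit square receives exactly one point, so
# the returned histogram is a 2x2 matrix of ones.
def _demo_histogramdd_small():
    pts = np.array([[0.1, 0.1], [0.4, 0.6], [0.9, 0.9], [0.9, 0.1]])
    hist, edges = histogramdd(pts, bins=(2, 2), range=[(0, 1), (0, 1)])
    return hist, edges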
|