# -*- coding: utf-8 -*-
# Copyright (c) 2011 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a class to fix certain code style issues.
"""
from __future__ import unicode_literals
try:
# Python 2
from StringIO import StringIO # __IGNORE_EXCEPTION__
except ImportError:
# Python 3
from io import StringIO # __IGNORE_WARNING__
import os
import re
import tokenize
# CodeStyleCheckerDialog tries to import FixableCodeStyleIssues, which fails
# under Python3, so just ignore the import error here.
try:
import pep8
except ImportError:
pass
FixableCodeStyleIssues = [
"D111", "D112", "D113", "D121", "D131", "D141",
"D142", "D143", "D144", "D145",
"D221", "D222", "D231", "D242", "D243", "D244",
"D245", "D246", "D247",
"E101", "E111", "E121", "E122", "E123", "E124",
"E125", "E126", "E127", "E128", "E133", "E201",
"E202", "E203", "E211", "E221", "E222", "E223",
"E224", "E225", "E226", "E227", "E228", "E231",
"E241", "E242", "E251", "E261", "E262", "E271",
"E272", "E273", "E274", "E301", "E302", "E303",
"E304", "E401", "E501", "E502", "E701", "E702",
"E703", "E711", "E712",
"N804", "N805", "N806",
"W191", "W291", "W292", "W293", "W391", "W603",
]
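# The fixable codes fall into three families: the "Dxxx" codes come from
# eric's documentation style checker, the "Exxx"/"Wxxx" codes come from pep8,
# and the "Nxxx" codes come from the naming convention checker. The mapping
# from code to fix method is built in CodeStyleFixer.__init__() below.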
class CodeStyleFixer(object):
"""
Class implementing a fixer for certain code style issues.
"""
def __init__(self, filename, sourceLines, fixCodes, noFixCodes,
maxLineLength, inPlace, eol, backup=False):
"""
Constructor
@param filename name of the file to be fixed (string)
@param sourceLines list of source lines including eol marker
(list of string)
@param fixCodes list of codes to be fixed as a comma separated
string (string)
@param noFixCodes list of codes not to be fixed as a comma
separated string (string)
@param maxLineLength maximum allowed line length (integer)
@param inPlace flag indicating to modify the file in place (boolean)
@param eol end of line character(s) (string)
@param backup flag indicating to create a backup before fixing
anything (boolean)
"""
super(CodeStyleFixer, self).__init__()
self.__filename = filename
self.__origName = ""
self.__source = sourceLines[:] # save a copy
self.__fixCodes = [c.strip() for c in fixCodes.split(",") if c.strip()]
self.__noFixCodes = [
c.strip() for c in noFixCodes.split(",") if c.strip()]
self.__maxLineLength = maxLineLength
self.fixed = 0
self.__reindenter = None
self.__indentWord = self.__getIndentWord()
if inPlace:
self.__createBackup = backup
else:
self.__origName = self.__filename
self.__filename = os.path.join(
os.path.dirname(self.__filename),
"fixed_" + os.path.basename(self.__filename))
self.__createBackup = False
self.__eol = eol
self.__fixes = {
"D111": self.__fixD111,
"D112": self.__fixD112,
"D113": self.__fixD112,
"D121": self.__fixD121,
"D131": self.__fixD131,
"D141": self.__fixD141,
"D142": self.__fixD142,
"D143": self.__fixD143,
"D144": self.__fixD144,
"D145": self.__fixD145,
"D221": self.__fixD221,
"D222": self.__fixD221,
"D231": self.__fixD131,
"D242": self.__fixD242,
"D243": self.__fixD243,
"D244": self.__fixD242,
"D245": self.__fixD243,
"D246": self.__fixD144,
"D247": self.__fixD247,
"E101": self.__fixE101,
"E111": self.__fixE101,
"E121": self.__fixE121,
"E122": self.__fixE122,
"E123": self.__fixE123,
"E124": self.__fixE121,
"E125": self.__fixE125,
"E126": self.__fixE126,
"E127": self.__fixE127,
"E128": self.__fixE127,
"E133": self.__fixE126,
"E201": self.__fixE201,
"E202": self.__fixE201,
"E203": self.__fixE201,
"E211": self.__fixE201,
"E221": self.__fixE221,
"E222": self.__fixE221,
"E223": self.__fixE221,
"E224": self.__fixE221,
"E225": self.__fixE221,
"E226": self.__fixE221,
"E227": self.__fixE221,
"E228": self.__fixE221,
"E231": self.__fixE231,
"E241": self.__fixE221,
"E242": self.__fixE221,
"E251": self.__fixE251,
"E261": self.__fixE261,
"E262": self.__fixE261,
"E271": self.__fixE221,
"E272": self.__fixE221,
"E273": self.__fixE221,
"E274": self.__fixE221,
"E301": self.__fixE301,
"E302": self.__fixE302,
"E303": self.__fixE303,
"E304": self.__fixE304,
"E401": self.__fixE401,
"E501": self.__fixE501,
"E502": self.__fixE502,
"E701": self.__fixE701,
"E702": self.__fixE702,
"E703": self.__fixE702,
"E711": self.__fixE711,
"E712": self.__fixE711,
"N804": self.__fixN804,
"N805": self.__fixN804,
"N806": self.__fixN806,
"W191": self.__fixE101,
"W291": self.__fixW291,
"W292": self.__fixW292,
"W293": self.__fixW291,
"W391": self.__fixW391,
"W603": self.__fixW603,
}
self.__modified = False
self.__stackLogical = []
# These need to be fixed before the file is saved but after all
# other inline fixes. These work with logical lines.
self.__stack = []
# These need to be fixed before the file is saved but after all
# inline fixes.
self.__multiLineNumbers = None
self.__docLineNumbers = None
self.__lastID = 0
def saveFile(self, encoding):
"""
Public method to save the modified file.
@param encoding encoding of the source file (string)
@return error message on failure (tuple of str)
"""
import codecs
if not self.__modified:
# no need to write
return
if self.__createBackup:
# create a backup file before writing any changes
if os.path.islink(self.__filename):
bfn = '{0}~'.format(os.path.realpath(self.__filename))
else:
bfn = '{0}~'.format(self.__filename)
try:
os.remove(bfn)
except EnvironmentError:
# if there was an error, ignore it
pass
try:
os.rename(self.__filename, bfn)
except EnvironmentError:
# if there was an error, ignore it
pass
txt = "".join(self.__source)
try:
enc = 'utf-8' if encoding == 'utf-8-bom' else encoding
txt = txt.encode(enc)
if encoding == 'utf-8-bom':
txt = codecs.BOM_UTF8 + txt
with open(self.__filename, "wb") as fp:
fp.write(txt)
except (IOError, UnicodeError) as err:
# Could not save the file! Skipping it. Reason: {0}
return ("FWRITE_ERROR", (str(err),))
return
def __codeMatch(self, code):
"""
Private method to check if the code should be fixed.
@param code issue code to check (string)
@return flag indicating it should be fixed (boolean)
"""
def mutualStartswith(a, b):
"""
Local helper method to compare the beginnings of two strings
against each other.
@return flag indicating that one string starts with the other
(boolean)
"""
return b.startswith(a) or a.startswith(b)
if self.__noFixCodes:
for noFixCode in [c.strip() for c in self.__noFixCodes]:
if mutualStartswith(code.lower(), noFixCode.lower()):
return False
if self.__fixCodes:
for fixCode in [c.strip() for c in self.__fixCodes]:
if mutualStartswith(code.lower(), fixCode.lower()):
return True
return False
return True
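# Illustration of the matching rules above: with fixCodes = "E2" an issue
# code "E225" is fixed because mutualStartswith() compares prefixes in both
# directions, and noFixCodes always wins over fixCodes because it is
# checked first.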
def fixIssue(self, line, pos, message):
"""
Public method to fix the fixable issues.
@param line line number of issue (integer)
@param pos character position of issue (integer)
@param message message text (string)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if isinstance(message, (tuple, list)):
code = message[0].strip()
else:
code = message.split(None, 1)[0].strip()
if line <= len(self.__source) and \
self.__codeMatch(code) and \
code in self.__fixes:
res = self.__fixes[code](code, line, pos)
if res[0] == 1:
self.__modified = True
self.fixed += 1
else:
res = (0, "", 0)
return res
def finalize(self):
"""
Public method to apply all deferred fixes.
@return dictionary containing the fix results
"""
results = {}
# step 1: do fixes operating on logical lines first
for id_, code, line, pos in self.__stackLogical:
res, msg, _ = self.__fixes[code](code, line, pos, apply=True)
if res == 1:
self.__modified = True
self.fixed += 1
results[id_] = (res, msg)
# step 2: do fixes that change the number of lines
for id_, code, line, pos in reversed(self.__stack):
res, msg, _ = self.__fixes[code](code, line, pos, apply=True)
if res == 1:
self.__modified = True
self.fixed += 1
results[id_] = (res, msg)
return results
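# The reversed() iteration in step 2 above is what keeps the recorded line
# numbers valid: fixes that add or remove lines are applied in reverse
# registration order (effectively bottom-up), so a fix near the end of the
# file cannot shift the positions stored for fixes still to be applied
# further up.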
def __getID(self):
"""
Private method to get the ID for a deferred fix.
@return ID for a deferred fix (integer)
"""
self.__lastID += 1
return self.__lastID
def __findLogical(self):
"""
Private method to extract the start and end positions of all logical
lines.
@return tuple of two lists of (row, column) tuples giving the starts
and ends of the logical lines
"""
logical_start = []
logical_end = []
last_newline = True
sio = StringIO("".join(self.__source))
parens = 0
for t in tokenize.generate_tokens(sio.readline):
if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
tokenize.INDENT, tokenize.NL,
tokenize.ENDMARKER]:
continue
if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
last_newline = True
logical_end.append((t[3][0] - 1, t[2][1]))
continue
if last_newline and not parens:
logical_start.append((t[2][0] - 1, t[2][1]))
last_newline = False
if t[0] == tokenize.OP:
if t[1] in '([{':
parens += 1
elif t[1] in '}])':
parens -= 1
return logical_start, logical_end
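# Both lists hold 0-based (row, column) tuples: an entry of logical_start
# marks the first token of a logical line and the matching entry of
# logical_end marks the token terminating it (a NEWLINE or a semicolon), so
# a statement continued over several bracketed physical lines yields exactly
# one start/end pair.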
def __getLogical(self, line, pos):
"""
Private method to get the logical line corresponding to the given
position.
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return tuple of a tuple of two integers giving the start of the
logical line, another tuple of two integers giving the end
of the logical line and a list of strings with the original
source lines
"""
try:
(logical_start, logical_end) = self.__findLogical()
except (SyntaxError, tokenize.TokenError):
return None
line = line - 1
ls = None
le = None
for i in range(0, len(logical_start)):
x = logical_end[i]
if x[0] > line or (x[0] == line and x[1] > pos):
le = x
ls = logical_start[i]
break
if ls is None:
return None
original = self.__source[ls[0]:le[0] + 1]
return ls, le, original
def __getIndentWord(self):
"""
Private method to determine the indentation type.
@return string to be used for an indentation (string)
"""
sio = StringIO("".join(self.__source))
indentWord = " " # default in case of failure
try:
for token in tokenize.generate_tokens(sio.readline):
if token[0] == tokenize.INDENT:
indentWord = token[1]
break
except (SyntaxError, tokenize.TokenError):
pass
return indentWord
def __getIndent(self, line):
"""
Private method to get the indentation string.
@param line line to determine the indentation string from (string)
@return indentation string (string)
"""
return line.replace(line.lstrip(), "")
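# Example: self.__getIndent("    return x\n") gives "    ", because replacing
# the lstripped remainder with an empty string leaves only the leading
# whitespace.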
def __multilineStringLines(self):
"""
Private method to determine the line numbers that are within multi line
strings and those that are part of a documentation string.
@return tuple of a set of line numbers belonging to a multi line
string and a set of line numbers belonging to a multi line
documentation string (tuple of two set of integer)
"""
if self.__multiLineNumbers is None:
source = "".join(self.__source)
sio = StringIO(source)
self.__multiLineNumbers = set()
self.__docLineNumbers = set()
previousTokenType = ''
try:
for t in tokenize.generate_tokens(sio.readline):
tokenType = t[0]
startRow = t[2][0]
endRow = t[3][0]
if (tokenType == tokenize.STRING and startRow != endRow):
if previousTokenType != tokenize.INDENT:
self.__multiLineNumbers |= set(
range(startRow, 1 + endRow))
else:
self.__docLineNumbers |= set(
range(startRow, 1 + endRow))
previousTokenType = tokenType
except (SyntaxError, tokenize.TokenError):
pass
return self.__multiLineNumbers, self.__docLineNumbers
def __fixReindent(self, line, pos, logical):
"""
Private method to fix a badly indented line.
This is done by adding or removing from its initial indent only.
@param line line number of the issue (integer)
@param pos position inside line (integer)
@param logical logical line structure
@return flag indicating a change was done (boolean)
"""
assert logical
ls, _, original = logical
rewrapper = IndentationWrapper(original)
valid_indents = rewrapper.pep8Expected()
if not rewrapper.rel_indent:
return False
if line > ls[0]:
# got a valid continuation line number
row = line - ls[0] - 1
# always pick the first option for this
valid = valid_indents[row]
got = rewrapper.rel_indent[row]
else:
return False
line1 = ls[0] + row
# always pick the expected indent, for now.
indent_to = valid[0]
if got != indent_to:
orig_line = self.__source[line1]
new_line = ' ' * (indent_to) + orig_line.lstrip()
if new_line == orig_line:
return False
else:
self.__source[line1] = new_line
return True
else:
return False
def __fixWhitespace(self, line, offset, replacement):
"""
Private method to correct whitespace at the given offset.
@param line line to be corrected (string)
@param offset offset within line (integer)
@param replacement replacement string (string)
@return corrected line
"""
left = line[:offset].rstrip(" \t")
right = line[offset:].lstrip(" \t")
if right.startswith("#"):
return line
else:
return left + replacement + right
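# Illustration (not taken from a checker run): __fixWhitespace("x  = 1\n", 1, " ")
# collapses the blanks around offset 1 and returns "x = 1\n"; if the
# right-hand part starts with "#" the line is returned unchanged so inline
# comments keep their spacing.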
def __fixD111(self, code, line, pos):
"""
Private method to fix docstring enclosed in wrong quotes.
Codes: D111
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
left, right = self.__source[line].split("'''", 1)
self.__source[line] = left + '"""' + right
while line < len(self.__source):
if self.__source[line].rstrip().endswith("'''"):
left, right = self.__source[line].rsplit("'''", 1)
self.__source[line] = left + '"""' + right
break
line += 1
# Triple single quotes converted to triple double quotes.
return (1, "FD111", 0)
def __fixD112(self, code, line, pos):
"""
Private method to fix docstring 'r' or 'u' in leading quotes.
Codes: D112, D113
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
if code == "D112":
insertChar = "r"
elif code == "D113":
insertChar = "u"
else:
return (0, "", 0)
newText = self.__getIndent(self.__source[line]) + \
insertChar + self.__source[line].lstrip()
self.__source[line] = newText
# Introductory quotes corrected to be {0}"""
return (1, ('FD112', (insertChar,)), 0)
def __fixD121(self, code, line, pos, apply=False):
"""
Private method to fix a single line docstring on multiple lines.
Codes: D121
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
if not self.__source[line].lstrip().startswith(
('"""', 'r"""', 'u"""')):
# only correctly formatted docstrings will be fixed
return (0, "", 0)
docstring = self.__source[line].rstrip() + \
self.__source[line + 1].strip()
if docstring.endswith('"""'):
docstring += self.__eol
else:
docstring += self.__source[line + 2].lstrip()
self.__source[line + 2] = ""
self.__source[line] = docstring
self.__source[line + 1] = ""
# Single line docstring put on one line.
return (1, "FD121", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD131(self, code, line, pos):
"""
Private method to fix a docstring summary not ending with a
period.
Codes: D131
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
newText = ""
if self.__source[line].rstrip().endswith(('"""', "'''")) and \
self.__source[line].lstrip().startswith(('"""', 'r"""', 'u"""')):
# it is a one-liner
newText = self.__source[line].rstrip()[:-3].rstrip() + "." + \
self.__source[line].rstrip()[-3:] + self.__eol
else:
if line < len(self.__source) - 1 and \
(not self.__source[line + 1].strip() or
self.__source[line + 1].lstrip().startswith("@") or
(self.__source[line + 1].strip() in ('"""', "'''") and
not self.__source[line].lstrip().startswith("@"))):
newText = self.__source[line].rstrip() + "." + self.__eol
if newText:
self.__source[line] = newText
# Period added to summary line.
return (1, "FD131", 0)
else:
return (0, "", 0)
def __fixD141(self, code, line, pos, apply=False):
"""
Private method to fix a function/method docstring preceded by a
blank line.
Codes: D141
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
self.__source[line - 1] = ""
# Blank line before function/method docstring removed.
return (1, "FD141", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD142(self, code, line, pos, apply=False):
"""
Private method to fix a class docstring not preceded by a
blank line.
Codes: D142
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
self.__source[line] = self.__eol + self.__source[line]
# Blank line inserted before class docstring.
return (1, "FD142", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD143(self, code, line, pos, apply=False):
"""
Private method to fix a class docstring not followed by a
blank line.
Codes: D143
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
self.__source[line] += self.__eol
# Blank line inserted after class docstring.
return (1, "FD143", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD144(self, code, line, pos, apply=False):
"""
Private method to fix a docstring summary not followed by a
blank line.
Codes: D144
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
if not self.__source[line].rstrip().endswith("."):
# only correct summary lines can be fixed here
return (0, "", 0)
self.__source[line] += self.__eol
# Blank line inserted after docstring summary.
return (1, "FD144", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD145(self, code, line, pos, apply=False):
"""
Private method to fix the last paragraph of a multi-line docstring
not followed by a blank line.
Codes: D145
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
self.__source[line] = self.__eol + self.__source[line]
# Blank line inserted after last paragraph of docstring.
return (1, "FD145", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD221(self, code, line, pos, apply=False):
"""
Private method to fix leading and trailing quotes of docstring
not on separate lines.
Codes: D221, D222
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
indent = self.__getIndent(self.__source[line])
source = self.__source[line].strip()
if code == "D221":
# leading
if source.startswith(("r", "u")):
first, second = source[:4], source[4:].strip()
else:
first, second = source[:3], source[3:].strip()
else:
# trailing
first, second = source[:-3].strip(), source[-3:]
newText = indent + first + self.__eol + \
indent + second + self.__eol
self.__source[line] = newText
if code == "D221":
# Leading quotes put on separate line.
msg = "FD221"
else:
# Trailing quotes put on separate line.
msg = "FD222"
return (1, msg, 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD242(self, code, line, pos, apply=False):
"""
Private method to fix a class or function/method docstring preceded
by a blank line.
Codes: D242, D244
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
self.__source[line - 1] = ""
if code == "D242":
# Blank line before class docstring removed.
msg = "FD242"
else:
# Blank line before function/method docstring removed.
msg = "FD244"
return (1, msg, 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD243(self, code, line, pos, apply=False):
"""
Private method to fix a class or function/method docstring followed
by a blank line.
Codes: D243, D245
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
self.__source[line + 1] = ""
if code == "D243":
# Blank line after class docstring removed.
msg = "FD243"
else:
# Blank line after function/method docstring removed.
msg = "FD245"
return (1, msg, 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixD247(self, code, line, pos, apply=False):
"""
Private method to fix the last paragraph of a docstring followed
by a blank line.
Codes: D247
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
self.__source[line - 1] = ""
# Blank line after last paragraph removed.
return (1, "FD247", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE101(self, code, line, pos):
"""
Private method to fix obsolete tab usage and indentation errors.
Codes: E101, E111, W191
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if self.__reindenter is None:
self.__reindenter = Reindenter(self.__source)
self.__reindenter.run()
fixedLine = self.__reindenter.fixedLine(line - 1)
if fixedLine is not None and fixedLine != self.__source[line - 1]:
self.__source[line - 1] = fixedLine
if code in ["E101", "W191"]:
# Tab converted to 4 spaces.
msg = "FE101"
else:
# Indentation adjusted to be a multiple of four.
msg = "FE111"
return (1, msg, 0)
else:
return (0, "", 0)
def __fixE121(self, code, line, pos, apply=False):
"""
Private method to fix the indentation of continuation lines and
closing brackets.
Codes: E121, E124
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
logical = self.__getLogical(line, pos)
if logical:
# Fix by adjusting initial indent level.
changed = self.__fixReindent(line, pos, logical)
if changed:
if code == "E121":
# Indentation of continuation line corrected.
msg = "FE121"
elif code == "E124":
# Indentation of closing bracket corrected.
msg = "FE124"
return (1, msg, 0)
return (0, "", 0)
else:
id = self.__getID()
self.__stackLogical.append((id, code, line, pos))
return (-1, "", id)
def __fixE122(self, code, line, pos, apply=False):
"""
Private method to fix a missing indentation of continuation lines.
Codes: E122
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
logical = self.__getLogical(line, pos)
if logical:
# Fix by adding an initial indent.
modified = self.__fixReindent(line, pos, logical)
if not modified:
# fall back to simple method
line = line - 1
text = self.__source[line]
indentation = self.__getIndent(text)
self.__source[line] = indentation + \
self.__indentWord + text.lstrip()
# Missing indentation of continuation line corrected.
return (1, "FE122", 0)
return (0, "", 0)
else:
id = self.__getID()
self.__stackLogical.append((id, code, line, pos))
return (-1, "", id)
def __fixE123(self, code, line, pos, apply=False):
"""
Private method to fix the indentation of closing bracket lines.
Codes: E123
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
logical = self.__getLogical(line, pos)
if logical:
# Fix by deleting whitespace to the correct level.
logicalLines = logical[2]
row = line - 1
text = self.__source[row]
newText = self.__getIndent(logicalLines[0]) + text.lstrip()
if newText == text:
# fall back to slower method
changed = self.__fixReindent(line, pos, logical)
else:
self.__source[row] = newText
changed = True
if changed:
# Closing bracket aligned to opening bracket.
return (1, "FE123", 0)
return (0, "", 0)
else:
id = self.__getID()
self.__stackLogical.append((id, code, line, pos))
return (-1, "", id)
def __fixE125(self, code, line, pos, apply=False):
"""
Private method to fix the indentation of continuation lines not
distinguishable from next logical line.
Codes: E125
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
logical = self.__getLogical(line, pos)
if logical:
# Fix by adjusting initial indent level.
modified = self.__fixReindent(line, pos, logical)
if not modified:
row = line - 1
text = self.__source[row]
self.__source[row] = self.__getIndent(text) + \
self.__indentWord + text.lstrip()
# Indentation level changed.
return (1, "FE125", 0)
return (0, "", 0)
else:
id = self.__getID()
self.__stackLogical.append((id, code, line, pos))
return (-1, "", id)
def __fixE126(self, code, line, pos, apply=False):
"""
Private method to fix over-indented/under-indented hanging
indentation.
Codes: E126, E133
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
logical = self.__getLogical(line, pos)
if logical:
# Fix by deleting whitespace to the left.
logicalLines = logical[2]
row = line - 1
text = self.__source[row]
newText = self.__getIndent(logicalLines[0]) + \
self.__indentWord + text.lstrip()
if newText == text:
# fall back to slower method
changed = self.__fixReindent(line, pos, logical)
else:
self.__source[row] = newText
changed = True
if changed:
# Indentation level of hanging indentation changed.
return (1, "FE126", 0)
return (0, "", 0)
else:
id = self.__getID()
self.__stackLogical.append((id, code, line, pos))
return (-1, "", id)
def __fixE127(self, code, line, pos, apply=False):
"""
Private method to fix over/under indented lines.
Codes: E127, E128
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
logical = self.__getLogical(line, pos)
if logical:
# Fix by inserting/deleting whitespace to the correct level.
logicalLines = logical[2]
row = line - 1
text = self.__source[row]
newText = text
if logicalLines[0].rstrip().endswith('\\'):
newText = self.__getIndent(logicalLines[0]) + \
self.__indentWord + text.lstrip()
else:
startIndex = None
for symbol in '([{':
if symbol in logicalLines[0]:
foundIndex = logicalLines[0].find(symbol) + 1
if startIndex is None:
startIndex = foundIndex
else:
startIndex = min(startIndex, foundIndex)
if startIndex is not None:
newText = startIndex * ' ' + text.lstrip()
if newText == text:
# fall back to slower method
changed = self.__fixReindent(line, pos, logical)
else:
self.__source[row] = newText
changed = True
if changed:
# Visual indentation corrected.
return (1, "FE127", 0)
return (0, "", 0)
else:
id = self.__getID()
self.__stackLogical.append((id, code, line, pos))
return (-1, "", id)
def __fixE201(self, code, line, pos):
"""
Private method to fix extraneous whitespace.
Codes: E201, E202, E203, E211
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
text = self.__source[line]
if '"""' in text or "'''" in text or text.rstrip().endswith('\\'):
return (0, "", 0)
newText = self.__fixWhitespace(text, pos, '')
if newText == text:
return (0, "", 0)
self.__source[line] = newText
# Extraneous whitespace removed.
return (1, "FE201", 0)
def __fixE221(self, code, line, pos):
"""
Private method to fix extraneous whitespace around operator or
keyword.
Codes: E221, E222, E223, E224, E225, E226, E227, E228, E241,
E242, E271, E272, E273, E274.
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
text = self.__source[line]
if '"""' in text or "'''" in text or text.rstrip().endswith('\\'):
return (0, "", 0)
newText = self.__fixWhitespace(text, pos, ' ')
if newText == text:
return (0, "", 0)
self.__source[line] = newText
if code in ["E225", "E226", "E227", "E228"]:
# Missing whitespace added.
return (1, "", 0)
else:
# Extraneous whitespace removed.
return (1, "", 0)
def __fixE231(self, code, line, pos):
"""
Private method to fix missing whitespace after ',;:'.
Codes: E231
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
pos = pos + 1
self.__source[line] = self.__source[line][:pos] + \
" " + self.__source[line][pos:]
# Missing whitespace added.
return (1, "FE231", 0)
def __fixE251(self, code, line, pos):
"""
Private method to fix extraneous whitespace around keyword and
default parameter equals.
Codes: E251
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
text = self.__source[line]
# This is necessary since pep8 sometimes reports columns that go
# past the end of the physical line. This happens in cases like
# foo(bar\n=None)
col = min(pos, len(text) - 1)
if text[col].strip():
newText = text
else:
newText = text[:col].rstrip() + text[col:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if newText.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.__source[line] = newText.rstrip("\n\r \t\\")
self.__source[line + 1] = self.__source[line + 1].lstrip()
else:
self.__source[line] = newText
# Extraneous whitespace removed.
return (1, "FE251", 0)
def __fixE261(self, code, line, pos):
"""
Private method to fix whitespace before or after inline comment.
Codes: E261, E262
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
text = self.__source[line]
left = text[:pos].rstrip(' \t#')
right = text[pos:].lstrip(' \t#')
newText = left + (" # " + right if right.strip() else right)
self.__source[line] = newText
# Whitespace around comment sign corrected.
return (1, "FE261", 0)
def __fixE301(self, code, line, pos, apply=False):
"""
Private method to fix the need for one blank line.
Codes: E301
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
self.__source.insert(line - 1, self.__eol)
# One blank line inserted.
return (1, "FE301", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE302(self, code, line, pos, apply=False):
"""
Private method to fix the need for two blank lines.
Codes: E302
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
# count blank lines
index = line - 1
blanks = 0
while index:
if self.__source[index - 1].strip() == "":
blanks += 1
index -= 1
else:
break
delta = blanks - 2
line -= 1
if delta < 0:
# insert blank lines (one or two)
while delta < 0:
self.__source.insert(line, self.__eol)
delta += 1
# %n blank line(s) inserted.
return (1, ("FE302+", 2 - blanks), 0)
elif delta > 0:
# delete superfluous blank lines
while delta > 0:
del self.__source[line - 1]
line -= 1
delta -= 1
# %n superfluous line(s) removed.
return (1, ("FE302-", blanks - 2), 0)
else:
return (0, "", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE303(self, code, line, pos, apply=False):
"""
Private method to fix superfluous blank lines.
Codes: E303
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
index = line - 3
while index:
if self.__source[index].strip() == "":
del self.__source[index]
index -= 1
else:
break
# Superfluous blank lines removed.
return (1, "FE303", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE304(self, code, line, pos, apply=False):
"""
Private method to fix superfluous blank lines after a function
decorator.
Codes: E304
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
index = line - 2
while index:
if self.__source[index].strip() == "":
del self.__source[index]
index -= 1
else:
break
# Superfluous blank lines after function decorator removed.
return (1, "FE304", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE401(self, code, line, pos, apply=False):
"""
Private method to fix multiple imports on one line.
Codes: E401
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
text = self.__source[line]
if not text.lstrip().startswith("import"):
return (0, "", 0)
# pep8 (1.3.1) reports a false positive if there is an import
# statement followed by a semicolon and some unrelated
# statement with commas in it.
if ';' in text:
return (0, "", 0)
newText = text[:pos].rstrip("\t ,") + self.__eol + \
self.__getIndent(text) + "import " + text[pos:].lstrip("\t ,")
self.__source[line] = newText
# Imports were put on separate lines.
return (1, "FE401", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE501(self, code, line, pos, apply=False):
"""
Private method to fix the long lines by breaking them.
Codes: E501
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
multilineStringLines, docStringLines = \
self.__multilineStringLines()
isDocString = line in docStringLines
line = line - 1
text = self.__source[line]
if line > 0:
prevText = self.__source[line - 1]
else:
prevText = ""
if line < len(self.__source) - 1:
nextText = self.__source[line + 1]
else:
nextText = ""
shortener = LineShortener(
text, prevText, nextText,
maxLength=self.__maxLineLength, eol=self.__eol,
indentWord=self.__indentWord, isDocString=isDocString)
changed, newText, newNextText = shortener.shorten()
if changed:
if newText != text:
self.__source[line] = newText
if newNextText and newNextText != nextText:
if newNextText == " ":
newNextText = ""
self.__source[line + 1] = newNextText
# Long lines have been shortened.
return (1, "FE501", 0)
else:
return (0, "", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE502(self, code, line, pos):
"""
Private method to fix redundant backslash within brackets.
Codes: E502
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
self.__source[line - 1] = \
self.__source[line - 1].rstrip("\n\r \t\\") + self.__eol
# Redundant backslash in brackets removed.
return (1, "FE502", 0)
def __fixE701(self, code, line, pos, apply=False):
"""
Private method to fix colon-separated compound statements.
Codes: E701
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
text = self.__source[line]
pos = pos + 1
newText = text[:pos] + self.__eol + self.__getIndent(text) + \
self.__indentWord + text[pos:].lstrip("\n\r \t\\") + \
self.__eol
self.__source[line] = newText
# Compound statement corrected.
return (1, "FE701", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE702(self, code, line, pos, apply=False):
"""
Private method to fix semicolon-separated compound statements.
Codes: E702, E703
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
text = self.__source[line]
if text.rstrip().endswith("\\"):
# normalize '1; \\\n2' into '1; 2'
self.__source[line] = text.rstrip("\n\r \t\\")
self.__source[line + 1] = self.__source[line + 1].lstrip()
elif text.rstrip().endswith(";"):
self.__source[line] = text.rstrip("\n\r \t;") + self.__eol
else:
first = text[:pos].rstrip("\n\r \t;") + self.__eol
second = text[pos:].lstrip("\n\r \t;")
self.__source[line] = first + self.__getIndent(text) + second
# Compound statement corrected.
return (1, "FE702", 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixE711(self, code, line, pos):
"""
Private method to fix comparison with None.
Codes: E711, E712
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
line = line - 1
text = self.__source[line]
rightPos = pos + 2
if rightPos >= len(text):
return (0, "", 0)
left = text[:pos].rstrip()
center = text[pos:rightPos]
right = text[rightPos:].lstrip()
if not right.startswith(("None", "True", "False")):
return (0, "", 0)
if center.strip() == "==":
center = "is"
elif center.strip() == "!=":
center = "is not"
else:
return (0, "", 0)
self.__source[line] = " ".join([left, center, right])
# Comparison to None/True/False corrected.
return (1, "FE711", 0)
def __fixN804(self, code, line, pos, apply=False):
"""
Private method to fix a wrong first argument of normal and
class methods.
Codes: N804, N805
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
text = self.__source[line]
if code == "N804":
arg = "cls"
else:
arg = "self"
if text.rstrip().endswith("("):
newText = text + self.__getIndent(text) + \
self.__indentWord + arg + "," + self.__eol
else:
index = text.find("(") + 1
left = text[:index]
right = text[index:]
if right.startswith(")"):
center = arg
else:
center = arg + ", "
newText = left + center + right
self.__source[line] = newText
# '{0}' argument added.
return (1, ("FN804", (arg,)), 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixN806(self, code, line, pos, apply=False):
"""
Private method to fix a wrong first argument of static methods.
Codes: N806
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@keyparam apply flag indicating that the fix should be applied
(boolean)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
if apply:
line = line - 1
text = self.__source[line]
index = text.find("(") + 1
left = text[:index]
right = text[index:]
if right.startswith(("cls", "self")):
# cls or self are on the definition line
if right.startswith("cls"):
right = right[3:]
arg = "cls"
else:
right = right[4:]
arg = "self"
right = right.lstrip(", ")
newText = left + right
self.__source[line] = newText
else:
# they are on the next line
line = line + 1
text = self.__source[line]
indent = self.__getIndent(text)
right = text.lstrip()
if right.startswith("cls"):
right = right[3:]
arg = "cls"
else:
right = right[4:]
arg = "self"
right = right.lstrip(", ")
if right.startswith("):"):
# merge with previous line
self.__source[line - 1] = \
self.__source[line - 1].rstrip() + right
self.__source[line] = ""
else:
self.__source[line] = indent + right
# '{0}' argument removed.
return (1, ("FN806", arg), 0)
else:
id = self.__getID()
self.__stack.append((id, code, line, pos))
return (-1, "", id)
def __fixW291(self, code, line, pos):
"""
Private method to fix trailing whitespace.
Codes: W291, W293
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
self.__source[line - 1] = re.sub(r'[\t ]+(\r?)$', r"\1",
self.__source[line - 1])
# Whitespace stripped from end of line.
return (1, "FW291", 0)
def __fixW292(self, code, line, pos):
"""
Private method to fix a missing newline at the end of file.
Codes: W292
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
self.__source[line - 1] += self.__eol
# newline added to end of file.
return (1, "FW292", 0)
def __fixW391(self, code, line, pos):
"""
Private method to fix trailing blank lines.
Codes: W391
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
index = line - 1
while index:
if self.__source[index].strip() == "":
del self.__source[index]
index -= 1
else:
break
# Superfluous trailing blank lines removed from end of file.
return (1, "FW391", 0)
def __fixW603(self, code, line, pos):
"""
Private method to fix the not equal notation.
Codes: W603
@param code code of the issue (string)
@param line line number of the issue (integer)
@param pos position inside line (integer)
@return value indicating an applied/deferred fix (-1, 0, 1),
a message for the fix (string) and an ID for a deferred
fix (integer)
"""
self.__source[line - 1] = self.__source[line - 1].replace("<>", "!=")
# '<>' replaced by '!='.
return (1, "FW603", 0)
class Reindenter(object):
"""
Class to reindent badly-indented code to uniformly use four-space
indentation.
Released to the public domain, by Tim Peters, 03 October 2000.
"""
def __init__(self, sourceLines):
"""
Constructor
@param sourceLines list of source lines including eol marker
(list of string)
"""
# Raw file lines.
self.raw = sourceLines
self.after = []
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's "\n".
self.lines = [line.rstrip().expandtabs() + "\n"
for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
# List of (lineno, indentlevel) pairs, one for each stmt and
# comment line. indentlevel is -1 for comment lines, as a
# signal that tokenize doesn't know what to do about them;
# indeed, they're our headache!
self.stats = []
def run(self):
"""
Public method to run the re-indenter.
@return flag indicating that a change was done (boolean)
"""
try:
stats = self.__genStats(tokenize.generate_tokens(self.getline))
except (SyntaxError, tokenize.TokenError):
return False
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == "\n":
lines.pop()
# Sentinel.
stats.append((len(lines), 0))
# Map count of leading spaces to the count we want.
have2want = {}
# Program after transformation.
after = self.after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats) - 1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i + 1][0]
have = self.__getlspace(lines[thisstmt])
want = thislevel * 4
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in range(i + 1, len(stats) - 1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == self.__getlspace(lines[jline]):
want = jlevel * 4
break
if want < 0: # Maybe it's a hanging comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in range(i - 1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = \
have + \
self.__getlspace(after[jline - 1]) - \
self.__getlspace(lines[jline])
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line in lines[thisstmt:nextstmt]:
if diff > 0:
if line == "\n":
after.append(line)
else:
after.append(" " * diff + line)
else:
remove = min(self.__getlspace(line), -diff)
after.append(line[remove:])
return self.raw != self.after
def fixedLine(self, line):
"""
Public method to get a fixed line.
@param line number of the line to retrieve (integer)
@return fixed line (string)
"""
if line < len(self.after):
return self.after[line]
def getline(self):
"""
Public method to get a line of text for tokenize.
@return line of text (string)
"""
if self.index >= len(self.lines):
line = ""
else:
line = self.lines[self.index]
self.index += 1
return line
def __genStats(self, tokens):
"""
Private method to generate the re-indent statistics.
@param tokens tokens generator (tokenize._tokenize)
@return reference to the generated statistics
"""
find_stmt = True # next token begins a fresh stmt?
level = 0 # current indent level
stats = []
for t in tokens:
token_type = t[0]
sline = t[2][0]
line = t[4]
if token_type == tokenize.NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
find_stmt = True
elif token_type == tokenize.INDENT:
find_stmt = True
level += 1
elif token_type == tokenize.DEDENT:
find_stmt = True
level -= 1
elif token_type == tokenize.COMMENT:
if find_stmt:
stats.append((sline, -1))
# but we're still looking for a new stmt, so leave
# find_stmt alone
elif token_type == tokenize.NL:
pass
elif find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
find_stmt = False
if line: # not endmarker
stats.append((sline, level))
return stats
def __getlspace(self, line):
"""
Private method to count number of leading blanks.
@param line line to check (string)
@return number of leading blanks (integer)
"""
i = 0
n = len(line)
while i < n and line[i] == " ":
i += 1
return i
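# Minimal sketch of how the fixer drives this class (see __fixE101 above):
#
#     reindenter = Reindenter(sourceLines)
#     reindenter.run()                    # tokenize and recompute indents
#     fixed = reindenter.fixedLine(lineno - 1)
#
# run() returns True when the reindented text differs from the input.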
class IndentationWrapper(object):
"""
Class used by fixers dealing with indentation.
Each instance operates on a single logical line.
"""
SKIP_TOKENS = frozenset([
tokenize.COMMENT, tokenize.NL, tokenize.INDENT,
tokenize.DEDENT, tokenize.NEWLINE, tokenize.ENDMARKER
])
def __init__(self, physical_lines):
"""
Constructor
@param physical_lines list of physical lines to operate on
(list of strings)
"""
self.lines = physical_lines
self.tokens = []
self.rel_indent = None
sio = StringIO(''.join(physical_lines))
for t in tokenize.generate_tokens(sio.readline):
if not len(self.tokens) and t[0] in self.SKIP_TOKENS:
continue
if t[0] != tokenize.ENDMARKER:
self.tokens.append(t)
self.logical_line = self.__buildTokensLogical(self.tokens)
def __buildTokensLogical(self, tokens):
"""
Private method to build a logical line from a list of tokens.
@param tokens list of tokens as generated by tokenize.generate_tokens
@return logical line (string)
"""
# from pep8.py with minor modifications
logical = []
previous = None
for t in tokens:
token_type, text = t[0:2]
if token_type in self.SKIP_TOKENS:
continue
if previous:
end_line, end = previous[3]
start_line, start = t[2]
#!/usr/bin/env python3
import re
import os.path
from fractions import gcd
import abc
import random
import time
import datetime
from collections import OrderedDict, defaultdict
from itertools import zip_longest
from math import log, sqrt
import logging
import socket
import gzip
import heapq
import patterns
import wudb
import cadoprograms
import cadoparams
import cadocommand
import wuserver
import workunit
from struct import error as structerror
from shutil import rmtree
from workunit import Workunit
# Patterns for floating-point numbers
# They can be used with the string.format() function, e.g.,
# re.compile("value = {cap_fp}".format(**REGEXES))
# where format() replaces "{cap_fp}" with the string in CAP_FP
RE_FP = r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?"
CAP_FP = "(%s)" % RE_FP
REGEXES = {"fp": RE_FP, "cap_fp": CAP_FP}
def re_cap_n_fp(prefix, n, suffix=""):
""" Generate a regular expression that starts with prefix, then captures
1 up to n floating-point numbers (possibly in scientific notation)
separated by whitespace, and ends with suffix.
>>> re.match(re_cap_n_fp("foo", 2), 'foo 1.23').group(1)
'1.23'
>>> re.match(re_cap_n_fp("foo", 2), 'foo1.23 4.56').groups()
('1.23', '4.56')
# The first fp pattern must match something
>>> re.match(re_cap_n_fp("foo", 2), 'foo')
"""
template = prefix
if n > 0:
# The first CAP_FP pattern is mandatory, and can have zero or more
# whitespace in front
template += "\s*{cap_fp}"
# The remaining FP_CAPs are optional, and have 1 or more whitespace
template += "(?:\s+{cap_fp})?" * (n - 1)
template += suffix
return template.format(**REGEXES)
class Polynomial(list):
"""
>>> p = Polynomial()
>>> p.degree < 0
True
>>> p[0] = 1
>>> p.degree == 0
True
>>> p[42] = 1
>>> p.degree == 42
True
>>> p[42] = 0
>>> p.degree == 0
True
>>> p = Polynomial([3,2,1]) # x^2 + 2*x + 3
>>> p.eval(0)
3
>>> p.eval(1)
6
>>> p.eval(2)
11
>>> p.eval(-3)
6
>>> p.eval_h(2,7)
179
>>> p.eval_h(-3,5)
54
"""
@property
def degree(self):
return len(self) - 1 if len(self) > 0 else float("-inf")
def __setitem__(self, index, value):
if index >= len(self):
self.extend([0]*(index + 1 - len(self)))
list.__setitem__(self, index, value)
# Remove leading zeroes
while len(self) > 0 and self[-1] == 0:
self.pop()
def __str__(self):
xpow = ["", "*x"] + ["*x^%d" % i for i in range(2, len(self))]
arr = ["%+d%s" % (self[idx], xpow[idx]) for idx in range(0, len(self))
if self[idx]]
poly = "".join(reversed(arr)).lstrip('+')
poly = re.sub(r'\b1\*', "", poly)
return poly
def eval(self, x):
""" Evaluate the polynomial at x """
if len(self) == 0:
return 0
deg = self.degree
value = self[deg]
for i in range(deg):
value = value * x + self[deg - i - 1]
return value
def eval_h(self, a, b):
""" Evaluate homogenized bi-variate polynomial at a,b """
if len(self) == 0:
return 0
powers_a = [a**i for i in range(self.degree + 1)]
powers_b = [b**i for i in range(self.degree + 1)]
return sum([coeff * pow_a * pow_b for (coeff, pow_a, pow_b)
in zip(self, powers_a, reversed(powers_b))])
def same_lc(self, other):
""" Return true if the two polynomials have the same degree
and leading coefficient
"""
return self.degree == other.degree and \
self[self.degree] == other[other.degree]
class PolynomialParseException(Exception):
""" Exception class for signaling errors during polynomial parsing """
pass
class Polynomials(object):
r""" A class that represents a polynomial
>>> Polynomials([""])
Traceback (most recent call last):
cadotask.PolynomialParseException: No polynomials found
>>> t="n: 1021\nc0: 1\nc5: -1\nc5: 1\nY0: 4\nY1: -1\nskew: 1.0\n"
>>> p=Polynomials(t.splitlines())
Traceback (most recent call last):
cadotask.PolynomialParseException: Line 'c5: 1' redefines coefficient of x^5
>>> t="n: 1021\nc0: 1\nc1: -1\nc5: 1\nY0: 4\nY1: -1\nskew: 1.0\n"
>>> p=Polynomials(t.splitlines())
>>> str(p)
'n: 1021\nskew: 1.0\nc0: 1\nc1: -1\nc5: 1\nY0: 4\nY1: -1\n# f(x) = x^5-x+1\n# g(x) = -x+4\n'
>>> t="n: 1021\nc0: -1\nc1: 1\nc5: -1\nY0: -4\nY1: 1\nskew: 1.0\n"
>>> p=Polynomials(t.splitlines())
>>> str(p)
'n: 1021\nskew: 1.0\nc0: -1\nc1: 1\nc5: -1\nY0: -4\nY1: 1\n# f(x) = -x^5+x-1\n# g(x) = x-4\n'
>>> t="n: 1021\npoly0: 1, 2, 3\npoly1: 4, 5, 6\nskew: 1.0\n"
>>> p=Polynomials(t.splitlines())
"""
re_pol_f = re.compile(r"c(\d+)\s*:\s*(-?\d+)")
re_pol_g = re.compile(r"Y(\d+)\s*:\s*(-?\d+)")
re_polys = re.compile(r"poly(\d+)\s*:") # FIXME: do better?
re_Murphy = re.compile(re_cap_n_fp(r"\s*#\s*MurphyE\s*\((.*)\)\s*=", 1))
re_skew = re.compile(re_cap_n_fp(r"skew:", 1))
re_best = re.compile(r"# Best polynomial found \(revision (.*)\):")
# the 'lognorm' variable now represents the expected E-value
re_lognorm = re.compile(re_cap_n_fp(r"\s*#\s*exp_E", 1))
# Keys that can occur in a polynomial file, in their preferred ordering,
# and whether the key is mandatory or not. The preferred ordering is used
# when turning a polynomial back into a string.
keys = OrderedDict(
(
("n", (int, True)),
("skew", (float, False)),
("type", (str, False))
))
def __init__(self, lines):
""" Parse a polynomial file in the syntax as produced by polyselect
and polyselect_ropt
"""
self.MurphyE = 0.
self.skew = 0.
self.MurphyParams = None
self.revision = None
self.lognorm = 0.
self.params = {}
polyf = Polynomial()
polyg = Polynomial()
# in case of multiple fields
tabpoly = {}
def match_poly(line, poly, regex):
match = regex.match(line)
if match:
(idx, coeff) = map(int, match.groups())
if idx <= poly.degree and poly[idx]:
raise PolynomialParseException(
"Line '%s' redefines coefficient of x^%d"
% (line, idx))
poly[idx] = coeff
return True
return False
# line = "poly0: 1, 2, 3" => poly[0] = {1, 2, 3} = 1+2*X+3*X^2
def match_poly_all(line, regex):
match = regex.match(line)
if match:
line2 = line.split(":")
# get index of poly
ip = int(line2[0].split("poly")[1])
# get coeffs of 1+2*X+3*X^2
line3=line2[1].split(",")
pol = Polynomial()
for idx in range(len(line3)):
pol[idx] = int(line3[idx]);
return ip, pol
return -1, []
for line in lines:
# print ("Parsing line: >%s<" % line.strip())
# If this is a comment line telling the Murphy E value,
# extract the value and store it
match = self.re_Murphy.match(line)
if match:
if self.MurphyParams or self.MurphyE:
raise PolynomialParseException(
"Line '%s' redefines Murphy E value" % line)
self.MurphyParams = match.group(1)
self.MurphyE = float(match.group(2))
continue
match = self.re_skew.match(line)
if match:
self.skew = float(match.group(1))
# go through
match = self.re_best.match(line)
if match:
self.revision = match.group(1)
continue
# If this is a comment line telling the expected E-value,
# extract the value and store it
match = self.re_lognorm.match(line)
if match:
if self.lognorm != 0:
raise PolynomialParseException(
"Line '%s' redefines exp_E value" % line)
self.lognorm = float(match.group(1))
continue
# Drop comment, strip whitespace
line2 = line.split('#', 1)[0].strip()
# If nothing is left, process next line
if not line2:
continue
# Try to parse polynomial coefficients
if match_poly(line, polyf, self.re_pol_f) or \
match_poly(line, polyg, self.re_pol_g):
continue
# is it in format "poly*: ..."
ip,tip=match_poly_all(line, self.re_polys)
if ip != -1:
tabpoly[ip] = tip
continue
# All remaining lines must be of the form "x: y"
array = line2.split(":")
if not len(array) == 2:
raise PolynomialParseException("Invalid line '%s'" % line)
key = array[0].strip()
value = array[1].strip()
if not key in self.keys:
raise PolynomialParseException("Invalid key '%s' in line '%s'" %
(key, line))
if key in self.params:
raise PolynomialParseException("Key %s in line %s has occurred "
"before" % (key, line))
(_type, isrequired) = self.keys[key]
self.params[key] = _type(value)
# If no polynomial was found at all (not even partial data), assume
# that polyselect simply did not find anything in this search range
if polyf.degree < 0 and polyg.degree < 0 and self.params == {} and \
self.MurphyE == 0.:
raise PolynomialParseException("No polynomials found")
# Test that all required keys are there
for (key, (_type, isrequired)) in self.keys.items():
if isrequired and not key in self.params:
raise PolynomialParseException("Key %s missing" % key)
if len(tabpoly) > 0:
polyg = tabpoly[0]
polyf = tabpoly[1]
self.polyf = polyf
self.polyg = polyg
self.tabpoly = tabpoly
return
def __str__(self):
arr = ["%s: %s\n" % (key, self.params[key])
for key in self.keys if key in self.params]
if len(self.tabpoly) > 0:
for i in range(len(self.tabpoly)):
poltmp = self.tabpoly[i]
arr += ["poly%d: %s" % (i, poltmp[0])]
arr += [","+str(poltmp[j]) for j in range(1, len(poltmp))]
arr += "\n"
else:
arr += ["c%d: %d\n" % (idx, coeff) for (idx, coeff)
in enumerate(self.polyf) if not coeff == 0]
arr += ["Y%d: %d\n" % (idx, coeff) for (idx, coeff)
in enumerate(self.polyg) if not coeff == 0]
if not self.MurphyE == 0.:
if self.MurphyParams:
arr.append("# MurphyE (%s) = %g\n" % (self.MurphyParams, self.MurphyE))
else:
arr.append("# MurphyE = %g\n" % self.MurphyE)
if not self.revision == None:
arr.append("# found by revision %s\n" % self.revision)
if not self.lognorm == 0.:
arr.append("# exp_E %g\n" % self.lognorm)
if len(self.tabpoly) > 0:
for i in range(len(self.tabpoly)):
arr.append("# poly%d = %s\n" % (i, str(self.tabpoly[i])))
else:
arr.append("# f(x) = %s\n" % str(self.polyf))
arr.append("# g(x) = %s\n" % str(self.polyg))
return "".join(arr)
def __eq__(self, other):
return self.polyf == other.polyf and self.polyg == other.polyg \
and self.params == other.params
def __ne__(self, other):
return not (self == other)
def create_file(self, filename):
# Write polynomial to a file
with open(str(filename), "w") as poly_file:
poly_file.write(str(self))
def getN(self):
return self.params["n"]
def same_lc(self, other):
""" Returns true if both polynomial pairs have same degree and
leading coefficient
"""
return self.polyf.same_lc(other.polyf) and \
self.polyg.same_lc(other.polyg)
def get_polynomial(self, side):
""" Returns one of the two polynomial as indexed by side """
assert side == 0 or side == 1
# Welp, f is side 1 and g is side 0 :(
if side == 0:
return self.polyg
else:
return self.polyf
class FilePath(object):
""" A class that represents a path to a file, where the path should be
somewhat relocateable.
In particular, we separate the path to the working directory, and the file
path relative to the working directory. For persistent storage in the DB,
the path relative to the workdir should be used, whereas for any file
accesses, the full path needs to be used.
It also piggy-backs a version information field.
"""
def __init__(self, workdir, filepath, version=None):
self.workdir = workdir.rstrip(os.sep)
self.filepath = filepath
self.version = version
def __str__(self):
return "%s%s%s" % (self.workdir, os.sep, self.filepath)
def get_wdir_relative(self):
return self.filepath
def isfile(self):
return os.path.isfile(str(self))
def isdir(self):
return os.path.isdir(str(self))
def get_version(self):
return self.version
def mkdir(self, *, parent=False, mode=None):
""" Creates a directory.
parent acts much like the Unix mkdir's '-p' parameter: required parent
directories are created if they don't exist, and no error is raised
if the directory to be created already exists.
If parent==True, a mode for the directory to be created can be specified
as well.
"""
if parent:
# os.makedirs specifies 0o777 as the default value for mode,
# thus we can't pass None to get the default value. We also
# want to avoid hard-coding 0x777 as the default in this
# method's signature, or using **kwargs magic. Thus we use
# a default of None in this method, and pass the mode value
# to makedirs only if it is not None.
if mode is None:
os.makedirs(str(self), exist_ok=True)
else:
os.makedirs(str(self), exist_ok=True, mode=mode)
else:
os.mkdir(str(self))
def realpath(self):
return os.path.realpath(str(self))
def open(self, *args, **kwargs):
return open(str(self), *args, **kwargs)
def rmtree (self, ignore_errors=False):
rmtree(str(self), ignore_errors)
class WorkDir(object):
""" A class that allows generating file and directory names under a
working directory.
The directory layout is as follows:
The current project (i.e., the factorization) has a jobname, e.g.,
"RSA512". Each task may have a name, e.g., "sieving".
A task can create various files under
workdir/jobname.taskname.file
or put them in a subdirectory
workdir/jobname.taskname/file
or, for multiple subdirectories,
workdir/jobname.taskname/subdir/file
It is also ok for tasks to have no particular name that is
reflected in the filename hierarchy.
>>> f = WorkDir("/foo/bar", "jobname", "taskname")
>>> str(f.make_dirname("foo")).replace(os.sep,'/')
'/foo/bar/jobname.foo/'
>>> str(f.make_filename('file')).replace(os.sep,'/')
'/foo/bar/jobname.file'
>>> str(f.make_filename('file', subdir="foo")).replace(os.sep,'/')
'/foo/bar/jobname.foo/file'
>>> str(f.make_filename('file', prefix="bar", subdir='foo')).replace(os.sep,'/')
'/foo/bar/jobname.foo/jobname.bar.file'
"""
def __init__(self, workdir, jobname=None, taskname=None):
self.workdir = str(workdir).rstrip(os.sep)
self.jobname = jobname
self.taskname = taskname
def path_in_workdir(self, filename, version=None):
return FilePath(self.workdir, filename, version=version)
def make_filename2(self, jobname=None, taskname=None, filename=None):
if jobname is None:
jobname = self.jobname
if taskname is None:
taskname = self.taskname
filename_arr = [s for s in [jobname, taskname, filename] if s]
return FilePath(self.workdir, ".".join(filename_arr))
def make_dirname(self, subdir):
""" Make a directory name of the form workdir/jobname.prefix/ """
return self.path_in_workdir("".join([self.jobname, ".", subdir, os.sep]))
def make_filename(self, name, prefix=None, subdir=None):
""" If subdir is None, make a filename of the form
workdir/jobname.prefix.name or workdir/jobname.name depending on
whether prefix is None or not.
If subdir is not None, make a filename of the form
workdir/jobname.subdir/jobname.prefix.name
or workdir/jobname.subdir/name
"""
components=[self.jobname]
if subdir is not None:
components += [ ".", subdir, os.sep]
if prefix is not None:
components += [ self.jobname, ".", prefix, "." ]
components += [ name ]
else:
if prefix is not None:
components += [ ".", prefix ]
components += [ ".", name ]
return self.path_in_workdir("".join(components))
def get_workdir_jobname(self):
return self.jobname
def get_workdir_path(self):
return self.workdir
class Statistics(object):
""" Class that holds statistics on program execution, and can merge two
such statistics.
"""
def __init__(self, conversions, formats):
self.conversions = conversions
self.stat_formats = formats
self.stats = {}
@staticmethod
def typecast(values, types):
""" Cast the values in values to the types specified in types """
if type(types) is type:
return [types(v) for v in values]
else:
return [t(v) for (v, t) in zip(values, types)]
@staticmethod
def _to_str(stat):
""" Convert one statistic to a string """
return " ".join(map(str, stat))
@staticmethod
def _from_str(string, types):
""" Convert a string (probably from a state dict) to a statistic """
return Statistics.typecast(string.split(), types)
def from_dict(self, stats):
""" Initialise values in self from the strings in the "stats"
dictionary
"""
for (key, types, defaults, combine, regex, allow_several) in self.conversions:
if key in stats:
if key in self.stats:
print("duplicate %s\n" % key)
assert not key in self.stats
self.stats[key] = self._from_str(stats.get(key, defaults),
types)
assert not self.stats[key] is None
def parse_line(self, line):
""" Parse one line of program output and look for statistics.
If they are found, they are added to self.stats.
"""
for (key, types, defaults, combine, regex, allow_several) in self.conversions:
match = regex.match(line)
if match:
# print (pattern.pattern, match.groups())
# Optional groups that did not match are returned as None.
# Skip over those so typecast doesn't raise TypeError
groups = [group for group in match.groups() if not group is None]
new_val = self.typecast(groups, types)
if not allow_several:
assert not key in self.stats
self.stats[key] = new_val
else:
# Some output files inherently have several values.
# This is the case of bwc output files if we use
# multiple sequences.
if key in self.stats:
self.stats[key] = combine(self.stats[key], new_val)
else:
self.stats[key] = new_val
assert not self.stats[key] is None
def merge_one_stat(self, key, new_val, combine):
if key in self.stats:
self.stats[key] = combine(self.stats[key], new_val)
else:
self.stats[key] = new_val
assert not self.stats[key] is None
# print(self.stats)
def merge_stats(self, new_stats):
""" Merge the stats currently in self with the Statistics in
"new_stats"
"""
assert self.conversions == new_stats.conversions
for (key, types, defaults, combine, regex, allow_several) in self.conversions:
if key in new_stats.stats:
self.merge_one_stat(key, new_stats.stats[key], combine)
def as_dict(self):
return {key: self._to_str(self.stats[key]) for key in self.stats}
def as_strings(self):
""" Convert statistics to lines of output
The self.stat_formats is an array, with each entry corresponding to
a line that should be output.
Each such entry is again an array, containing the format strings that
should be used for the conversion of statistics. If a conversion
fails with a KeyError or an IndexError, it is silently skipped over.
This is to allow producing lines on which some statistics are not
printed if the value is not known.
"""
result = []
errors = []
for format_arr in self.stat_formats:
line = []
for format_str in format_arr:
try:
line.append(format_str.format(**self.stats))
except KeyError:
errors.append("KeyError with \"%s\"" % format_str)
except IndexError:
errors.append("IndexError with \"%s\"" % format_str)
if line:
result.append("".join(line))
if len(errors) > 0:
errors.append("(registered stats: %s)" % self.stats)
return result, errors
else:
return result, None
# Helper functions for processing statistics.
# We can't make them @staticmethod or references are not callable
def add_list(*lists):
""" Add zero or more lists elementwise.
Short lists are handled as if padded with zeroes.
>>> Statistics.add_list([])
[]
>>> Statistics.add_list([1])
[1]
>>> Statistics.add_list([1,2], [3,7])
[4, 9]
>>> Statistics.add_list([1,2], [3,7], [5], [3,1,4,1,5])
[12, 10, 4, 1, 5]
"""
return [sum(items) for items in zip_longest(*lists, fillvalue=0)]
def weigh(samples, weights):
return [sample * weight for (sample, weight) in zip(samples, weights)]
def combine_mean(means, samples):
""" From two lists, one containing values and the other containing
the respective sample sizes (i.e., weights of the values), compute
the combined mean (i.e. the weighted mean of the values).
The two lists must have equal length.
"""
assert len(means) == len(samples)
total_samples = sum(samples)
weighted_sum = sum(Statistics.weigh(means, samples))
return [weighted_sum / total_samples, total_samples]
def zip_combine_mean(*lists):
""" From a list of 2-tuples, each tuple containing a value and a
weight, compute the weighted average of the values.
"""
for l in lists:
assert len(l) == 2
(means, samples) = zip(*lists)
return Statistics.combine_mean(means, samples)
def combine_stats(*stats):
""" Computes the combined mean and std.dev. for the stats
stats is a list of 3-tuples, each containing number of sample points,
mean, and std.dev.
Returns a 3-tuple with the combined number of sample points, mean,
and std. dev.
"""
# Samples is a list containing the first item (number of samples) of
# each item of stats, means is list of means, stddevs is list of
# std. dev.s
for s in stats:
assert len(s) == 3
(samples, means, stddevs) = zip(*stats)
(total_mean, total_samples) = Statistics.combine_mean(means, samples)
# t is the E[X^2] part of V(X)=E(X^2) - (E[X])^2
t = [mean**2 + stddev**2 for (mean, stddev) in zip(means, stddevs)]
# Compute combined variance
total_var = Statistics.combine_mean(t, samples)[0] - total_mean**2
return [total_samples, total_mean, sqrt(total_var)]
def test_combine_stats():
""" Test function for combine_stats()
>>> Statistics.test_combine_stats()
True
"""
from random import randrange
def mean(x):
return float(sum(x))/float(len(x))
def var(x):
E = mean(x)
return mean([(a-E)**2 for a in x])
def stddev(x):
return sqrt(var(x))
# Generate between 1 and 5 random integers in [1,100]
lengths = [randrange(100) + 1 for i in range(randrange(5) + 1)]
lengths = [1, 10]
# Generate lists of random integers in [1,100]
lists = [[randrange(100) for i in range(l)] for l in lengths]
stats = [(length, mean(l), stddev(l))
for (length, l) in zip(lengths, lists)]
combined = []
for l in lists:
combined += l
combined1 = Statistics.combine_stats(*stats)
combined2 = [len(combined), mean(combined), stddev(combined)]
if abs(combined1[2] - combined2[2]) > 0.2 * combined2[2]:
print("lists = %r" % lists)
print("combineds = %r" % combined)
print("stats = %r" % stats)
print("combined1 = %r" % combined1)
print("combined2 = %r" % combined2)
print(combined1[2], combined2[2])
print(abs(combined1[2] / combined2[2] - 1))
return combined1[0] == combined2[0] and \
abs(combined1[1] / combined2[1] - 1) < 1e-10 and \
abs(combined1[2] - combined2[2]) <= 1e-10 * combined2[2]
def smallest_n(*lists, n=10):
concat = []
for l in lists:
concat += l
concat.sort()
return concat[0:n]
def parse_stats(self, filename):
""" Parse statistics from the file with name "filename" and merge them
into self
Returns the newly parsed stats as a dictionary
"""
new_stats = Statistics(self.conversions, self.stat_formats)
with open(str(filename), "r") as inputfile:
for line in inputfile:
new_stats.parse_line(line)
self.merge_stats(new_stats)
return new_stats.as_dict()
class HasName(object, metaclass=abc.ABCMeta):
@abc.abstractproperty
def name(self):
# The name of the task in a simple form that can be used as
# a Python dictionary key, a directory name, part of a file name,
# part of an SQL table name, etc. That pretty much limits it to
# alphabetic first letter, and alphanumeric rest.
pass
class HasTitle(object, metaclass=abc.ABCMeta):
@abc.abstractproperty
def title(self):
# A pretty name for the task, will be used in screen output
pass
class DoesLogging(HasTitle, metaclass=abc.ABCMeta):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = logging.getLogger(self.title)
class MakesTablenames(HasName):
@property
def database_state_table_name(self):
""" Prefix string for table names
By default, the table name prefix is the name attribute, but this can
be overridden
"""
return self.name
def make_tablename(self, extra=None):
""" Return a name for a DB table """
# Maybe replace SQL-disallowed characters here, like digits and '.' ?
# Could be tricky to avoid collisions
name = self.database_state_table_name
if extra:
name = name + '_' + extra
wudb.check_tablename(name)
return name
class HasState(MakesTablenames, wudb.HasDbConnection):
""" Declares that the class has a DB-backed dictionary in which the class
can store state information.
The dictionary is available as an instance attribute "state".
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
name = self.make_tablename()
self.state = self.make_db_dict(name, connection=self.db_connection)
class FilesCreator(MakesTablenames, wudb.HasDbConnection, metaclass=abc.ABCMeta):
""" A base class for classes that produce a list of output files, with
some auxiliary information stored with each file (e.g., nr. of relations).
This info is stored in the form of a DB-backed dictionary, with the file
name as the key and the auxiliary data as the value.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
tablename = self.make_tablename("outputfiles")
self.output_files = self.make_db_dict(tablename,
connection=self.db_connection)
def add_output_files(self, filenames, *, commit):
""" Adds a dict of files to the list of existing output files """
final_files = {}
for filename in filenames:
if filename in self.output_files:
self.logger.warning("%s already in output files table" % filename)
#raise KeyError("%s already in output files table" % filename)
else:
final_files[filename] = filenames[filename]
self.output_files.update(final_files, commit=commit)
def get_output_filenames(self, condition=None):
""" Return output file names, optionally those that match a condition
If a condition is given, it must be callable with 1 parameter and
boolean return type; then only those filenames are returned where
for the auxiliary data s (i.e., the value stored in the dictionary
with the file name as key) satisfies condition(s) == True.
"""
if condition is None:
return list(self.output_files.keys())
else:
return [f for (f, s) in self.output_files.items() if condition(s)]
def forget_output_filenames(self, filenames, *, commit):
self.output_files.clear(filenames, commit=commit)
class BaseStatistics(object):
""" Base class for HasStatistics and SimpleStatistics that terminates the
print_stats() call chain.
"""
def print_stats(self):
pass
class HasStatistics(BaseStatistics, HasState, DoesLogging, metaclass=abc.ABCMeta):
@property
def stat_conversions(self):
""" Sub-classes should override """
return []
@property
def stat_formats(self):
""" Sub-classes should override """
return []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.statistics = Statistics(self.stat_conversions, self.stat_formats)
self.statistics.from_dict(self.state)
def get_statistics_as_strings(self):
""" Return the statistics collected so far as a List of strings.
Sub-classes can override to add/remove/change strings.
"""
result, errors = self.statistics.as_strings()
if errors is not None:
self.logger.warning("some stats could not be displayed for %s (see log file for debug info)", self.name)
for e in errors:
self.logger.debug(e)
return result
def print_stats(self):
stat_msgs = self.get_statistics_as_strings()
if stat_msgs:
self.logger.info("Aggregate statistics:")
for msg in stat_msgs:
self.logger.info(msg)
super().print_stats()
def parse_stats(self, filename, *, commit):
# self.logger.info("Parsing filename %s\n", filename)
new_stats = self.statistics.parse_stats(filename)
self.logger.debug("Newly arrived stats: %s", new_stats)
update = self.statistics.as_dict()
self.logger.debug("Combined stats: %s", update)
self.state.update(update, commit=commit)
class SimpleStatistics(BaseStatistics, HasState, DoesLogging,
metaclass=abc.ABCMeta):
@abc.abstractproperty
def programs(self):
# A list of classes of Programs which this tasks uses
pass
def print_cpu_real_time(self, cputotal, realtotal, program):
""" Print cpu and/or real time to logger """
# Uses self only for access to the logger
pairs = zip((cputotal, realtotal), ("cpu", "real"))
usepairs = [pair for pair in pairs if pair[0]]
if usepairs:
printformat = "/".join(["%g"] * len(usepairs))
usepairs = tuple(zip(*usepairs))
timestr = '/'.join(usepairs[1])
self.logger.info("Total %s time for %s: " + printformat,
timestr, program, *usepairs[0])
@staticmethod
def keyname(is_cpu, programname):
return "cputime_%s" % programname if is_cpu else "realtime_%s" % programname
def update_cpu_real_time(self, programname, cpu=None, real=None, commit=True):
""" Add seconds to the statistics of cpu time spent by program,
and return the new total.
"""
assert isinstance(programname, str)
update = {}
for (is_cpu, time) in ((True, cpu), (False, real)):
if time is not None:
key = self.keyname(is_cpu, programname)
update[key] = self.state.get(key, 0.) + time
if update:
self.state.update(update, commit=commit)
def get_cpu_real_time(self, program):
""" Return list of cpu and real time spent by program """
return [self.state.get(self.keyname(is_cpu, program.name), 0.)
for is_cpu in (True, False)]
def get_total_cpu_or_real_time(self, is_cpu):
""" Return tuple with number of seconds of cpu and real time spent
by all programs of this Task
"""
times = [self.get_cpu_real_time(p) for p, o, i in self.programs]
times = tuple(map(sum, zip(*times)))
return times[0 if is_cpu else 1]
def print_stats(self):
for program, o, i in self.programs:
cputotal, realtotal = self.get_cpu_real_time(program)
self.print_cpu_real_time(cputotal, realtotal, program.name)
super().print_stats()
class Runnable(object):
@abc.abstractmethod
def run(self):
pass
class DoesImport(DoesLogging, cadoparams.UseParameters, Runnable,
metaclass=abc.ABCMeta):
@abc.abstractproperty
def paramnames(self):
return self.join_params(super().paramnames, {"import": None})
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._did_import = False
def run(self):
super().run()
if "import" in self.params and not self._did_import:
self.import_files(self.params["import"])
self._did_import = True
def import_files(self, input_filename):
if input_filename.startswith('@'):
self.logger.info("Importing files listed in %s", input_filename[1:])
with open(input_filename[1:], "r") as f:
filenames = f.read().splitlines()
else:
self.logger.info("Importing file %s", input_filename)
filenames = [input_filename]
for filename in filenames:
self.import_one_file(filename)
def did_import(self):
return self._did_import
@abc.abstractmethod
def import_one_file(self, filename):
pass
def chain_dict(d1, d2):
""" Chain two mappings.
If d[x] == y and e[y] == z, then chain_dict(d, e)[x] == z.
>>> chain_dict({1: 17}, {17: 42})
{1: 42}
"""
return {key: d2[value] for key, value in d1.items()}
class RealTimeOutputFilter:
def __init__(self, logger, filename):
self.stdout = open(filename, "w")
self.logger = logger
def filter(self, data):
self.stdout.write(data)
def __enter__(self):
return self
def __exit__(self, *args):
self.stdout.close()
class Task(patterns.Colleague, SimpleStatistics, HasState, DoesLogging,
cadoparams.UseParameters, Runnable, metaclass=abc.ABCMeta):
""" A base class that represents one task that needs to be processed.
Sub-classes must define class variables:
"""
# Properties that subclasses need to define
@abc.abstractproperty
def programs(self):
# A tuple of 3-tuples, with each 3-tuple containing
# 1. the class of Program which this tasks uses
# 2. a tuple of parameters to the Program which the Task computes and
# which therefore should not be filled in from the Parameters file
# 3. a dict of parameters which are file names and which should be
# filled in by sending Requests to other Tasks. This also enables
# testing whether input files have been changed by the other Task.
pass
@abc.abstractproperty
def paramnames(self):
# Parameters that all tasks use
return self.join_params(super().paramnames,
{"name": str, "workdir": str, "run": True})
@property
def param_nodename(self):
# avoid segregating our parameters, which are user-visible
# things, underneath tree nodes whose name depends on some
# implementation detail which is the task name. Except in
# specific cases, a "task" does not (no longer) define a nesting
# level in the parameter hierarchy.
#
# return self.name
return None
def __init__(self, *, mediator, db, parameters, path_prefix):
''' Sets up a database connection and a DB-backed dictionary for
parameters. Reads parameters from DB, and merges with hierarchical
parameters in the parameters argument. Parameters passed in by
parameters argument do not override values in the DB-backed
parameter dictionary.
'''
super().__init__(mediator=mediator, db=db, parameters=parameters,
path_prefix=path_prefix)
self.logger.debug("Enter Task.__init__(%s)",
self.name)
self.logger.debug("state = %s", self.state)
# Set default parameters for this task, if any are given
self.params = self.parameters.myparams(self.paramnames)
self.logger.debug("self.parameters = %s", self.parameters)
self.logger.debug("params = %s", self.params)
# Set default parameters for our programs
# The progparams entries should not be modified after a class'
# constuctor (within __init__() is fine tho)
self.progparams = []
maindict = self.parameters.parameters
for prog, override, needed_input in self.programs:
# Parameters listed in needed_input are assumed to be overridden
for key in (set(override) & set(needed_input)):
self.logger.warning("Parameter %s listed in both overridden "
"parameters and in input files for %s, "
"only one is needed", key, prog.name)
prog_param_path = self.parameters.get_param_path() + [prog.name]
progparams = self.parameters.myparams(prog.get_accepted_keys(),
prog.name)
for c in progparams:
finergrain = '.'.join(prog_param_path+[c])
coarsegrain = maindict.locate(finergrain)
self.logger.debug("%s found from %s" % (finergrain, coarsegrain))
for param in (set(needed_input)|set(override)) & set(progparams):
finergrain = '.'.join(prog_param_path+[param])
coarsegrain = maindict.locate(finergrain)
# Whenever we see a parameter that is marked as override,
# we will discard it and let the task level fill data for
# this parameter. There are cases where this really is a
# user error and we want to complain:
# - when the parameter file *explicitly* sets this
# parameter at this level. This does not make sense
# and is a troubling no-op. Typical example is
# specifying tasks.linalg.bwc.m in dlp mode.
# - when the parameter file sets it at a level above,
# but the task level does *not* know about this
# parameter anyway. This is ignored as well, and the
# task level will fill that parameter based on data it
# knows. But leaving the user with the feeling that he
# might be able to control that parameter is
# inelegant. A typical example is
# tasks.sieve.makefb.lim (which the tasks.sieve level
# sets based on the lim0 and lim1 parameters it knows
# about). Likewise, many "out" parameters behave
# similarly.
if finergrain == coarsegrain or param not in set(self.paramnames):
self.logger.error('Parameter "%s" for program "%s" is '
'generated at run time and cannot be '
'supplied through the parameter file',
param, prog.name)
self.logger.error('Ignoring %s, we rely on %s to compute it '
'based on parameters at level %s only',
'.'.join(path_prefix+[prog.name, param]),
self.__class__,
'.'.join(path_prefix))
# We'll anyway discard it, but it's normal if we
# inherited the parameter from a level above.
del(progparams[param])
self.progparams.append(progparams)
# FIXME: whether to init workdir or not should not be controlled via
# presence of a "workdir" parameter, but by class definition
if "workdir" in self.params:
self.workdir = WorkDir(self.params["workdir"], self.params["name"],
self.name)
# Request mediator to run this task. It the "run" parameter is set
# to false, then run() below will abort.
self.send_notification(Notification.WANT_TO_RUN, None)
self.logger.debug("Exit Task.__init__(%s)", self.name)
return
def run(self):
if not self.params["run"]:
self.logger.info("Stopping at %s", self.name)
raise Exception("Job aborted because of a forcibly disabled task")
self.logger.info("Starting")
self.logger.debug("%s.run(): Task state: %s", self.name, self.state)
super().run()
# Make set of requests so multiply listed requests are sent only once
# The input_file dict maps key -> Request. Make set union of requests
requests = set.union(*[set(i.values()) for p, o, i in self.programs])
# Make dict mapping Request -> answer (i.e., FileName object)
answers = self.batch_request(dict(zip(requests, requests)))
# Make list of dicts mapping key -> answer
self._input_files = [chain_dict(i, answers) for p, o, i in self.programs]
# Since merged_args is re-generated in each run(), the subclass can
# modify it as it pleases (unlike progparams)
# For each program, merge the progparams and the input_files dicts
self.merged_args = [dict(p.items() | i.items())
for p, i in zip(self.progparams, self._input_files)]
def translate_input_filename(self, filename):
return filename
def test_outputfile_exists(self, filename):
return filename.isfile()
def cmp_input_version(self, state_key, version):
""" Compare the version of the input file with the version we have
processed before
Returns None if filename does not include file version information,
returns -2 if we have never processed the file before,
returns -1 if the previously processed file is older,
returns 0 if they are the same version,
throws an exception if the processed version is newer than the
current one.
"""
if version is None:
return None
if not state_key in self.state:
return -2
if self.state[state_key] < version:
return -1
if self.state[state_key] == version:
return 0
raise ValueError("Previously processed version is newer than current")
@staticmethod
def _input_file_to_state_dict(key, index, filename):
return ("processed_version_%d_%s" % (index, key), filename.get_version())
def have_new_input_files(self):
# Change this to "self.logger.info" for showing each check on screen
log = self.logger.debug
result = False
for index, input_files in enumerate(self._input_files):
for key, filename in input_files.items():
(state_key, version) = \
self._input_file_to_state_dict(key, index, filename)
c = self.cmp_input_version(state_key, version)
if c == -2:
log("File %s was not processed before", filename)
result = True
if c == -1:
log("File %s is newer than last time", filename)
result = True
# Collapse programs from all dict into one set
all = set.union(*[set(i.values()) for i in self._input_files])
if result is False and all:
(n, v) = (("", "is"), ("s", "are"))[len(all) > 1]
log("Input file%s %s %s unchanged since last time",
n, ", ".join(map(str, all)), v)
return result
def remember_input_versions(self, commit=True):
update = {}
for index, input_files in enumerate(self._input_files):
for (key, filename) in input_files.items():
(state_key, version) = \
self._input_file_to_state_dict(key, index, filename)
if version is not None:
update[state_key] = version
self.state.update(update, commit)
@staticmethod
def check_files_exist(filenames, filedesc, shouldexist):
""" Check that the output files in "filenames" exist or don't exist,
according to shouldexist.
Raise IOError if any check fails, return None
"""
for filename in filenames:
if isinstance(filename, FilePath):
exists = filename.isfile()
else:
exists = os.path.isfile(filename)
if shouldexist and not exists:
raise IOError("%s file %s does not exist" % (filedesc, filename))
elif not shouldexist and exists:
raise IOError("%s file %s already exists" % (filedesc, filename))
return
# These two function go together, one produces a workunit name from the
# name of the factorization, the task name, and a task-provided identifier,
# and the other function splits them again
wu_paste_char = '_'
wu_attempt_char = '#'
def make_wuname(self, identifier, attempt=None):
""" Generates a wuname from project name, task name, identifier, and
attempt number.
"""
assert not self.wu_paste_char in self.name # self.name is task name
assert not self.wu_paste_char in identifier # identifier is, e.g., range string
assert not self.wu_attempt_char in identifier
wuname = self.wu_paste_char.join([self.params["name"], self.name,
identifier])
if not attempt is None:
wuname += "%s%d" % (self.wu_attempt_char, attempt)
return wuname
def split_wuname(self, wuname):
""" Splits a wuname into project name, task name, identifier, and
attempt number.
Always returns a list of length 4; if there is not attempt given in
the wuname, then the last array entry is None
>>> # Test many possible combinations of "_" and "#" occuring in names
>>> # where these characters are allowed
>>> class Klass():
... params = {"name": None}
... wu_paste_char = '_'
... wu_attempt_char = '#'
>>> inst = Klass()
>>> from itertools import product
>>> prod = product(*[["", "_", "#"]] * 4 + [["", "#"]]*2)
>>> for sep in prod:
... inst.params["name"] = "%s%sprojectname%s%s" % sep[0:4]
... inst.name = "%staskname%s" % sep[4:6]
... for attempt in [None, 2, 3]:
... identifier = "identifier"
... wuname = Task.make_wuname(inst, "identifier", attempt=attempt)
... wu_split = Task.split_wuname(inst, wuname)
... assert wu_split == [inst.params["name"], inst.name, identifier, attempt]
"""
arr = wuname.rsplit(self.wu_paste_char, 2)
assert len(arr) == 3
attempt = None
# Split off attempt number, if available
if "#" in arr[2]:
(arr[2], attempt) = arr[2].split('#')
attempt = int(attempt)
arr.append(attempt)
return arr
class ResultInfo(wudb.WuResultMessage):
def __init__(self, wuid, rc, stdout, stderr, program, cmd_line, host):
self.wuid = wuid
self.rc = rc
self.stdout = stdout if stdout else None
self.stdoutfile = program.get_stdout()
# stdout must be either in a string or in a file, but not both
assert self.stdout is None or not self.stdoutfile
self.stderr = stderr if stderr else None
self.stderrfile = program.get_stderr()
# stderr must be either in a string or in a file, but not both
assert self.stderr is None or not self.stderrfile
self.output_files = program.get_output_files(with_stdio=False)
self.cmd_line = cmd_line
self.host = host
def get_wu_id(self):
return self.wuid
def get_output_files(self):
return self.output_files
def get_stdout(self, command_nr):
assert command_nr == 0
return self.stdout
def get_stdoutfile(self, command_nr):
assert command_nr == 0
return self.stdoutfile
def get_stderr(self, command_nr):
assert command_nr == 0
return self.stderr
def get_stderrfile(self, command_nr):
assert command_nr == 0
return self.stderrfile
def get_exitcode(self, command_nr):
assert command_nr == 0
return self.rc
def get_command_line(self, command_nr):
assert command_nr == 0
return self.cmd_line
def get_host(self):
return self.host
def log_failed_command_error(self, message, command_nr):
host = message.get_host()
host_msg = " run on %s" % host if host else ""
self.logger.error("Program%s failed with exit code %d",
host_msg, message.get_exitcode(command_nr))
cmd_line = message.get_command_line(command_nr)
if cmd_line:
self.logger.error("Command line was: %s", cmd_line)
stderr = message.read_stderr(command_nr)
stderrfilename = message.get_stderrfile(command_nr)
if stderrfilename:
stderrmsg = " (stored in file %s)" % stderrfilename
else:
stderrmsg = ""
if stderr:
self.logger.error("Stderr output (last 10 lines only) follow%s:", stderrmsg)
for l in stderr.decode().split('\n')[-10:]:
self.logger.error("\t"+l)
def submit_command(self, command, identifier, commit=True, log_errors=False):
''' Run a command.
Return the result tuple. If the caller is an Observer, also send
result to updateObserver().
'''
wuname = self.make_wuname(identifier)
process = cadocommand.Command(command)
cputime_used = os.times()[2] # CPU time of child processes
realtime_used = time.time()
(rc, stdout, stderr) = process.wait()
cputime_used = os.times()[2] - cputime_used
realtime_used = time.time() - realtime_used
self.update_cpu_real_time(command.name, cputime_used, realtime_used, commit)
message = Task.ResultInfo(wuname, rc, stdout, stderr, command,
command.make_command_line(), "server")
if rc != 0 and log_errors:
self.log_failed_command_error(message, 0)
if isinstance(self, patterns.Observer):
# pylint: disable=E1101
self.updateObserver(message)
return message
def filter_notification(self, message):
wuid = message.get_wu_id()
rc = message.get_exitcode(0)
stdout = message.read_stdout(0)
stderr = message.read_stderr(0)
output_files = message.get_output_files()
self.logger.message("%s: Received notification for wuid=%s, rc=%d, "
"output_files=[%s]",
self.name, wuid, rc, ", ".join(output_files))
(name, task, identifier, attempt) = self.split_wuname(wuid)
if name != self.params["name"] or task != self.name:
# This notification is not for me
self.logger.message("Notification %s is not for me", wuid)
return
self.logger.message("Notification %s is for me", wuid)
if rc != 0:
self.logger.debug("Return code is: %d", rc)
if stdout:
self.logger.debug("stdout is: %s", stdout)
if stderr:
self.logger.debug("stderr is: %s", stderr)
if output_files:
self.logger.message("Output files are: %s", ", ".join(output_files))
return identifier
def send_notification(self, key, value):
""" Wrapper around Colleague.send_notification() that instantiates a
Notification with self as the sender
"""
notification = Notification(self, key, value)
super().send_notification(notification)
def send_request(self, key, *args):
""" Wrapper around Colleague.send_request() that instantiates a
Request with self as the sender
"""
request = Request(self, key, *args)
return super().send_request(request)
def batch_request(self, requests):
""" Given a dict from keys to Request objects, return a dict with the
same keys to the results of the requests.
"""
return {key: self.send_request(request) for key, request in requests.items()}
def get_number_outstanding_wus(self):
return 0
def verification(self, wuid, ok, *, commit):
pass
def get_state_filename(self, key, version=None):
""" Return a file name stored in self.state as a FilePath object
If a version parameter is passed, then this version is set as the
version field of the FilePath object. If no parameter is passed, but
our state includes an "output_version" key, then that is used.
"""
if not key in self.state:
return None
if version is None:
version = self.state.get("output_version", None)
return self.workdir.path_in_workdir(self.state[key], version)
def make_std_paths(self, progname, do_increment=True, prefix=None):
count = self.state.get("stdiocount", 0)
if do_increment:
count += 1
did_increment = do_increment
while True:
try:
stdoutname = "%s.stdout.%d" % (progname, count)
stderrname = "%s.stderr.%d" % (progname, count)
self.check_files_exist((stdoutname, stderrname), "stdio",
shouldexist=False)
except IOError:
count += 1
did_increment = True
self.logger.warning("Stdout or stderr files with index %d "
"already exist", count)
else:
break
stdoutpath = self.workdir.make_filename(stdoutname, prefix=prefix)
stderrpath = self.workdir.make_filename(stderrname, prefix=prefix)
if did_increment:
self.state["stdiocount"] = count
return (stdoutpath, stderrpath)
def make_filelist(self, files, prefix=None):
""" Create file file containing a list of files, one per line """
filelist_idx = self.state.get("filelist_idx", 0) + 1
self.state["filelist_idx"] = filelist_idx
filelistname = self.workdir.make_filename("filelist.%d" % filelist_idx, prefix=prefix)
with filelistname.open("w") as filelistfile:
filelistfile.write("\n".join(files) + "\n")
return filelistname
def collect_usable_parameters(self, rl):
message=[]
message.append("Parameters used by Task %s" % self.name)
prefix = '.'.join(self.parameters.get_param_path())
for p in self.paramnames:
message.append(" %s.%s" % (prefix, p))
rl[p].append(prefix)
for prog, override, needed_input in self.programs:
message.append(" Parameters for program %s (general form %s.%s.*)" % (
prog.name, prefix, prog.name))
for p in sorted(prog.get_accepted_keys()):
t = "%s.%s.%s" % (prefix, prog.name, p)
rl[p].append("%s.%s" % (prefix, prog.name))
if p in set(override):
message.append(" [excluding internal parameter %s]" % t)
elif p in set(needed_input):
message.append(" [excluding internal file name %s]" % t)
else:
message.append(" %s" % t)
message.append("")
return "\n".join(message)
class ClientServerTask(Task, wudb.UsesWorkunitDb, patterns.Observer):
@abc.abstractproperty
def paramnames(self):
return self.join_params(super().paramnames,
{"maxwu": 10,
"wutimeout": 10800, # Default: 3h
"maxresubmit": 5,
"maxtimedout": 100,
"maxfailed": 100})
def __init__(self, *, mediator, db, parameters, path_prefix):
super().__init__(mediator=mediator, db=db, parameters=parameters,
path_prefix=path_prefix)
self.state.setdefault("wu_submitted", 0)
self.state.setdefault("wu_received", 0)
self.state.setdefault("wu_timedout", 0)
self.state.setdefault("wu_failed", 0)
assert self.get_number_outstanding_wus() >= 0
# start_real_time will be a float giving the number of seconds since
# Jan 1 1900 at the beginning of the task
self.state.update({"start_real_time": 0})
# start_achievement is a variable that tells us how far we were at
# the beginning of this run (for example if a factorization is
# restarted in the middle of a polyselect or sieve task.)
# It should be in [0,1], and if not initialized yet it is -1.
self.state.update({"start_achievement": -1})
self.send_notification(Notification.SUBSCRIBE_WU_NOTIFICATIONS, None)
def submit_wu(self, wu, commit=True):
""" Submit a WU and update wu_submitted counter """
# at beginning of the task, set "start_real_time" to the number of
# seconds since Jan 1 1900
if self.state["start_real_time"] == 0:
delta = datetime.datetime.now() - datetime.datetime(1900,1,1)
self.state.update({"start_real_time": delta.total_seconds()})
key = "wu_submitted"
self.state.update({key: self.state[key] + 1}, commit=False)
self.wuar.create(str(wu), commit=commit)
def cancel_wu(self, wuid, commit=True):
""" Cancel a WU and update wu_timedout counter """
self.logger.debug("Cancelling: %s", wuid)
key = "wu_timedout"
maxtimedout = self.params["maxtimedout"]
if not self.state[key] < maxtimedout:
self.logger.error("Exceeded maximum number of timed out "
"workunits, maxtimedout=%d ", maxtimedout)
raise Exception("Too many timed out work units. Please increase tasks.maxtimedout (current value is %d)" % maxtimedout)
self.state.update({key: self.state[key] + 1}, commit=False)
self.wuar.cancel(wuid, commit=commit)
def submit_command(self, command, identifier, commit=True, log_errors=False):
''' Submit a workunit to the database. '''
while self.get_number_available_wus() >= self.params["maxwu"]:
self.wait()
wuid = self.make_wuname(identifier)
wutext = command.make_wu(wuid)
for filename in command.get_exec_files() + command.get_input_files():
basename = os.path.basename(filename)
self.send_notification(Notification.REGISTER_FILENAME,
{basename:filename})
self.logger.info("Adding workunit %s to database", wuid)
# self.logger.debug("WU:\n%s" % wutext)
self.submit_wu(wutext, commit=commit)
# Write command line to a file
cmdline = command.make_command_line()
client_cmd_filename = self.workdir.make_filename2(taskname="",
filename="wucmd")
with client_cmd_filename.open("a") as client_cmd_file:
client_cmd_file.write("# Command for work unit: %s\n%s\n" %
(wuid, cmdline))
def get_eta(self):
delta = datetime.datetime.now() - datetime.datetime(1900,1,1)
seconds = delta.total_seconds() - self.state["start_real_time"]
a = self.get_achievement()
a0 = self.state["start_achievement"]
if a0 == -1:
self.state["start_achievement"] = a
a0 = a
elif a0 > a:
# if a0 > a, it means we had a failing filtering try, which means
# a had attained 100%, and then decreased to say 95% for example,
# thus we need to update a0 by multiplying it by a
a0 = a0 * a
try:
remaining_time = seconds / (a - a0) * (1.0 - a)
now = datetime.datetime.now()
arrival = now + datetime.timedelta(seconds=remaining_time)
return arrival.ctime()
except (OverflowError,ZeroDivisionError):
return "Unknown"
def verification(self, wuid, ok, *, commit):
""" Mark a workunit as verified ok or verified with error and update
wu_received counter """
ok_str = "ok" if ok else "not ok"
assert self.get_number_outstanding_wus() >= 1
key = "wu_received"
self.state.update({key: self.state[key] + 1}, commit=False)
# only print ETA when achievement > 0 to avoid division by zero
a = self.get_achievement()
if a > 0:
self.logger.info("Marking workunit %s as %s (%.1f%% => ETA %s)",
wuid, ok_str, 100.0 * a, self.get_eta())
self.wuar.verification(wuid, ok, commit=commit)
def cancel_available_wus(self):
self.logger.info("Cancelling remaining workunits")
self.wuar.cancel_all_available()
def get_number_outstanding_wus(self):
return self.state["wu_submitted"] - self.state["wu_received"] \
- self.state["wu_timedout"]
def get_number_available_wus(self):
return self.wuar.count_available()
def test_outputfile_exists(self, filename):
# Can't test
return False
def wait(self):
# Ask the mediator to check for workunits of status Received,
# and if there are any, to send WU result notifications to the
# subscribed listeners.
# If we get notification on new results reliably from the HTTP server,
# we might not need this poll. But they probably won't be totally
# reliable
if not self.send_request(Request.GET_WU_RESULT):
self.resubmit_timed_out_wus()
time.sleep(1)
def resubmit_one_wu(self, wu, commit=True, maxresubmit=None):
""" Takes a Workunit instance and adds it to workunits table under
a modified name.
"""
wuid = wu.get_id()
(name, task, identifier, attempt) = self.split_wuname(wuid)
attempt = 2 if attempt is None else attempt + 1
# Don't do "if not maxresubmit:" as 0 is legit value
if maxresubmit is None:
maxresubmit = self.params["maxresubmit"]
if attempt > maxresubmit:
self.logger.info("Not resubmitting workunit %s, failed %d times",
wuid, attempt - 1)
self.wuar.commit(commit)
return
new_wuid = self.make_wuname(identifier, attempt)
wu.set_id(new_wuid)
self.logger.info("Resubmitting workunit %s as %s", wuid, new_wuid)
self.submit_wu(wu, commit=commit)
def resubmit_timed_out_wus(self):
""" Check for any timed out workunits and resubmit them """
# We don't store the lastcheck in state as we do *not* want to check
# instantly when we start up - clients should get a chance to upload
# results first
now = time.time()
if not hasattr(self, "last_timeout_check"):
self.logger.debug("Setting last timeout check to %f", now)
self.last_timeout_check = now
return
check_every = 60 # Check every xx seconds
if self.last_timeout_check + check_every >= now:
# self.logger.info("It's not time to check yet, now = %f", now)
return
self.last_timeout_check = now
timeout = self.params["wutimeout"]
delta = datetime.timedelta(seconds=timeout)
cutoff = str(datetime.datetime.utcnow() - delta)
# self.logger.debug("Doing timeout check, cutoff=%s, and setting last check to %f",
# cutoff, now)
results = self.wuar.query(eq={"status": wudb.WuStatus.ASSIGNED},
lt={"timeassigned": cutoff})
results += self.wuar.query(eq={"status": wudb.WuStatus.NEED_RESUBMIT})
if not results:
# self.logger.debug("Found no timed-out workunits")
pass
self.logger.debug("Timeout check took %f s, found %d WUs",
time.time() - now, len(results))
for entry in results:
self.cancel_wu(entry["wuid"], commit=False)
self.resubmit_one_wu(Workunit(entry["wu"]), commit=True)
def handle_error_result(self, message):
""" Handle workunit with non-zero exit code
If the result message indicates a failed command, log an error
message, set the workunit to VERIFIED_ERROR in the DB, resubmit
the work unit (but no more than once) and return True.
If it indicates no error, return False. """
if message.get_exitcode(0) == 0:
return False
self.log_failed_command_error(message, 0)
key = "wu_failed"
maxfailed = self.params["maxfailed"]
if not self.state[key] < maxfailed:
self.logger.error("Exceeded maximum number of failed "
"workunits, maxfailed=%d ", maxfailed)
raise Exception("Too many failed work units")
results = self.wuar.query(eq={"wuid":message.get_wu_id()})
assert len(results) == 1 # There must be exactly 1 WU
assert results[0]["status"] == wudb.WuStatus.RECEIVED_ERROR
wu = workunit.Workunit(results[0]["wu"])
self.state.update({key: self.state[key] + 1}, commit=False)
self.verification(message.get_wu_id(), False, commit=False)
self.resubmit_one_wu(wu, commit=True, maxresubmit=2)
return True
class Polysel1Task(ClientServerTask, DoesImport, HasStatistics, patterns.Observer):
""" Finds a number of size-optimized polynomial, uses client/server """
@property
def name(self):
return "polyselect1"
@property
def title(self):
return "Polynomial Selection (size optimized)"
@property
def programs(self):
# admin and admax are special, which is a bit ugly: these parameters
# to the Polyselect constructor are supplied by the task, but the
# task has itself admin, admax parameters, which specify the total
# size of the search range. Thus we don't include admin, admax here,
# or PolyselTask would incorrectly warn about them not being used.
return ((cadoprograms.Polyselect, ("out"), {}),)
@property
def paramnames(self):
return self.join_params(super().paramnames, {
"N": int, "adrange": int, "admin": 0, "admax": int,
"nrkeep": 20, "import_sopt": [str]})
@staticmethod
def update_lognorms(old_lognorm, new_lognorm):
lognorm = [0, 0, 0, 0, 0]
# print("update_lognorms: old_lognorm: %s" % old_lognorm)
# print("update_lognorms: new_lognorm: %s" % new_lognorm)
# New minimum. Don't use default value of 0 for minimum
lognorm[1] = min(old_lognorm[1] or new_lognorm[1], new_lognorm[1])
# New maximum
lognorm[3] = max(old_lognorm[3], new_lognorm[3])
# Rest is done by combine_stats(). [0::2] selects indices 0,2,4
lognorm[0::2] = Statistics.combine_stats(old_lognorm[0::2],
new_lognorm[0::2])
return lognorm
# Stat: potential collisions=124.92 (2.25e+00/s)
# Stat: raw lognorm (nr/min/av/max/std): 132/18.87/21.83/24.31/0.48
# Stat: optimized lognorm (nr/min/av/max/std): 125/20.10/22.73/24.42/0.69
# Stat: total phase took 55.47s
@property
def stat_conversions(self):
return (
(
"stats_collisions",
float,
"0",
Statistics.add_list,
re.compile(re_cap_n_fp("# Stat: potential collisions=", 1)),
False
),
(
"stats_rawlognorm",
(int, float, float, float, float),
"0 0 0 0 0",
self.update_lognorms,
re.compile(r"# Stat: raw lognorm \(nr/min/av/max/std\): (\d+)/{cap_fp}/{cap_fp}/{cap_fp}/{cap_fp}".format(**REGEXES)),
False
),
(
"stats_optlognorm",
(int, float, float, float, float),
"0 0 0 0 0",
self.update_lognorms,
re.compile(r"# Stat: optimized lognorm \(nr/min/av/max/std\): (\d+)/{cap_fp}/{cap_fp}/{cap_fp}/{cap_fp}".format(**REGEXES)),
False
),
(
"stats_tries",
int,
"0 0 0",
Statistics.add_list,
re.compile(r"# Stat: tried (\d+) ad-value\(s\), found (\d+) polynomial\(s\), (\d+) below maxnorm"),
False
),
(
"stats_total_time",
float,
"0",
Statistics.add_list,
re.compile(re_cap_n_fp("# Stat: total phase took", 1, "s")),
False
),
)
@property
def stat_formats(self):
return (
["potential collisions: {stats_collisions[0]:g}"],
["raw lognorm (nr/min/av/max/std): {stats_rawlognorm[0]:d}"] +
["/{stats_rawlognorm[%d]:.3f}" % i for i in range(1, 5)],
["optimized lognorm (nr/min/av/max/std): {stats_optlognorm[0]:d}"] +
["/{stats_optlognorm[%d]:.3f}" % i for i in range(1, 5)],
["Total time: {stats_total_time[0]:g}"],
)
def __init__(self, *, mediator, db, parameters, path_prefix):
super().__init__(mediator=mediator, db=db, parameters=parameters,
path_prefix=path_prefix)
assert self.params["nrkeep"] > 0
self.state["adnext"] = \
max(self.state.get("adnext", 0), self.params["admin"])
# Remove admin and admax from the parameter-file-supplied program
# parameters as those would conflict with the computed values
self.progparams[0].pop("admin", None)
self.progparams[0].pop("admax", None)
tablename = self.make_tablename("bestpolynomials")
self.best_polynomials = self.make_db_dict(
tablename, connection=self.db_connection)
self._check_best_polynomials()
self.poly_heap = []
# If we have "import", discard any existing polynomials
if "import" in self.params and self.best_polynomials:
self.logger.warning('Have "import" parameter, discarding '
'previously found polynomials')
self.best_polynomials.clear()
self.import_existing_polynomials()
self._check_best_polynomials()
self._compare_heap_db()
def _check_best_polynomials(self):
# Check that the keys form a sequence of consecutive non-negative
# integers
oldkeys = list(self.best_polynomials.keys())
oldkeys.sort(key=int)
assert oldkeys == list(map(str, range(len(self.best_polynomials))))
def _compare_heap_db(self):
""" Compare that the polynomials in the heap and in the DB agree
They must contain an equal number of entries, and each polynomial
stored in the heap must be at the specified index in the DB.
"""
assert len(self.poly_heap) == len(self.best_polynomials)
for lognorm, (key, poly) in self.poly_heap:
assert self.best_polynomials[key] == str(poly)
def import_existing_polynomials(self):
debug = False
oldkeys = list(self.best_polynomials.keys())
oldkeys.sort(key=int) # Sort by numerical value
for oldkey in oldkeys:
if debug:
print("Adding old polynomial at DB index %s: %s" %
(oldkey, self.best_polynomials[oldkey]))
poly = Polynomials(self.best_polynomials[oldkey].splitlines())
if not poly.lognorm:
self.logger.error("Polynomial at DB index %s has no lognorm", oldkey)
continue
newkey = self._add_poly_heap(poly)
if newkey is None:
# Heap is full, and the poly was worse than the worst one on
# the heap. Thus it did not get added and must be removed from
# the DB
if debug:
print("Deleting polynomial lognorm=%f, key=%s" %
(poly.lognorm, oldkey))
del self.best_polynomials[oldkey]
elif newkey != oldkey:
# Heap is full, worst one in heap (with key=newkey) was
# overwritten and its DB entry gets replaced with poly from
# key=oldkey
if debug:
print("Overwriting poly lognorm=%f, key=%s with poly "
"lognorm=%f, key=%s" %
(self.poly_heap[0][0], newkey, poly.lognorm, oldkey))
self.best_polynomials.clear(oldkey, commit=False)
self.best_polynomials.update({newkey: poly}, commit=True)
else:
# Last case newkey == oldkey: nothing to do
if debug:
print("Adding lognorm=%f, key=%s" % (poly.lognorm, oldkey))
def run(self):
if self.send_request(Request.GET_WILL_IMPORT_FINAL_POLYNOMIAL):
self.logger.info("Skipping this phase, as we will import the final polynomial")
return True
super().run()
if self.did_import() and "import_sopt" in self.params:
self.logger.critical("The import and import_sopt parameters "
"are mutually exclusive")
return False
if self.did_import():
self.logger.info("Imported polynomial(s), skipping this phase")
return True
if "import_sopt" in self.params:
self.import_files(self.params["import_sopt"])
worstmsg = ", worst lognorm %f" % -self.poly_heap[0][0] \
if self.poly_heap else ""
self.logger.info("%d polynomials in queue from previous run%s",
len(self.poly_heap), worstmsg)
if self.is_done():
self.logger.info("Already finished - nothing to do")
return True
# Submit all the WUs we need to reach admax
while self.need_more_wus():
self.submit_one_wu()
# Wait for all the WUs to finish
while self.get_number_outstanding_wus() > 0:
self.wait()
self._compare_heap_db()
self.logger.info("Finished")
return True
def is_done(self):
return not self.need_more_wus() and \
self.get_number_outstanding_wus() == 0
def get_achievement(self):
return self.state["wu_received"] * self.params["adrange"] / (self.params["admax"] - self.params["admin"])
def updateObserver(self, message):
identifier = self.filter_notification(message)
if not identifier:
# This notification was not for me
return False
if self.handle_error_result(message):
return True
(filename, ) = message.get_output_files()
self.process_polyfile(filename, commit=False)
self.parse_stats(filename, commit=False)
# Always mark ok to avoid warning messages about WUs that did not
# find a poly
self.verification(message.get_wu_id(), True, commit=True)
return True
@staticmethod
def read_blocks(input):
""" Return blocks of consecutive non-empty lines from input
Whitespace is stripped; a line containing only whitespace is
considered empty. An empty block is never returned.
>>> list(Polysel1Task.read_blocks(['', 'a', 'b', '', 'c', '', '', 'd', 'e', '']))
[['a', 'b'], ['c'], ['d', 'e']]
"""
block = []
for line in input:
line = line.strip()
if line:
block.append(line)
else:
if block:
yield block
block = []
if block:
yield block
def import_one_file(self, filename):
self.process_polyfile(filename)
def process_polyfile(self, filename, commit=True):
""" Read all size-optimized polynomials in a file and add them to the
DB and priority queue if worthwhile.
Different polynomials must be separated by a blank line.
"""
try:
polyfile = self.read_log_warning(filename)
except (OSError, IOError) as e:
if e.errno == 2: # No such file or directory
self.logger.error("File '%s' does not exist", filename)
return None
else:
raise
totalparsed, totaladded = 0, 0
for block in self.read_blocks(polyfile):
parsed, added = self.parse_and_add_poly(block, filename)
totalparsed += parsed
totaladded += added
have = len(self.poly_heap)
nrkeep = self.params["nrkeep"]
fullmsg = ("%d/%d" % (have, nrkeep)) if have < nrkeep else "%d" % nrkeep
self.logger.info("Parsed %d polynomials, added %d to priority queue (has %s)",
totalparsed, totaladded, fullmsg)
if totaladded:
self.logger.info("Worst polynomial in queue now has lognorm %f",
-self.poly_heap[0][0])
def read_log_warning(self, filename):
""" Read lines from file. If a "# WARNING" line occurs, log it.
"""
re_warning = re.compile("# WARNING")
with open(filename, "r") as inputfile:
for line in inputfile:
if re_warning.match(line):
self.logger.warn("File %s contains: %s",
filename, line.strip())
yield line
def parse_and_add_poly(self, text, filename):
""" Parse a polynomial from an iterable of lines and add it to the
priority queue and DB. Return a two-element tuple with the number of
polynomials parsed and added, i.e., (0, 0), (1, 0), or (1, 1).
"""
poly = self.parse_poly(text, filename)
if poly is None:
return (0, 0)
if poly.getN() != self.params["N"]:
self.logger.error("Polynomial is for the wrong number to be factored:\n%s",
poly)
return (0, 0)
if not poly.lognorm:
self.logger.warn("Polynomial in file %s has no lognorm, skipping it",
filename)
return (0, 0)
if self._add_poly_heap_db(poly):
return (1, 1)
else:
return (1, 0)
def _add_poly_heap_db(self, poly):
""" Add a polynomial to the heap and DB, if it's good enough.
Returns True if the poly was added, False if not. """
key = self._add_poly_heap(poly)
if key is None:
return False
self.best_polynomials[key] = str(poly)
return True
def _add_poly_heap(self, poly):
""" Add a polynomial to the heap
If the heap is full (nrkeep), the worst polynomial (i.e., with the
largest lognorm) is replaced if the new one is better.
Returns the key (as a str) under which the polynomial was added,
or None if it was not added.
"""
assert len(self.poly_heap) <= self.params["nrkeep"]
debug = False
# Find DB index under which to store this new poly. If the heap
# is not full, use the next bigger index.
key = len(self.poly_heap)
# Is the heap full?
if key == self.params["nrkeep"]:
# Should we store this poly at all, i.e., is it better than
# the worst one in the heap?
worstnorm = -self.poly_heap[0][0]
if worstnorm <= poly.lognorm:
if debug:
self.logger.debug("_add_poly_heap(): new poly lognorm %f, "
"worst in heap has %f. Not adding",
poly.lognorm, worstnorm)
return None
# Pop the worst poly from heap and re-use its DB index
key = heapq.heappop(self.poly_heap)[1][0]
if debug:
self.logger.debug("_add_poly_heap(): new poly lognorm %f, "
"worst in heap has %f. Replacing DB index %s",
poly.lognorm, worstnorm, key)
else:
# Heap was not full
if debug:
self.logger.debug("_add_poly_heap(): heap was not full, adding "
"poly with lognorm %f at DB index %s", poly.lognorm, key)
# The DB requires the key to be a string. In order to have
# identical data in DB and heap, we store key as str everywhere.
key = str(key)
# Python heapq stores a minheap, so in order to have the worst
# polynomial (with largest norm) easily accessible, we use
# -lognorm as the heap key
new_entry = (-poly.lognorm, (key, poly))
heapq.heappush(self.poly_heap, new_entry)
return key
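# Illustrative sketch (not part of cadotask.py): why the heap stores -lognorm.
# heapq is a min-heap, so keeping the *negated* lognorm as the sort key makes
# the entry with the largest (worst) lognorm sit at heap[0], ready to be popped
# when a better polynomial arrives. Toy example with bare floats:
def _demo_negated_key_heap():
    import heapq
    heap = []
    for lognorm in (21.5, 24.0, 22.3):
        heapq.heappush(heap, -lognorm)
    worst = -heap[0]          # 24.0, the largest lognorm seen so far
    heapq.heappop(heap)       # discard the worst entry
    return worst, sorted(-x for x in heap)
# _demo_negated_key_heap() == (24.0, [21.5, 22.3])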
def parse_poly(self, text, filename):
poly = None
try:
poly = Polynomials(text)
except PolynomialParseException as e:
if str(e) != "No polynomials found":
self.logger.warn("Invalid polyselect file '%s': %s",
filename, e)
return None
except UnicodeDecodeError as e:
self.logger.error("Error reading '%s' (corrupted?): %s", filename, e)
return None
if not poly:
return None
return poly
def get_raw_polynomials(self):
# Extract polynomials from heap and return as list
return [entry[1][1] for entry in self.poly_heap]
def get_poly_rank(self, search_poly):
""" Return how many polynomnials with lognorm less than the lognorm
of the size-optimized version of search_poly there are in the
priority queue.
The size-optimized version of search_poly is identified by comparing
the leading coefficients of both polynomials.
"""
df = search_poly.polyf.degree
dg = search_poly.polyg.degree
# Search for the raw polynomial pair by comparing the leading
# coefficients of both polynomials
found = None
for (index, (lognorm, (key, poly))) in enumerate(self.poly_heap):
if search_poly.polyg.same_lc(poly.polyg):
if found is not None:
self.logger.warning("Found more than one match for:\n%s", search_poly)
else:
found = index
if found is None:
self.logger.warning("Could not find polynomial rank for %s", search_poly)
return None
# print("search_poly: %s" % search_poly)
# print("Poly found in heap: %s" % self.poly_heap[found][1][1])
search_lognorm = -self.poly_heap[found][0]
rank = 0
for (lognorm, (key, poly)) in self.poly_heap:
if -lognorm < search_lognorm:
rank += 1
return rank
def need_more_wus(self):
return self.state["adnext"] < self.params["admax"]
def submit_one_wu(self):
adstart = self.state["adnext"]
adend = adstart + self.params["adrange"]
adend = adend - (adend % self.params["adrange"])
assert adend > adstart
adend = min(adend, self.params["admax"])
outputfile = self.workdir.make_filename("%d-%d" % (adstart, adend), prefix=self.name)
if self.test_outputfile_exists(outputfile):
self.logger.info("%s already exists, won't generate again",
outputfile)
else:
p = cadoprograms.Polyselect(admin=adstart, admax=adend,
stdout=str(outputfile),
**self.progparams[0])
self.submit_command(p, "%d-%d" % (adstart, adend), commit=False)
self.state.update({"adnext": adend}, commit=True)
def get_total_cpu_or_real_time(self, is_cpu):
""" Return number of seconds of cpu time spent by polyselect """
return float(self.state.get("stats_total_time", 0.)) if is_cpu else 0.
class Polysel2Task(ClientServerTask, HasStatistics, DoesImport, patterns.Observer):
""" Finds a polynomial, uses client/server """
@property
def name(self):
return "polyselect2"
@property
def title(self):
return "Polynomial Selection (root optimized)"
@property
def programs(self):
return ((cadoprograms.PolyselectRopt, (), {}),)
@property
def paramnames(self):
return self.join_params(super().paramnames, {
"N": int, "I": int, "qmin": int, "lpb1": int, "lpb0": int,
"batch": [int], "import_ropt": [str]})
@property
def stat_conversions(self):
return (
(
"stats_total_time",
float,
"0",
Statistics.add_list,
re.compile(re_cap_n_fp("# Stat: total phase took", 1, "s")),
False
),
(
"stats_rootsieve_time",
float,
"0",
Statistics.add_list,
re.compile(re_cap_n_fp("# Stat: rootsieve took", 1, "s")),
False
)
)
@property
def stat_formats(self):
return (
["Total time: {stats_total_time[0]:g}"],
["Rootsieve time: {stats_rootsieve_time[0]:g}"],
)
def __init__(self, *, mediator, db, parameters, path_prefix):
super().__init__(mediator=mediator, db=db, parameters=parameters,
path_prefix=path_prefix)
self.bestpoly = None
if "import" in self.params and "bestpoly" in self.state:
self.logger.warning('Have "import" parameter, discarding '
'previously found best polynomial')
self.state.clear(["bestpoly", "bestfile"])
if "bestpoly" in self.state:
self.bestpoly = Polynomials(self.state["bestpoly"].splitlines())
self.state.setdefault("nr_poly_submitted", 0)
# I don't understand why the area is based on one particular side.
self.progparams[0].setdefault("area", 2.**(2*self.params["I"]-1) \
* self.params["qmin"])
# on Sep 26, 2018, changed Bf,Bg from lim1/lim0 to 2^lpb1/2^lpb0
self.progparams[0].setdefault("Bf", float(2**self.params["lpb1"]))
self.progparams[0].setdefault("Bg", float(2**self.params["lpb0"]))
if not "batch" in self.params:
t = self.progparams[0].get("threads", 1)
# batch = 5 rounded up to a multiple of t
self.params["batch"] = (4 // t + 1) * t
self.poly_to_submit = None
def run(self):
super().run()
if self.bestpoly is None:
self.logger.info("No polynomial was previously found")
else:
self.logger.info("Best polynomial previously found in %s has "
"Murphy_E = %g",
self.state["bestfile"], self.bestpoly.MurphyE)
if self.did_import() and "import_ropt" in self.params:
self.logger.critical("The import and import_ropt parameters "
"are mutually exclusive")
return False
if self.did_import():
self.logger.info("Imported polynomial, skipping this phase")
return True
if "import_ropt" in self.params:
self.import_files(self.params["import_ropt"])
# Get the list of polynomials to submit
self.poly_to_submit = self.send_request(Request.GET_RAW_POLYNOMIALS)
if self.is_done():
self.logger.info("Already finished - nothing to do")
self.print_rank()
# If the poly file got lost somehow, write it again
filename = self.get_state_filename("polyfilename")
if filename is None or not filename.isfile():
self.logger.warn("Polynomial file disappeared, writing again")
self.write_poly_file()
return True
# Submit all the WUs we need
while self.need_more_wus():
self.submit_one_wu()
# Wait for all the WUs to finish
while self.get_number_outstanding_wus() > 0:
self.wait()
if self.bestpoly is None:
self.logger.error ("No polynomial found. Consider increasing the "
"search range bound admax, or maxnorm")
return False
self.logger.info("Finished, best polynomial from file %s has Murphy_E "
"= %g", self.state["bestfile"] , self.bestpoly.MurphyE)
self.print_rank()
self.write_poly_file()
return True
def is_done(self):
return self.bestpoly is not None and not self.need_more_wus() and \
self.get_number_outstanding_wus() == 0
def need_more_wus(self):
return self.state["nr_poly_submitted"] < len(self.poly_to_submit)
def get_achievement(self):
return (self.state["nr_poly_submitted"] - self.params["batch"] * self.get_number_outstanding_wus()) / len(self.poly_to_submit)
def updateObserver(self, message):
identifier = self.filter_notification(message)
if not identifier:
# This notification was not for me
return False
if self.handle_error_result(message):
return True
(filename, ) = message.get_output_files()
self.process_polyfile(filename, commit=False)
self.parse_stats(filename, commit=False)
# Always mark ok to avoid warning messages about WUs that did not
# find a poly
# FIXME: wrong, we should always get an optimized poly for a raw one
self.verification(message.get_wu_id(), True, commit=True)
return True
def import_one_file(self, filename):
old_bestpoly = self.bestpoly
self.process_polyfile(filename)
if self.bestpoly is not old_bestpoly:
self.write_poly_file()
def process_polyfile(self, filename, commit=True):
poly = self.parse_poly(filename)
if poly is not None:
self.bestpoly = poly
update = {"bestpoly": str(poly), "bestfile": filename}
self.state.update(update, commit=commit)
def read_log_warning(self, filename):
""" Read lines from file. If a "# WARNING" line occurs, log it.
"""
re_warning = re.compile("# WARNING")
with open(filename, "r") as inputfile:
for line in inputfile:
if re_warning.match(line):
self.logger.warn("File %s contains: %s",
filename, line.strip())
yield line
|
import sys,string,types,io,math,copy
from . import func
from .Var import var
from .Glitch import Glitch
if var.usePfAndNumpy:
import numpy
from .Model import Model
from .Node import Node,NodePart,NodeBranchPart
from . import NexusToken
class Tree(object):
"""A phylogenetic tree.
**Some instance variables**
* ``fName``, if the Tree was read from a file
* ``name``, the name of the tree.
* ``root``, the root node
* ``nodes``, a list of nodes.
* ``preOrder`` and ``postOrder``, lists of node numbers
* ``recipWeight``, the weight, if it exists, is usually 1/something, so the reciprocal looks nicer ...
* ``nexusSets``, if it exists, a NexusSets object.
**Properties**
* ``taxNames``, a list of names. Usually the order is important!
* ``data``, a :class:`Data.Data` object
* ``model``, a :class:`Model.Model` object
* ``nTax``, the number of taxa
* ``nInternalNodes``, the number of non-leaf nodes
**The node method**
You often will want to refer to a node in a tree. This can be
done via its name, or its nodeNum, or as an object, via the method
:meth:`Tree.Tree.node`. For example, from a Tree ``t`` you can
get node number 3 via::
n = t.node(3)
and you can get the node that is the parent of the Mastodon via::
n = t.node('Mastodon').parent
For many methods that require specifying a node, the method argument is *nodeSpecifier*, eg::
t.reRoot(23)
``reRoots``'s the tree to node number 23.
**Describe, draw, and get information about the tree**
.. autosummary::
Tree.dump
Tree.draw
Tree.textDrawList
Tree.tv
Tree.btv
Tree.isFullyBifurcating
Tree.len
Tree.lenInternals
Tree.stemminess
Tree.taxSetIsASplit
Tree.getAllLeafNames
Tree.getChildrenNums
Tree.getDegree
Tree.getNodeNumsAbove
Tree.getPreAndPostOrderAbove
Tree.getPreAndPostOrderAboveRoot
Tree.getSeqNumsAbove
Tree.subTreeIsFullyBifurcating
Tree.summarizeModelThingsNNodes
Tree.verifyIdentityWith
**Write**
.. autosummary::
Tree.write
Tree.writeNewick
Tree.writeNexus
Tree.writePhylip
Tree.tPickle
See also Trees methods :meth:`Trees.Trees.writeNexus` and
:meth:`Trees.Trees.writeNewick` for doing trees by the bunch.
**Iteration over the nodes**
Sometimes you don't want to just iterate over the self.nodes list,
because after some manipulations a node might be in self.nodes but
not actually in the tree; using these 'iter' methods takes care of
that, skipping such nodes.
.. autosummary::
Tree.iterInternals
Tree.iterInternalsNoRoot
Tree.iterInternalsNoRootPostOrder
Tree.iterInternalsNoRootPreOrder
Tree.iterInternalsPostOrder
Tree.iterLeavesNoRoot
Tree.iterLeavesPostOrder
Tree.iterLeavesPreOrder
Tree.iterNodes
Tree.iterNodesNoRoot
Tree.iterPostOrder
Tree.iterPreOrder
Tree.nextNode
See also Node methods that do similar things starting from a given node.
**Copy**
.. autosummary::
Tree.dupe
Tree.copyToTree
Tree.dupeSubTree
**In combination with Data and Model**
.. autosummary::
Tree.calcLogLike
Tree.optLogLike
Tree.simulate
Tree.getSiteLikes
Tree.getSiteRates
Tree.bigXSquaredSubM
Tree.compStatFromCharFreqs
Tree.compoTestUsingSimulations
Tree.modelFitTests
Tree.modelSanityCheck
Tree.simsForModelFitTests
**Setting a model**
.. autosummary::
Tree.newComp
Tree.newRMatrix
Tree.newGdasrv
Tree.setPInvar
Tree.setRelRate
Tree.setModelThing
Tree.setModelThingsRandomly
Tree.setModelThingsNNodes
Tree.summarizeModelThingsNNodes
Tree.setNGammaCat
Tree.setTextDrawSymbol
**Tree manipulation**
.. autosummary::
Tree.addLeaf
Tree.addNodeBetweenNodes
Tree.addSibLeaf
Tree.addSubTree
Tree.allBiRootedTrees
Tree.collapseNode
Tree.ladderize
Tree.lineUpLeaves
Tree.nni
Tree.pruneSubTreeWithoutParent
Tree.randomSpr
Tree.randomizeTopology
Tree.reRoot
Tree.reconnectSubTreeWithoutParent
Tree.removeEverythingExceptCladeAtNode
Tree.removeNode
Tree.removeAboveNode
Tree.removeRoot
Tree.renameForPhylip
Tree.restoreDupeTaxa
Tree.restoreNamesFromRenameForPhylip
Tree.rotateAround
Tree.spr
Tree.stripBrLens
**Misc**
.. autosummary::
Tree.checkDupedTaxonNames
Tree.checkSplitKeys
Tree.checkTaxNames
Tree.checkThatAllSelfNodesAreInTheTree
Tree.inputTreesToSuperTreeDistances
Tree.makeSplitKeys
Tree.readBipartitionsFromPaupLogFile
Tree.recalculateSplitKeysOfNodeFromChildren
Tree.setNexusSets
Tree.topologyDistance
Tree.tvTopologyCompare
Tree.patristicDistanceMatrix
Tree.inputTreesToSuperTreeDistances
"""
# Tree methods in other files.
from .Tree_muck import node, rotateAround, reRoot, removeRoot, removeNode, removeAboveNode, collapseNode, pruneSubTreeWithoutParent, reconnectSubTreeWithoutParent, addNodeBetweenNodes, allBiRootedTrees, ladderize, randomizeTopology, readBipartitionsFromPaupLogFile, renameForPhylip, restoreNamesFromRenameForPhylip, restoreDupeTaxa, lineUpLeaves, removeEverythingExceptCladeAtNode, dupeSubTree, addSubTree, addLeaf, addSibLeaf, subTreeIsFullyBifurcating, nni, checkThatAllSelfNodesAreInTheTree, spr, randomSpr, inputTreesToSuperTreeDistances
from .Tree_write import patristicDistanceMatrix, tPickle, writePhylip, writeNexus, write, writeNewick, _getMcmcCommandComment, draw, textDrawList, eps, svg
if var.usePfAndNumpy:
from .Tree_model import data, model, _setData, _checkModelThing, newComp, newRMatrix, newGdasrv, setPInvar, setRelRate, setRjComp, setRjRMatrix, setModelThing, setModelThingsRandomly, setModelThingsNNodes, summarizeModelThingsNNodes, setTextDrawSymbol, setNGammaCat, modelSanityCheck, setEmpiricalComps
from .Tree_optSim import __del__, deleteCStuff, allocCStuff, setCStuff, _commonCStuff, calcLogLike, optLogLike, optTest, simplexDump, simulate, getSiteLikes, getSiteRates
from .Tree_fit import simsForModelFitTests, modelFitTests, compoTestUsingSimulations, bigXSquaredSubM, compStatFromCharFreqs
#from Tree_pyLike import allocBigP, allocPartPatternsAndCondLikes, calcPartPatterns, calcBigP, calcBigPPart, calcCondLikes, calcCondLikesForPart, calcCondLikesForPartAtNode,partLogLike, calcLogLike2
def __init__(self):
self.fName = None # The name of the file it came from
self.name = None
self.root = None
self.nodes = []
self.preOrder = None # nodeNums of nodes root -> tips A numpy array
self.postOrder = None # nodeNums of nodes tips -> root A numpy array
self.preAndPostOrderAreValid = 0
self.recipWeight = None # Usually weight is 1/N, so the reciprocal looks nicer
#self.weight = None # Only for floating point weights, so not usually ...
self._taxNames = [] # An ordered list. self.taxNames is a property
self._data = None # A Data object. self.data is a property
self.cTree = None # A pointer to a c-struct
self.logLike = None
self.partLikes = None
self._model = None # A Model object. self.model is a property
# self.nTax, a property
self._nTax = 0
self._nInternalNodes = -1
# self.nInternalNodes, a property
self.doDataPart = 0
self.nexusSets = None
self.nodeForSplitKeyDict = None
#########################################################
# Properties: data, model in Tree_model
#########################################################
#########################################################
# Properties: taxNames, nTax, nInternalNodes
#########################################################
def _setTaxNames(self, theTaxNames):
gm = ['Tree._setTaxNames()']
if type(theTaxNames) != type([]):
gm.append("You can only set property 'taxNames' to a list.")
gm.append("Got attempt to set to '%s'" % theTaxNames)
raise Glitch(gm)
self._taxNames = theTaxNames
if theTaxNames: # and not var.allowTreesWithDifferingTaxonSets: # Peter commented out until it is sorted. Why is it here?
self.checkTaxNames()
def _setTaxNamesFromLeaves(self):
tax = []
for n in self.iterNodes():
if n.isLeaf and n.name:
tax.append(n.name)
# This next line should not be needed, as a terminal root should already be flagged as a leaf.
elif n == self.root and n.name and n.getNChildren() < 2: # terminal root that has a taxName
tax.append(n.name)
tax.sort()
self._taxNames = tax
if self._taxNames:
self.checkTaxNames()
def _delTaxNames(self):
gm = ['Tree._delTaxNames()']
gm.append(" Caught an attempt to delete self.taxNames, but")
gm.append("self.taxNames is a property, so you can't delete it.")
gm.append("But you can set it to an empty list if you like.")
raise Glitch(gm)
taxNames = property(lambda self: self._taxNames, _setTaxNames, _delTaxNames)
def _getNTax(self):
# We can't rely on len(self.taxNames), cuz it might not exist.
#if hasattr(self, '_nTax') and self._nTax:
if self._nTax:
return self._nTax
else:
nTax = 0
if self.nodes:
for n in self.iterNodes():
if n.isLeaf:
nTax += 1
self._nTax = nTax
return nTax
nTax = property(_getNTax)
def _getNInternalNodes(self):
if self._nInternalNodes >= 0:
return self._nInternalNodes
elif not self.nodes:
return 0
else:
self._nInternalNodes = len([n for n in self.iterInternalsNoRoot()])
if not self.root.isLeaf:
self._nInternalNodes += 1
return self._nInternalNodes
def _setNInternalNodes(self, theNInternalNodes):
gm = ['Tree._setNInternalNodes()']
gm.append("Caught an attempt to set self.nInternalNodes, but")
gm.append("self.nInternalNodes is a property, so you shouldn't do that.")
raise Glitch(gm)
def _delNInternalNodes(self):
self._nInternalNodes = -1
nInternalNodes = property(_getNInternalNodes, _setNInternalNodes, _delNInternalNodes)
##################################################
##################################################
def dupe(self):
"""Duplicates self, but with no c-pointers. And no data object.
If there is a model, it is duped.
"""
if var.usePfAndNumpy:
storedData = None
if self.data:
storedData = self.data
self.data = None # We don't want to have to copy a big data object, now do we?
import copy
dupe = copy.deepcopy(self)
#print 'Tree.dupe() self.root=%s, dupe.root=%s' % (self.root, dupe.root)
# Delete cPointers
for n in dupe.nodes:
if n.cNode:
n.cNode = None
if dupe.cTree:
dupe.cTree = None
if var.usePfAndNumpy:
if dupe.model and dupe.model.cModel:
dupe.model.cModel = None
if var.usePfAndNumpy:
if storedData:
self.data = storedData
return dupe
def parseNexus(self, flob, translationHash=None, doModelComments=0):
"""Start parsing nexus format newick tree description.
From just after the command word 'tree', to the first paren of
the Newick part of the tree."""
gm = ['Tree.parseNexus()'] # re-defined below
if 0:
print('Tree.parseNexus() translationHash = %s' % translationHash)
print(' doModelComments = %s (nParts)' % doModelComments)
print(' var.nexus_doFastNextTok = %s' % var.nexus_doFastNextTok)
if var.nexus_doFastNextTok:
from .NexusToken2 import nextTok,safeNextTok
else:
from .NexusToken import nextTok,safeNextTok
tok = safeNextTok(flob, 'Tree.parseNexus()')
#print 'parseNexus() tok = %s' % tok
tok = func.nexusUnquoteName(tok)
if tok == '*':
print(gm[0])
print(" Ignoring '*' in tree description")
tok = safeNextTok(flob, 'Tree.parseNexus()')
if not func.nexusCheckName(tok):
gm.append("Bad tree name: '%s'" % tok)
raise Glitch(gm)
self.name = tok
#print "got name: '%s'" % tok
#print "%s" % tok
gm = ["Tree.parseNexus() '%s'" % self.name] # re-defining
tok = safeNextTok(flob, gm[0])
if tok != '=':
gm.append("Tree name must be followed by '='")
raise Glitch(gm)
# Generally this is the beginning of the newick tree
# description. But we have to look ahead to see if there is a
# weight comment.
savedPos = flob.tell()
while 1:
tok = safeNextTok(flob, gm[0])
#print "parseNexus: tok after '=' is '%s'" % tok
# This next bit will only happen if either var.nexus_getWeightCommandComments
# or var.nexus_getAllCommandComments is set.
if tok[0] == '[':
self.getWeightCommandComment(tok)
elif tok == '(':
flob.seek(-1, 1)
self.parseNewick(flob, translationHash, doModelComments)
#self.initFinish()
break
elif tok == ';':
gm.append("Got ';' before any tree description.")
raise Glitch(gm)
elif tok[0] in string.ascii_letters + string.digits + '_' + '\'':
flob.seek(savedPos, 0)
self.parseNewick(flob, translationHash, doModelComments)
#self.initFinish()
break
else:
gm.append('Expecting a newick tree description.')
raise Glitch(gm)
self.initFinish()
#print 'finished Tree.parseNexus()'
def getWeightCommandComment(self, tok):
if 0:
print('var.nexus_getWeightCommandComments = %s' % var.nexus_getWeightCommandComments)
print('var.nexus_getAllCommandComments = %s' % var.nexus_getAllCommandComments)
print("Got comment '%s', checking if it is a 'weight' comment." % tok)
gm = ["Tree.getWeightCommandComment()"]
from .NexusToken import nextTok,safeNextTok # python, not c, so I can use StringIO
cFlob = io.StringIO(tok)
cFlob.seek(1) # The [
cTok = nextTok(cFlob)
if not cTok:
#print "no cTok -- returning nothing"
return
lowCTok = cTok.lower()
if lowCTok in ['&r', '&u']:
#print "got %s -- returning nothing" % cTok
return
if lowCTok != '&w':
gm.append('Expecting a weight comment. Got %s' % tok)
raise Glitch(gm)
cTok = nextTok(cFlob)
# It might be a float, or the more usual 1/something
if ("." in cTok):
# A float?
try:
self.weight = float(cTok)
except:
gm.append("I can't grok '%s' in weight comment %s" % (cTok, tok))
raise Glitch(gm)
# Should check for scientific notation?
else:
try:
theNumerator = int(cTok)
if theNumerator != 1:
gm.append('Expecting a numerator 1 in weight comment %s' % tok)
raise Glitch(gm)
#print 'got theNumerator %i' % theNumerator
except ValueError:
gm.append('Expecting a numerator 1 in weight comment %s' % tok)
raise Glitch(gm)
cTok = nextTok(cFlob)
if cTok == '/':
cTok = safeNextTok(cFlob, 'Getting weight comment %s' % tok)
try:
self.recipWeight = int(cTok)
except ValueError:
gm.append('Bad denominator in weight comment %s' % tok)
raise Glitch(gm)
elif cTok == ']':
#self.recipWeight = theNumerator # ie 1, might as well leave it as None
pass
else:
gm.append("I can't grok '%s' in weight comment %s" % (cTok, tok))
raise Glitch(gm)
cFlob.close()
#print 'got recipWeight = %s' % self.recipWeight
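# Illustrative sketch (not part of p4): the kind of token the method above
# consumes. A Nexus tree line such as
#     tree t1 [&W 1/24] = ((A,B),C);
# carries its weight in a "[&W numerator/denominator]" command comment; the
# method stores the denominator in self.recipWeight (or a float in self.weight).
# A deliberately simplified parse of such a comment, without NexusToken:
def _demo_parse_weight_comment(comment):
    body = comment.strip("[]").strip()      # e.g. "&W 1/24"
    assert body.lower().startswith("&w")
    value = body[2:].strip()                # "1/24"
    if "/" in value:
        numerator, denominator = value.split("/")
        assert int(numerator) == 1
        return int(denominator)             # what goes into self.recipWeight
    return float(value)                     # what goes into self.weight
# _demo_parse_weight_comment("[&W 1/24]") == 24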
## ##Ignore
## def printStack(self, theStack): # only used for debugging parseNewick()
## print 'stack = ',
## for n in theStack:
## print "%i['%s'] " % (n.nodeNum, n.name),
## print ''
def parseNewick(self, flob, translationHash, doModelComments=0):
"""Parse Newick tree descriptions.
This is stack-based, and does not use recursion.
"""
#print 'parseNewick here. var.nexus_doFastNextTok=%s' % var.nexus_doFastNextTok
#print 'parseNewick here. doModelComments=%s' % doModelComments
#print "parseNewick() translationHash=%s, self.taxNames=%s" % (translationHash, self.taxNames)
if self.name:
gm = ["Tree.parseNewick(), tree '%s'" % self.name]
else:
gm = ['Tree.parseNewick()']
if hasattr(flob, 'name') and flob.name:
self.fName = flob.name
gm[0] += ", File %s" % self.fName
if doModelComments:
savedP4Nexus_getAllCommandComments = var.nexus_getAllCommandComments # restore at end
var.nexus_getAllCommandComments = 1
stack = []
isAfterParen = 1 # to start, even tho its not true
isAfterComma = 0
parenNestLevel = 0
lastPopped = None
if var.nexus_doFastNextTok:
from .NexusToken2 import nextTok,safeNextTok
else:
from .NexusToken import nextTok,safeNextTok
tok = nextTok(flob)
if not tok:
return
tok = func.nexusUnquoteName(tok) # Should generally be the opening paren, except if its a single-node tree.
while tok != ';':
#print "top of loop tok '%s', tok[0] is '%s'" % (tok, tok[0])
if tok == '(':
#print "Got '(': new node (%i)." % len(self.nodes)
if not (isAfterParen or isAfterComma):
gm.append('Got badly-placed paren, not after a paren or comma.')
raise Glitch(gm)
newNode = Node()
if doModelComments:
for pNum in range(doModelComments):
newNode.parts.append(NodePart())
newNode.br.parts.append(NodeBranchPart())
#self.printStack(stack)
if len(stack):
newNode.parent = stack[-1]
if newNode.parent.leftChild == None:
newNode.parent.leftChild = newNode
else:
newNode.parent.rightmostChild().sibling = newNode
else:
if len(self.nodes) == 0:
self.root = newNode
newNode.isLeaf = 1 # Sometimes. Generally not true-- corrected at the end.
else:
gm.append('Something is wrong. Stack is empty.')
gm.append('Extra paren?')
raise Glitch(gm)
newNode.nodeNum = len(self.nodes)
self.nodes.append(newNode)
stack.append(newNode)
isAfterParen = 1
parenNestLevel += 1
elif tok == ',':
if isAfterParen:
gm.append('Got comma after paren.')
raise Glitch(gm)
elif isAfterComma:
gm.append('Got comma after comma.')
raise Glitch(gm)
#self.printStack(stack)
try:
lastPopped = stack.pop()
except IndexError:
gm.append('Empty stack. Out of place comma?')
raise Glitch(gm)
isAfterComma = 1
if len(stack) == 0:
gm.append('Empty stack. Out of place comma?')
raise Glitch(gm)
elif tok == ')':
try:
lastPopped = stack.pop()
except IndexError:
gm.append('Empty stack. Out of place unparen?')
raise Glitch(gm)
isAfterParen = 0
isAfterComma = 0
parenNestLevel = parenNestLevel - 1
if parenNestLevel < 0:
gm.append('Unmatched unparen.')
raise Glitch(gm)
if len(stack) == 0 and len(self.nodes) > 1:
gm.append('Empty stack. Out of place unparen?')
raise Glitch(gm)
elif tok[0] in string.ascii_letters or tok[0] in string.digits or tok[0] == "'" or tok[0] in [
'_', '#', '\\', '/', '"', '(', ')']:
if len(self.nodes) == 0: # A single-node tree, not ()aName, rather just aName.
isAfterParen = 1
if not (isAfterParen or isAfterComma):
# Probably a name of an internal node.
if len(stack):
#if stack[-1].isLeaf and stack[-1].name != '(':
if stack[-1].name:
if not var.newick_allowSpacesInNames:
# a second name after a node name, eg (A foo, B) =>foo is bad
# or eg (A, B)foo bar => bar is bad
gm.append("Badly placed token '%s'." % tok)
gm.append("Appears to be a second node name, after '%s'" % stack[-1].name)
gm.append('Missing comma maybe? Or punctuation or spaces in an unquoted name?')
gm.append("To allow reading Newick (or Nexus) with spaces, ")
gm.append("turn var.newick_allowSpacesInNames on")
raise Glitch(gm)
else:
stack[-1].name += ' '
stack[-1].name += tok
else:
# Usually this...
#print "naming node %i as '%s'" % (stack[-1].nodeNum, tok)
# We allow bad names on internal nodes, ie we do not nexusCheckName(tok)
stack[-1].name = tok
else: # len(stack) == 0
if lastPopped and lastPopped.name is None: # ()A
#print "naming lastPopped node %i with '%s'" % (lastPopped.nodeNum, tok)
lastPopped.isLeaf = 1
#lastPopped.label = tok
lastPopped.name = tok
else:
gm.append("Badly placed token '%s' in tree description." % tok)
raise Glitch(gm)
else:
# A new terminal node.
if tok[0] in string.ascii_letters or tok[0] in ['_']:
if translationHash and tok in translationHash:
#print 'got key %s, val is %s' % (tok, translationHash[tok])
tok = translationHash[tok]
elif tok[0] in string.digits:
if var.nexus_allowAllDigitNames:
if translationHash and tok in translationHash:
#print 'got key %s, val is %s' % (tok, translationHash[tok])
tok = translationHash[tok]
else:
try:
tok = int(tok)
if translationHash and repr(tok) in translationHash:
tok = translationHash[repr(tok)]
elif translationHash and repr(tok) not in translationHash:
gm.append("There is a 'translation' for this tree, but the")
gm.append("number '%i' in the tree description" % tok)
gm.append('is not included in that translate command.')
raise Glitch(gm)
elif self.taxNames:
try:
tok = self.taxNames[tok - 1]
except IndexError:
gm.append("Can't make sense out of token '%s' for a new terminal node." % tok)
gm.append('There is no translate command, and the taxNames does not')
gm.append('have a value for that number.')
raise Glitch(gm)
else:
gm.append("We have a taxon name '%s', composed only of numerals." % tok)
gm.append(" ")
gm.append('The Nexus format allows tree specifications with no')
gm.append('translate command to use integers to refer to taxa.')
gm.append('That is possible because in a proper Nexus file the')
gm.append('taxa are defined before the trees. P4, however, does')
gm.append('not require definition of taxa before the trees, and in')
gm.append('the present case no definition was made. Deal with it.')
raise Glitch(gm)
except ValueError:
if translationHash and repr(tok) in translationHash:
tok = translationHash[repr(tok)]
#else: # starts with a digit, but it is not an int.
# gm.append('Problem token %s' % tok)
# raise Glitch, gm
#print "Got terminal node '%s'" % tok
newNode = Node()
if doModelComments:
for pNum in range(doModelComments):
newNode.parts.append(NodePart())
newNode.br.parts.append(NodeBranchPart())
newNode.isLeaf = 1
if func.nexusCheckName(tok):
newNode.name = tok
#print 'got newNode.name = %s' % tok
else:
gm.append("Bad name '%s'" % tok)
raise Glitch(gm)
if len(stack):
newNode.parent = stack[-1]
if newNode.parent.leftChild == None:
newNode.parent.leftChild = newNode
else:
newNode.parent.rightmostChild().sibling = newNode
newNode.nodeNum = len(self.nodes)
if len(self.nodes) == 0:
self.root = newNode
self.nodes.append(newNode)
stack.append(newNode)
isAfterParen = 0
isAfterComma = 0
elif tok == ':':
# Looking for a br.len number, which might be eg 0.234 or -1.23e-05
# It might be a multi-token operation. Accumulate tok's in theNum
theNum = nextTok(flob)
if not theNum:
gm.append('Tree description ended with a colon. Bad!')
raise Glitch(gm)
#print " Got token after colon: '%s'" % theNum
if theNum == '-' or theNum == '+':
tok = nextTok(flob)
#print " Got tok: '%s' after '%s'" % (tok, theNum)
if not tok:
gm.append('Trying to deal with a branch length.')
gm.append("It didn't work, tho.")
gm.append("Got this after colon: '%s'" % theNum)
gm.append('followed by nothing.')
raise Glitch(gm)
theNum += tok
try:
# If it is a simple number like 0.123 or -23, then we are finished.
stack[-1].br.len = float(theNum) # Won't work if it ends in 'e'
#print ' Successfully got br.len %f' % stack[-1].br.len
except ValueError:
# The first bit after the colon is hopefully something like +1.23e
if theNum[-1] not in ['e', 'E']:
gm.append('Trying to deal with a branch length after a colon, but I am totally confused.')
gm.append("Can't make sense out of '%s'" % theNum)
raise Glitch(gm, 'newick_badBranchLength')
try:
float(theNum[:-1])
except ValueError:
gm.append('Trying to deal with a branch length after a colon, but I am totally confused.')
gm.append("Can't make sense out of '%s'" % theNum)
raise Glitch(gm, 'newick_badBranchLength')
# Now we are sure that the first bit *is* something like +1.23e
# We do not allow spaces after the 'e', so we do not use nextTok().
# That introduces a bug, where comments inserted in the number don't get ignored. <<== unfixed bug!
# The first thing must be a '+' or a '-'.
c = flob.read(1)
if not c:
gm.append('Trying to deal with a branch length, possibly in scientific notation.')
gm.append("Got '%s' after the colon, but then nothing." % theNum)
raise Glitch(gm)
if c not in ['+', '-']:
gm.append('Trying to deal with a branch length, possibly in scientific notation.')
gm.append("Got '%s' after the colon." % theNum)
gm.append("Expecting a '+' or '-' after that (no spaces allowed).")
gm.append("Got '%s'." % c)
raise Glitch(gm)
# Accumulate characters in 'theExp'. We need at least one digit.
theExp = c
c = flob.read(1)
if not c:
gm.append('Trying to deal with a branch length, possibly in scientific notation.')
gm.append("Got '%s%s' after the colon, but then nothing." % (theNum, theExp))
raise Glitch(gm)
if c not in string.digits:
gm.append("Trying to deal with a branch length, possibly in scientific notation.")
gm.append("Got '%s%s' after the colon." % (theNum, theExp))
gm.append('Expecting one or more digits.')
gm.append("Got '%s'" % c)
raise Glitch(gm)
theExp += c
# So we got one good digit. Are there any more?
while 1:
c = flob.read(1)
if not c:
gm.append('Trying to deal with a branch length, possibly in scientific notation.')
gm.append("Got '%s%s' after the colon, but then nothing." % (theNum, theExp))
raise Glitch(gm)
# We got something. If its a digit, add it to
# theExp. If its anything else, back up one
# space and then break
if c in string.digits:
theExp += c
else:
flob.seek(-1, 1)
break
#print " At this point, theNum='%s' and theExp='%s'" % (theNum, theExp)
try:
#print " Trying to see if theExp '%s' can be converted to an int." % theExp
int(theExp)
try:
theBrLen = float(theNum + theExp)
#print ' Successfully got br.len %g (from %s%s)' % (theBrLen, theNum, theExp)
stack[-1].br.len = theBrLen
except ValueError:
gm.append('Trying to deal with a branch length, possibly in scientific notation.')
gm.append("It didn't work, tho.")
gm.append("Got these after colon: '%s' and '%s'" % (theNum, theExp))
gm.append('And they could not be converted to an exponential float.')
raise Glitch(gm)
except ValueError:
gm.append('Trying to deal with a branch length, possibly in scientific notation.')
gm.append("It didn't work, tho.")
gm.append("Got these after colon: '%s' and '%s'." % (theNum, theExp))
gm.append('And the latter does not appear to be an int.')
raise Glitch(gm)
elif tok[0] == '[':
#print "openSquareBracket. Got tok '%s'" % tok
# if doModelComments is set, it should be set to nParts.
if doModelComments:
# eg [& c0.1 r0.0]
n = stack[-1]
#print 'got comment %s, node %i' % (tok, n.nodeNum)
cFlob = io.StringIO(tok)
cFlob.seek(2)
tok2 = NexusToken.safeNextTok(cFlob)
while 1:
if tok2 == ']':
break
elif tok2[0] in ['c', 'r', 'g']:
ending = tok2[1:]
splitEnding = ending.split('.')
try:
firstNum = int(splitEnding[0])
secondNum = int(splitEnding[1])
except ValueError:
gm.append('Bad command comment %s' % tok)
raise Glitch(gm)
if tok2[0] == 'c':
n.parts[firstNum].compNum = secondNum
if tok2[0] == 'r':
n.br.parts[firstNum].rMatrixNum = secondNum
if tok2[0] == 'g':
n.br.parts[firstNum].gdasrvNum = secondNum
else:
gm.append('Bad command comment %s' % tok)
raise Glitch(gm)
tok2 = NexusToken.safeNextTok(cFlob)
elif 0:
# Ugly hack for RAxML trees with bootstrap
# supports in square brackets after the br len, on
# internal nodes. First modify the eg [100] to be
# [&100], set var.nexus_getAllCommandComments =
# True, and turn on this elif section.
myNode = stack[-1]
assert not myNode.isLeaf
assert not myNode.name
mySupportString = tok[2:-1]
#print mySupportString
myNode.name = mySupportString
elif var.nexus_readBeastTreeCommandComments:
n = stack[-1]
i = 2
while i < (len(tok) - 1):
j = i
inBraces = False
while 1:
j += 1
#print tok[j]
if tok[j] == ']':
break
if tok[j] == '{':
inBraces = True
if inBraces:
if tok[j] == '}':
inBraces = False
else:
if tok[j] == ',':
break
substring = tok[i:j]
#print substring
splSS = substring.split('=')
theNameString = splSS[0].strip()
if '%' in theNameString:
theNameString = theNameString.replace('%', '')
theValString = splSS[1]
if '{' in theValString:
theValString = theValString.replace('{', '(')
theValString = theValString.replace('}', ')')
if theValString == 'true':
theVal = True
elif theValString == 'false':
theVal = False
else:
theVal = eval(theValString)
assert type(theVal) in [float, tuple, bool]
n.__setattr__(theNameString, theVal)
i = j + 1
else:
gm.append("I can't make sense of the token '%s'" % tok)
if len(tok) == 1:
if tok[0] in var.punctuation:
gm.append("The token is in var.punctuation. If you don't think it should")
gm.append("be, you can modify what p4 thinks that punctuation is.")
if var.nexus_doFastNextTok:
gm.append("But to do that you can't use nexus_doFastNextToken, so you ")
gm.append("need to turn that off, temporarily. ")
gm.append("(var.nexus_doFastNextTok is currently on.)")
gm.append("So you might do this:")
gm.append("var.nexus_doFastNextTok = False ")
gm.append("var.punctuation = var.phylip_punctuation")
gm.append("(or use your own definition -- see Var.py)")
gm.append("read('yourWackyTreeFile.phy')")
gm.append("That might work.")
else:
gm.append("(Doing that does not work if nexus_doFastNextToken is turned on,")
gm.append("but var.nexus_doFastNextTok is currently off.)")
gm.append("So you might do this:")
gm.append("var.punctuation = var.phylip_punctuation")
gm.append("(or use your own definition -- see Var.py)")
gm.append("read('yourWackyTreeFile.phy')")
gm.append("That might work.")
if tok[0] not in var.punctuation:
gm.append("The token is not in your current var.punctuation.")
if var.nexus_doFastNextTok:
gm.append("var.nexus_doFastNextTok is currently on.")
gm.append("It uses a hard-wired list of punctuation, and so you may need to ")
gm.append("need to turn var.nexus_doFastNextTok off, temporarily. ")
gm.append("So you might do this:")
gm.append("var.nexus_doFastNextTok = False ")
gm.append("read('yourWackyTreeFile.phy')")
gm.append("That might work.")
else:
gm.append("var.nexus_doFastNextTok is currently off.")
#gm.append("tok[0] is '%s'" % tok[0])
raise Glitch(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, 'Tree init, reading tree string'))
#print 'got tok for next round = %s' % tok
# This is the end of the "while tok != ';':" loop
#print '\n*** Stack len = %i ***' % len(stack)
if parenNestLevel > 0:
gm.append('Unmatched paren.')
raise Glitch(gm)
elif parenNestLevel < 0:
gm.append('Unmatched unparen.')
raise Glitch(gm)
if len(stack) == 0:
if len(self.nodes) == 1:
pass
else:
gm.append("Got an oddly-placed ';' in the tree %s description." % self.name)
self.dump(treeInfo=0, nodeInfo=1)
raise Glitch(gm)
elif len(stack) > 1:
gm.append("Got an oddly-placed ';' in the tree %s description." % self.name)
#gm.append('(stack len = %i)' % len(stack)
#self.dump(tree=0, node=1)
raise Glitch(gm)
if self.root.leftChild and self.root.leftChild.sibling: # usually this
self.root.isLeaf = 0
# Should a root on a stick be a leaf? If it is just for
# display purposes, then it should be ok to not be a leaf.
# But if you are going to re-Root, then it will cause trouble.
# So by default, a root on a stick should be a leaf. I think.
# Hopefully if you are dealing with this, then you know what
# you are doing and what you want, and how to modify things to
# get it.
# Uncomment this next line to make it always non-leaf, even if it is a leaf.
#self.root.isLeaf = 0 # do this to always have a non-leaf root-on-a-stick <-- Potential Trouble!!!
self.root.br = None
#self.draw()
#self.dump(tree=0, node=1, treeModel=0)
if doModelComments:
# restore the value of var.nexus_getAllCommandComments, which was saved above.
var.nexus_getAllCommandComments = savedP4Nexus_getAllCommandComments
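# Illustrative sketch (not part of p4): the stack discipline used above,
# reduced to '('/')' bookkeeping on a toy Newick string. The real parser also
# pops on ',' to finish the previous sibling, and hangs names and branch
# lengths off the node currently on top of the stack.
def _demo_paren_walk(newick):
    stack, depth = [], 0
    for ch in newick:
        if ch == "(":
            stack.append(ch)
            depth = max(depth, len(stack))
        elif ch == ")":
            stack.pop()
    assert not stack, "unmatched paren"
    return depth
# _demo_paren_walk("((A:0.1,B:0.2):0.05,C:0.3);") == 2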
##Ignore
def initFinish(self):
if self.name:
gm = ["Tree.initFinish() tree '%s'" % self.name]
else:
gm = ['Tree.initFinish()']
# Checking for duped taxon names used to be here, but it was
# pushed further ahead, so that self.taxNames can be corrected
# also, if need be. At this point, self does not have
# taxNames set.
# Check that all terminal nodes have names
for item in self.nodes:
if item.isLeaf:
#print 'leaf name %s' % item.name
if not item.name:
if item == self.root:
if var.warnAboutTerminalRootWithNoName:
print('Tree.initFinish()')
print(' Non-fatal warning: the root is terminal, but has no name.')
print(' This may be what you wanted. Or not?')
print(' (To get rid of this warning, turn off var.warnAboutTerminalRootWithNoName)')
else:
gm.append('Got a terminal node with no name.')
raise Glitch(gm)
if var.usePfAndNumpy:
self.preOrder = numpy.array([var.NO_ORDER] * len(self.nodes), numpy.int32)
self.postOrder = numpy.array([var.NO_ORDER] * len(self.nodes), numpy.int32)
else:
self.preOrder = [var.NO_ORDER] * len(self.nodes)
self.postOrder = [var.NO_ORDER] * len(self.nodes)
if len(self.nodes) > 1:
self.setPreAndPostOrder()
def checkDupedTaxonNames(self):
# Called by func._tryToReadNexusFile() and func._tryToReadPhylipFile()
# Check for duped names
if self.name:
gm = ["Tree.checkDupedTaxonNames() tree '%s'" % self.name]
else:
gm = ['Tree.checkDupedTaxonNames()']
if self.fName:
gm[0] += ' file=%s' % self.fName
hasDupedName = 0
loNames = []
for n in self.nodes:
if n.isLeaf and n.name:
loNames.append(n.name.lower())
for loName in loNames:
if loNames.count(loName) > 1:
if var.allowDupedTaxonNames:
pass
elif not var.doRepairDupedTaxonNames:
gm.append("Got duplicated taxon (lowercased) name '%s'." % loName)
gm.append('Since var.doRepairDupedTaxonNames is not turned on, p4 will not fix duplications.')
gm.append('To repair duplications verbosely, set ')
gm.append('var.doRepairDupedTaxonNames = 1')
gm.append('To repair duplications silently, set')
gm.append('var.doRepairDupedTaxonNames = 2')
raise Glitch(gm)
hasDupedName = 1
break
if hasDupedName:
#print self.name
if var.allowDupedTaxonNames:
# more hacking ...
if var.allowDupedTaxonNames == 2: # ie silently.
pass
else:
complainedAlready = []
for loName in loNames:
if loNames.count(loName) > 1 and loName not in complainedAlready:
if self.name:
print(" Tree %s. Duped tax name (lowercased) '%s'" % (
self.name, loName))
else:
print(" Duped tax name (lowercased) '%s'" % loName)
complainedAlready.append(loName)
elif var.doRepairDupedTaxonNames:
repairedNames = []
for loName in loNames:
if loNames.count(loName) > 1 and loName not in repairedNames:
repairCounter = 1
repairCounter2 = 1
for n in self.nodes:
if n.isLeaf:
if n.name and n.name.lower() == loName:
newName = '%s_%i' % (n.name, repairCounter)
if var.doRepairDupedTaxonNames == 1:
if self.name:
print(" Tree %s. Changing '%s' to '%s'" % (
self.name, n.name, newName))
else:
print(" Changing '%s' to '%s'" % (n.name, newName))
n.name = newName
repairedNames.append(loName)
repairCounter += 1
if self.taxNames:
for tNameNum in range(len(self.taxNames)):
tName = self.taxNames[tNameNum]
if tName.lower() == loName:
newName = '%s_%i' % (tName, repairCounter2)
self.taxNames[tNameNum] = newName
repairCounter2 += 1
assert repairCounter == repairCounter2, "Got a problem with re-naming duped taxa."
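# Illustrative sketch (not part of p4): the renaming scheme applied above when
# var.doRepairDupedTaxonNames is on. Leaf names that collide case-insensitively
# get numbered suffixes, and self.taxNames is renamed in step so the tree and
# taxNames stay consistent.
def _demo_repair_duped_names(names):
    lowered = [nm.lower() for nm in names]
    counters, fixed = {}, []
    for nm in names:
        key = nm.lower()
        if lowered.count(key) > 1:
            counters[key] = counters.get(key, 0) + 1
            fixed.append("%s_%i" % (nm, counters[key]))
        else:
            fixed.append(nm)
    return fixed
# _demo_repair_duped_names(["Homo", "Pan", "homo"]) == ["Homo_1", "Pan", "homo_2"]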
##############
##############
#
# dump()
#
##############
##############
def dump(self, tree=0, node=0, model=0, all=0):
"""Print rubbish about self.
tree
is the default, showing basic info about the tree.
node
shows info about all the nodes.
model
shows which modelThing number goes on which node.
(which you can also get by drawing the tree)
(If you want the info about the model itself, do a
aTree.model.dump() instead.)
"""
if all:
self._doTreeInfo()
self._doNodeInfo()
self._doNodeModelInfo()
elif not tree and not node and not model:
self._doTreeInfo()
else:
if tree:
self._doTreeInfo()
if node:
self._doNodeInfo()
if model:
self._doNodeModelInfo()
def _doTreeInfo(self):
if self.name:
print("Tree '%s' dump" % self.name)
else:
print('Tree dump. No name.')
if self.fName:
print(" From file '%s'" % self.fName)
else:
print(" From an unknown file, or no file.")
if self.root:
print(' Node %i is root' % self.root.nodeNum)
else:
print(' There is no root')
if self.recipWeight:
print(' The tree recipWeight is %s' % self.recipWeight)
else:
print(' There is no recipWeight')
print(' There are %i nodes' % len(self.nodes))
terminals = 0
for i in self.nodes:
if i.isLeaf:
terminals += 1
print(' of which %i are terminal nodes' % terminals)
if self.data:
print(' There is a data object, with %i parts.' % self.data.nParts)
else:
print(' There is no data object.')
if self.data:
print(' The data came from file(s):')
for a in self.data.alignments:
if a.fName:
print(' %s' % a.fName)
if self.model:
print(' There is a model object, with %i parts.' % self.model.nParts)
if self.model.cModel:
print(' model.cModel is %i' % self.model.cModel)
else:
print(' There is no cModel.')
else:
print(' There is no model object.')
if self.taxNames:
print(' There is a taxNames list.')
else:
print(' There is no taxNames list.')
if self.cTree:
print(' cTree is %i' % self.cTree)
else:
print(' There is no cTree.')
def _doNodeInfo(self):
"""Basic rubbish about nodes."""
print('\n-------- nodes -----------------------------------------')
print('%7s %6s %6s %6s %6s %7s %6s %4s' % ('nodeNum', 'isLeaf', 'parent', 'leftCh',
'siblng', 'br.len', 'seqNum', 'name'))
for n in self.nodes:
print('%7s %6s' % (n.nodeNum, n.isLeaf), end=' ')
if n.parent:
print('%6s' % n.parent.nodeNum, end=' ')
else:
print('%6s' % 'None', end=' ')
if n.leftChild:
print('%6s' % n.leftChild.nodeNum, end=' ')
else:
print('%6s' % 'None', end=' ')
if n.sibling:
print('%6s' % n.sibling.nodeNum, end=' ')
else:
print('%6s' % 'None', end=' ')
if n.br and (n.br.len or n.br.len == 0.0):
print('%7.3f' % n.br.len, end=' ')
else:
print('%7s' % 'None', end=' ')
if n.seqNum or n.seqNum == 0:
print('%6s' % n.seqNum, end=' ')
else:
print('%6s' % 'None', end=' ')
if n.name:
print(' %s' % n.name)
else:
print(' %s' % 'None')
print('--------------------------------------------------------\n')
doMore = 0
for n in self.iterNodesNoRoot():
if hasattr(n.br, 'name') and n.br.name:
doMore = 1
break
elif hasattr(n.br, 'uName') and n.br.uName:
doMore = 1
break
elif hasattr(n.br, 'support') and n.br.support:
doMore = 1
break
if doMore:
print('\n-------- more node stuff -------------------------------')
print('%7s %10s %10s %10s %4s' % ('nodeNum', 'br.name', 'br.uName', 'br.support', 'name'))
for n in self.nodes:
print('%7s' % n.nodeNum, end=' ')
if n.br and hasattr(n.br, 'name') and n.br.name:
print('%10s' % n.br.name, end=' ')
else:
print('%10s' % '-', end=' ')
if n.br and hasattr(n.br, 'uName') and n.br.uName:
print('%10s' % n.br.uName, end=' ')
else:
print('%10s' % '-', end=' ')
if n.br and hasattr(n.br, 'support') and n.br.support:
print('%10.4f' % n.br.support, end=' ')
else:
print('%10s' % '-', end=' ')
if n.name:
print(' %s' % n.name)
else:
print(' %s' % 'None')
print('--------------------------------------------------------\n')
doMore = 0
for n in self.nodes:
if hasattr(n, 'rootCount') and n.rootCount:
doMore = 1
break
if n.br:
if hasattr(n.br, 'color') and n.br.color:
doMore = 1
break
elif hasattr(n.br, 'biRootCount') and n.br.biRootCount:
doMore = 1
break
if doMore:
print('\n-------- even more node stuff --------------------------')
print('%7s %10s %14s %10s %4s' % ('nodeNum', 'br.color', 'br.biRootCount', 'rootCount', 'name'))
for n in self.nodes:
print('%7s' % n.nodeNum, end=' ')
if n.br and hasattr(n.br, 'color') and n.br.color:
print('%10s' % n.br.color, end=' ')
else:
print('%10s' % '-', end=' ')
if n.br and hasattr(n.br, 'biRootCount') and n.br.biRootCount:
print('%14s' % n.br.biRootCount, end=' ')
else:
print('%14s' % '-', end=' ')
if hasattr(n, 'rootCount') and n.rootCount:
print('%10s' % n.rootCount, end=' ')
else:
print('%10s' % '-', end=' ')
if n.name:
print(' %s' % n.name)
else:
print(' %s' % 'None')
print('--------------------------------------------------------\n')
def _doNodeModelInfo(self):
if not self.model:
print('\n****** Node Model Info. No model.')
if not self.data:
print('(no data attached, either)')
else:
print('\n****** Node Model Info. nParts=%s' % self.model.nParts)
if not self.data:
print('no data')
if not self.model.nParts:
return
print('\nComps in the nodes:')
print(' %13s' % 'nodeNum', end=' ')
for i in range(self.model.nParts):
print(' %8s' % 'part%i' % i, end=' ')
print('')
for n in self.nodes:
print(' %13i' % n.nodeNum, end=' ')
for i in range(self.model.nParts):
print('%8i' % n.parts[i].compNum, end=' ')
print('')
print('\nrMatrices in the nodes:')
print(' %13s' % 'nodeNum', end=' ')
for i in range(self.model.nParts):
print(' %8s' % 'part%i' % i, end=' ')
print('')
for n in self.iterNodesNoRoot():
print(' %13i' % n.nodeNum, end=' ')
for i in range(self.model.nParts):
print('%8i' % n.br.parts[i].rMatrixNum, end=' ')
print('')
print('\ngdasrvs in the nodes:')
print(' %13s' % '', end=' ')
for i in range(self.model.nParts):
print(' %8s' % 'part%i' % i, end=' ')
print('')
print(' %13s' % 'nGammaCats ->', end=' ')
for i in range(self.model.nParts):
print('%8i' % self.model.parts[i].nGammaCat, end=' ')
print('\n')
print(' %13s' % 'nodeNum', end=' ')
for i in range(self.model.nParts):
print(' %8s' % 'part%i' % i, end=' ')
print('')
for n in self.iterNodesNoRoot():
print(' %13i' % n.nodeNum, end=' ')
for i in range(self.model.nParts):
print('%8i' % n.br.parts[i].gdasrvNum, end=' ')
print('')
###########################
#
# Set a NexusSets object, for taxSets ...
#
###########################
def setNexusSets(self):
"""Set self.nexusSets from var.nexusSets.
A deepcopy is made of var.nexusSets, only if it exists. If
var.nexusSets does not yet exist, a new blank one is not made
(cf this method in Alignment class, where it would be
made).
Important! This method depends on a correct taxNames.
"""
assert self.taxNames, "This method requires correct taxNames, in the correct order."
gm = ["Tree.setNexusSets()"]
if not var.nexusSets:
return
self.nexusSets = copy.deepcopy(var.nexusSets)
self.nexusSets.taxNames = self.taxNames
self.nexusSets.nTax = self.nTax
self.nexusSets.constant = None
self.nexusSets.gapped = None
self.nexusSets.charSets = []
self.nexusSets.charPartitions = []
if self.nexusSets.taxSets:
#print "%s. There are %i taxSets." % (gm[0], len(self.nexusSets.taxSets))
# Check that no taxSet name is a taxName
lowSelfTaxNames = [txName.lower() for txName in self.taxNames]
for ts in self.nexusSets.taxSets:
if ts.lowName in lowSelfTaxNames:
gm.append("Can't have taxSet names that are the same (case-insensitive) as a tax name")
gm.append("Lowercased taxSet name '%s' is the same as a lowcased taxName." % ts.name)
raise Glitch(gm)
self.nexusSets.lowTaxNames = lowSelfTaxNames
# If it is standard format,
# convert triplets to numberTriplets, and then mask
for ts in self.nexusSets.taxSets:
if ts.format == 'standard':
ts.setNumberTriplets()
ts.setMask()
#print ts.mask
elif ts.format == 'vector':
assert ts.mask
if len(ts.mask) != self.nTax:
gm.append("taxSet %s" % ts.name)
gm.append("It is vector format, but the length is wrong.")
gm.append("taxSet mask is length %i, but self nTax is %i" % (len(ts.mask), self.nTax))
raise Glitch(gm)
else:
gm.append("taxSet %s" % ts.name)
gm.append("unknown format %s" % ts.format)
raise Glitch(gm)
###########################
#
# Get lists of nodeNums ...
#
###########################
def getNodeNumsAbove(self, nodeSpecifier, leavesOnly=0):
"""Gets a list of nodeNums, in postOrder, above nodeSpecifier.
The node specified is not included.
"""
x, y = self.getPreAndPostOrderAbove(nodeSpecifier)
if leavesOnly:
tOnly = []
for i in y[:-1]:
if self.nodes[i].isLeaf:
tOnly.append(i)
return tOnly
return y[:-1]
def getSeqNumsAbove(self, nodeSpecifier):
"""Gets a list of seqNums above nodeSpecifier."""
x, y = self.getPreAndPostOrderAbove(nodeSpecifier)
seqNums = []
for nNum in y[:-1]:
n = self.nodes[nNum]
if n.isLeaf:
seqNums.append(n.seqNum)
return seqNums
# def getAllChildrenNums(self, specifier):
# """Returns a list of the nodeNums of all children of the specified node
# Ambiguous, unused.
# """
# theNode = self.node(specifier)
# if not theNode:
# gm = ['Tree.getChildrenNums()']
# gm.append('Bad node specifier')
# raise Glitch, gm
# ret = []
# x, y = self.getPreAndPostOrderAbove(specifier)
# for i in y[:-1]:
# ret.append(self.nodes[i].nodeNum)
# return ret
def getAllLeafNames(self, specifier):
"""Returns a list of the leaf names of all children
"""
theNode = self.node(specifier)
if not theNode:
gm = ['Tree.getAllLeafNames()']
gm.append('Bad node specifier')
raise Glitch(gm)
ret = []
x, y = self.getPreAndPostOrderAbove(specifier)
for i in y[:-1]:
if self.nodes[i].isLeaf:
ret.append(self.nodes[i].name)
return ret
def getChildrenNums(self, specifier):
"""Returns a list of nodeNums of children of the specified node.
See also Node.getNChildren()"""
theNode = self.node(specifier)
if not theNode:
gm = ['Tree.getChildrenNums()']
gm.append('Bad node specifier')
raise Glitch(gm)
ret = []
c = theNode.leftChild
while 1:
if c:
ret.append(c.nodeNum)
c = c.sibling
else:
return ret
def setPreAndPostOrder(self):
"""Sets or re-sets self.preOrder and self.postOrder lists of node numbers.
PreOrder starts from the root and goes to the tips; postOrder
starts from the tips and goes to the root."""
self.getPreAndPostOrderAboveRoot()
self.preAndPostOrderAreValid = 1
def getPreAndPostOrderAbove(self, nodeSpecifier):
"""Returns 2 lists of node numbers, preOrder and postOrder.
This uses a stack, not recursion, so it should work for large
trees without bumping into the recursion limit. The 2 lists
are relative to the node specified, and include the node
specified. PreOrder starts from theNode and goes to the tips;
postOrder starts from the tips and goes to theNode."""
gm = ['Tree.getPreAndPostOrderAbove()']
theNode = self.node(nodeSpecifier)
preOrder = [] # nodeNum's
postOrder = []
if not theNode.leftChild:
preOrder.append(theNode.nodeNum)
postOrder.append(theNode.nodeNum)
return preOrder, postOrder
stack = [] # nodes
stack.append(theNode)
preOrder.append(theNode.nodeNum)
while len(stack):
if stack[-1].leftChild:
#print 'leftChild: %i' % stack[-1].leftChild.nodeNum
theNodeNum = stack[-1].leftChild.nodeNum
stack.append(stack[-1].leftChild)
preOrder.append(theNodeNum)
elif stack[-1].sibling:
#print 'sibling: %i' % stack[-1].sibling.nodeNum
theNodeNum = stack[-1].sibling.nodeNum
theSib = stack[-1].sibling
#print ' postOrder appending u %i' % stack[-1].nodeNum
postOrder.append(stack[-1].nodeNum)
stack.pop()
stack.append(theSib)
preOrder.append(theNodeNum)
else:
#print ' postOrder appending v %i' % stack[-1].nodeNum
postOrder.append(stack[-1].nodeNum)
stack.pop()
if len(stack) == 0:
break
#print ' postOrder appending w %i' % stack[-1].nodeNum
postOrder.append(stack[-1].nodeNum)
theNode = stack.pop()
while not theNode.sibling:
if len(stack) == 0:
break
#print ' postOrder appending x %i' % stack[-1].nodeNum
postOrder.append(stack[-1].nodeNum)
theNode = stack.pop()
if len(stack) == 0:
break
if theNode.sibling:
stack.append(theNode.sibling)
preOrder.append(theNode.sibling.nodeNum)
else:
gm.append('Problemo.')
gm.append('xxx got theNode %s' % theNode.nodeNum)
raise Glitch(gm)
return preOrder, postOrder
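    # Illustrative sketch (not part of p4): the same stack-based, non-recursive
    # idea shown on a simplified node type that stores an explicit children
    # list rather than p4's leftChild/sibling pointers.  The names Node.num
    # and Node.children are assumptions for this sketch only.
    #
    #   def preAndPostOrder(root):
    #       pre, post, stack = [], [], [(root, False)]
    #       while stack:
    #           node, expanded = stack.pop()
    #           if expanded:
    #               post.append(node.num)
    #           else:
    #               pre.append(node.num)
    #               stack.append((node, True))
    #               for child in reversed(node.children):
    #                   stack.append((child, False))
    #       return pre, post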
def getPreAndPostOrderAboveRoot(self):
"""Sets self.preOrder and self.postOrder.
This uses a stack, not recursion, so it should work for large
trees without bumping into the recursion limit. PreOrder
starts from the root and goes to the tips; postOrder starts
from the tips and goes to the root."""
gm = ['Tree.getPreAndPostOrderAboveRoot()']
theNode = self.root
preOrdIndx = 0
postOrdIndx = 0
        if self.preOrder is None or self.postOrder is None or len(self.preOrder) != len(self.nodes) or len(self.postOrder) != len(self.nodes):
if var.usePfAndNumpy:
self.preOrder = numpy.array([var.NO_ORDER] * len(self.nodes), numpy.int32)
self.postOrder = numpy.array([var.NO_ORDER] * len(self.nodes), numpy.int32)
else:
self.preOrder = [var.NO_ORDER] * len(self.nodes)
self.postOrder = [var.NO_ORDER] * len(self.nodes)
#print "xx self.preOrder=%s" % self.preOrder
if not theNode.leftChild:
self.preOrder[preOrdIndx] = theNode.nodeNum
preOrdIndx += 1
self.postOrder[postOrdIndx] = theNode.nodeNum
postOrdIndx += 1
else:
stack = [] # nodes
stack.append(theNode)
self.preOrder[preOrdIndx] = theNode.nodeNum
preOrdIndx += 1
while len(stack):
if stack[-1].leftChild:
#print 'leftChild: %i (%s)' % (stack[-1].leftChild.nodeNum, [n.nodeNum for n in stack])
theNodeNum = stack[-1].leftChild.nodeNum
stack.append(stack[-1].leftChild)
self.preOrder[preOrdIndx] = theNodeNum
preOrdIndx += 1
elif stack[-1].sibling:
#print 'sibling: %i (%s)' % (stack[-1].sibling.nodeNum, [n.nodeNum for n in stack])
theNodeNum = stack[-1].sibling.nodeNum
theSib = stack[-1].sibling
#print ' postOrder appending u %i' % stack[-1].nodeNum
self.postOrder[postOrdIndx] = stack[-1].nodeNum
postOrdIndx += 1
stack.pop()
stack.append(theSib)
try:
self.preOrder[preOrdIndx] = theNodeNum
except IndexError:
gm.append("preOrdIndx=%s, theNodeNum=%i" % (preOrdIndx, theNodeNum))
gm.append("preOrder = %s" % self.preOrder)
raise Glitch(gm)
preOrdIndx += 1
else:
#print ' postOrder appending v %i' % stack[-1].nodeNum
self.postOrder[postOrdIndx] = stack[-1].nodeNum
postOrdIndx += 1
stack.pop()
if len(stack) == 0:
break
#print ' postOrder appending w %i' % stack[-1].nodeNum
self.postOrder[postOrdIndx] = stack[-1].nodeNum
postOrdIndx += 1
theNode = stack.pop()
while not theNode.sibling:
if len(stack) == 0:
break
#print ' postOrder appending x %i' % stack[-1].nodeNum
self.postOrder[postOrdIndx] = stack[-1].nodeNum
postOrdIndx += 1
theNode = stack.pop()
if len(stack) == 0:
break
if theNode.sibling:
stack.append(theNode.sibling)
#print "self.preOrder = %s, preOrdIndx=%i" % (self.preOrder, preOrdIndx)
self.preOrder[preOrdIndx] = theNode.sibling.nodeNum
preOrdIndx += 1
else:
gm.append('Problemo.')
gm.append('xxx got theNode %s' % theNode.nodeNum)
raise Glitch(gm)
if 1:
assert preOrdIndx == postOrdIndx
#print "a preOrdIndx = %i, len(self.nodes) = %i" % (preOrdIndx, len(self.nodes))
if preOrdIndx != len(self.nodes):
pOI = preOrdIndx
for i in range(preOrdIndx, len(self.nodes)):
self.preOrder[i] = var.NO_ORDER
self.postOrder[i] = var.NO_ORDER
preOrdIndx += 1
postOrdIndx += 1
#print "b preOrdIndx = %i, len(self.nodes) = %i" % (preOrdIndx, len(self.nodes))
assert preOrdIndx == len(self.nodes) and postOrdIndx == len(self.nodes)
def iterPreOrder(self):
"""Node generator. Assumes preAndPostOrderAreValid."""
for i in self.preOrder:
j = int(i) # this speeds things up a lot! Two-fold!
if j != var.NO_ORDER:
#yield self.nodes[int(i)]
yield self.nodes[j]
def iterPostOrder(self):
"""Node generator. Assumes preAndPostOrderAreValid."""
for i in self.postOrder:
j = int(i)
if j != var.NO_ORDER:
yield self.nodes[j]
def iterNodes(self):
"""Node generator, in preOrder. Assumes preAndPostOrderAreValid."""
for i in self.preOrder:
j = int(i)
if j != var.NO_ORDER:
yield self.nodes[j]
def iterNodesNoRoot(self):
"""Node generator, skipping the root. PreOrder."""
for i in self.preOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if n != self.root:
yield n
def iterLeavesNoRoot(self):
"""Leaf node generator, skipping the root. PreOrder."""
for i in self.preOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if n != self.root and n.isLeaf:
yield n
def iterLeavesPreOrder(self):
for i in self.preOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if n.isLeaf:
yield n
def iterLeavesPostOrder(self):
for i in self.postOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if n.isLeaf:
yield n
def iterInternalsNoRootPreOrder(self):
"""Internal post order node generator, skipping the root. Assumes preAndPostOrderAreValid."""
for i in self.preOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if n != self.root and not n.isLeaf:
yield n
def iterInternalsNoRootPostOrder(self):
"""Internal post order node generator, skipping the root. Assumes preAndPostOrderAreValid."""
for i in self.postOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if n != self.root and not n.isLeaf:
yield n
def iterInternalsPostOrder(self):
"""Internal post order node generator. Assumes preAndPostOrderAreValid."""
for i in self.postOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if not n.isLeaf:
yield n
def iterInternalsNoRoot(self):
"""Internal node generator, skipping the root. PreOrder"""
for i in self.preOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if n != self.root and not n.isLeaf:
yield n
def iterInternals(self):
"""Internal node generator. PreOrder. Including the root, if it is internal."""
for i in self.preOrder:
j = int(i)
if j != var.NO_ORDER:
n = self.nodes[j]
if not n.isLeaf:
yield n
################################################
#
def stripBrLens(self):
"""Sets all node.br.len's to 0.1, the default in p4.
Then, if you were to write it out in Nexus or Newick format,
no branch lengths would be printed.
"""
for n in self.iterNodesNoRoot():
n.br.len = 0.1
def len(self):
"""Return the sum of all br.len's."""
total = 0.0
for n in self.iterNodesNoRoot():
total += n.br.len
return total
def lenInternals(self):
"""Return the sum of all internal br.len's."""
total = 0.0
for n in self.iterInternalsNoRoot():
total += n.br.len
return total
def stemminess(self):
"""Ratio of internal branches to overall tree length.
Also called 'treeness'. Via Phillips and Penny, MPE 2003, but
they cite Lanyon 1988."""
total = self.len()
internals = self.lenInternals()
return internals/total
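    # Worked example (hypothetical numbers): a tree of total length 10.0 whose
    # internal branches sum to 2.5 has stemminess 2.5 / 10.0 = 0.25.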
def _makeRCSplitKeys(self, splitList=None):
"""Make long integer-valued split keys.
        This depends on the leaf bitkeys already being set, ie this method should only
        be used by the reduced consensus supertree class.
"""
if not self.preAndPostOrderAreValid:
self.setPreAndPostOrder()
#self.draw()
# allOnes = 2L**(self.nTax) - 1
#print 'nTax = %i, allOnes = %i' % (self.nTax, allOnes)
# for n in self.iterInternalsPostOrder():
for n in self.iterInternalsNoRootPostOrder():
# if n != self.root:
#print 'doing node %s' % n.nodeNum
# if not n.isLeaf():
if n.leftChild:
childrenNums = self.getChildrenNums(n)
# x = childrenNums[0]
x = self.nodes[childrenNums[0]].br.rc
for i in childrenNums[1:]:
x = x | self.nodes[i].br.rc
# x = x | i
n.br.rc = x
                if splitList is not None:
splitList.append([x,0])
n.br.rcList = [n.br.rc]
def makeSplitKeys(self, makeNodeForSplitKeyDict=False):
"""Make long integer-valued split keys.
This needs to have self.taxNames set.
We make 2 kinds of split keys-- rawSplitKeys and splitKeys.
Both are attributes of node.br, so we have eg node.br.splitKey.
Raw split keys for terminal nodes are 2**n, where n is the index
of the taxon name. Eg for the first taxon, the rawSplitKey will
be 1, for the 3rd taxon the rawSplitKey will be 4.
RawSplitKeys for internal nodes are the rawSplitKey's for the
children, bitwise OR'ed together.
SplitKeys, cf rawSplitKeys, are in 'standard form', where the
numbers are even, ie do not contain the 1-bit. Having it in
standard form means that you can compare splits among trees.
If the rawSplitKey is even, then the splitKey is simply that,
unchanged. If, however, the rawSplitKey is odd, then the
splitKey is the rawSplitKey bit-flipped. For example, if
there are 5 taxa, and one of the rawSplitKeys is 9 (odd), we
can calculate the splitKey by bit-flipping, as::
01001 = 9 rawSplitKey
10110 = 22 splitKey
(Bit-flipping is done by exclusive-or'ing (xor) with 11111.)
The splitKey is readily converted to a splitString for
display, as 22 becomes '.**.*' (note the '1' bit is now on the
left). It is conventional that the first taxon, on the left,
is always a dot. (I don't know where the convention comes
from.)
The root has no rawSplitKey or splitKey.
For example, the tree::
+-------2:B (rawSplitKey = 2)
+---1
| +---------3:C (rawSplitKey = 4)
|
0-------------4:E (rawSplitKey = 16)
|
| +-----6:A (rawSplitKey = 1)
+----5
+-----------7:D (rawSplitKey = 8)
has 2 internal splits, on nodes 1 and 5.
::
Node n.br.rawSplitKey n.br.splitKey
1 6 6
5 9 22
There should be no duplicated rawSplitKeys, but if the tree
has a bifurcating root then there will be a duped splitKey.
This method will fail for trees with internal nodes that have
only one child, because that will make duplicated splits.
If arg *makeNodeForSplitKeyDict* is set, then it will make a
dictionary ``nodeForSplitKeyDict`` where the keys are the
splitKeys and the values are the corresponding nodes.
"""
gm = ['Tree.makeSplitKeys()']
#raise Glitch, gm
if not self.taxNames:
gm.append('No taxNames.')
raise Glitch(gm)
if not self.preAndPostOrderAreValid:
self.setPreAndPostOrder()
#self.draw()
if makeNodeForSplitKeyDict:
self.nodeForSplitKeyDict = {}
allOnes = 2**(self.nTax) - 1
#print 'nTax = %i, allOnes = %i' % (self.nTax, allOnes)
for n in self.iterPostOrder():
if n != self.root:
#print 'doing node %s' % n.nodeNum
if not n.leftChild:
# A long int, eg 1L, has no upper limit on its value
try:
n.br.rawSplitKey = 1 << self.taxNames.index(n.name) # "<<" is left-shift
except ValueError:
gm.append('node.name %s' % n.name)
gm.append('is not in taxNames %s' % self.taxNames)
raise Glitch(gm)
#n.br.rawSplitKey = 1L << self.taxNames.index(n.name) # "<<" is left-shift
if n.br.rawSplitKey == 1:
n.br.splitKey = allOnes - 1
else:
n.br.splitKey = n.br.rawSplitKey
#print 'upward leaf node %s, rawSplitKey %s, splitKey %s' % (n.nodeNum, n.br.rawSplitKey, n.br.splitKey)
else:
childrenNums = self.getChildrenNums(n)
if len(childrenNums) == 1:
gm.append('Internal node has only one child. That will make a duped split.')
raise Glitch(gm)
x = self.nodes[childrenNums[0]].br.rawSplitKey
for i in childrenNums[1:]:
y = self.nodes[i].br.rawSplitKey
x = x | y # '|' is bitwise "OR".
n.br.rawSplitKey = x
# Make node.br.splitKey's in "standard form", ie
# without the first taxon, ie without a 1. To do that
# we use the '&' operator, to bitwise "and" with 1.
if 1 & n.br.rawSplitKey: # Ie "Does rawSplitKey contain a 1?" or "Is rawSplitKey odd?"
n.br.splitKey = allOnes ^ n.br.rawSplitKey # "^" is xor, a bit-flipper.
else:
n.br.splitKey = n.br.rawSplitKey
#print 'intern node %s, rawSplitKey %s, splitKey %s' % (n.nodeNum, n.br.rawSplitKey, n.br.splitKey)
if makeNodeForSplitKeyDict:
self.nodeForSplitKeyDict[n.br.splitKey] = n
if 0:
# There should be no duped rawSplitKeys
theKeys = []
for n in self.iterNodesNoRoot():
theKeys.append(n.br.rawSplitKey)
for k in theKeys:
if theKeys.count(k) > 1:
gm.append('Duped rawSplitKey %i.' % k)
for n in self.nodes:
if n != self.root:
print('%7s %4s %4s' % (n.nodeNum, n.br.rawSplitKey, n.br.splitKey))
raise Glitch(gm)
# Any duped splitKeys? There will be if the tree is bi-Rooted.
if 0:
theKeys = []
for n in self.iterNodesNoRoot():
theKeys.append(n.br.splitKey)
for k in theKeys:
if theKeys.count(k) > 1:
gm.append('Duped splitKey %i.' % k)
for n in self.iterNodesNoRoot():
print('%7s %4s %4s' % (n.nodeNum, n.br.rawSplitKey, n.br.splitKey))
raise Glitch(gm)
if 0:
print(gm[0])
print(self.taxNames)
print('nodeNum rawSplitKey splitKey')
for n in self.iterNodesNoRoot():
print('%7s %4s %4s %s' % (
n.nodeNum, n.br.rawSplitKey, n.br.splitKey, func.getSplitStringFromKey(n.br.splitKey, self.nTax)))
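    # A worked example of the key arithmetic above (standalone sketch, not a
    # call into p4).  With 5 taxa, allOnes is 0b11111 == 31.  A rawSplitKey of
    # 9 (0b01001) is odd, so it gets bit-flipped to give the splitKey:
    #
    #   nTax = 5
    #   allOnes = 2 ** nTax - 1              # 31, ie 0b11111
    #   rawSplitKey = 0b01001                # 9: taxa 0 and 3 on this side
    #   if rawSplitKey & 1:                  # odd -- contains the first taxon
    #       splitKey = allOnes ^ rawSplitKey     # 31 ^ 9 == 22, ie 0b10110
    #   else:
    #       splitKey = rawSplitKey
    #
    # which reproduces the 9 -> 22 example in the docstring.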
def recalculateSplitKeysOfNodeFromChildren(self, aNode, allOnes):
children = [n for n in aNode.iterChildren()]
x = children[0].br.rawSplitKey
for n in children[1:]:
x = x | n.br.rawSplitKey # '|' is bitwise "OR".
aNode.br.rawSplitKey = x
if 1 & aNode.br.rawSplitKey: # Ie "Does rawSplitKey contain a 1?" or "Is rawSplitKey odd?"
aNode.br.splitKey = allOnes ^ aNode.br.rawSplitKey # "^" is xor, a bit-flipper.
else:
aNode.br.splitKey = aNode.br.rawSplitKey
def checkSplitKeys(self, useOldName=False, glitch=True, verbose=True):
gm = ['Tree.checkSplitKeys()']
allOnes = 2**(self.nTax) - 1
#print 'nTax = %i, allOnes = %i' % (self.nTax, allOnes)
isBad = False
for n in self.iterPostOrder():
if n != self.root:
#print 'doing node %s' % n.nodeNum
if not n.leftChild:
# A long int, eg 1L, has no upper limit on its value
try:
if useOldName:
rawSplitKey = 1 << self.taxNames.index(n.oldName) # "<<" is left-shift
else:
rawSplitKey = 1 << self.taxNames.index(n.name) # "<<" is left-shift
except ValueError:
if useOldName:
gm.append('node.name %s' % n.oldName)
else:
gm.append('node.name %s' % n.name)
gm.append('is not in taxNames %s' % self.taxNames)
raise Glitch(gm)
#n.br.rawSplitKey = 1L << self.taxNames.index(n.name) # "<<" is left-shift
if rawSplitKey == 1:
splitKey = allOnes - 1
else:
splitKey = rawSplitKey
#print 'upward leaf node %s, rawSplitKey %s, splitKey %s' % (n.nodeNum, n.br.rawSplitKey, n.br.splitKey)
else:
childrenNums = self.getChildrenNums(n)
if len(childrenNums) == 1:
gm.append('Internal node has only one child. That will make a duped split.')
raise Glitch(gm)
x = self.nodes[childrenNums[0]].br.rawSplitKey
for i in childrenNums[1:]:
y = self.nodes[i].br.rawSplitKey
x = x | y # '|' is bitwise "OR".
rawSplitKey = x
# Make node.br.splitKey's in "standard form", ie
# without the first taxon, ie without a 1. To do that
# we use the '&' operator, to bitwise "and" with 1.
if 1 & rawSplitKey: # Ie "Does rawSplitKey contain a 1?" or "Is rawSplitKey odd?"
splitKey = allOnes ^ rawSplitKey # "^" is xor, a bit-flipper.
else:
splitKey = rawSplitKey
#print 'intern node %s, rawSplitKey %s, splitKey %s' % (n.nodeNum, n.br.rawSplitKey, n.br.splitKey)
if n.br.rawSplitKey != rawSplitKey:
print("checkSplitKeys node %2i rawSplitKey: existing %s, calculated %s" % (n.nodeNum, n.br.rawSplitKey, rawSplitKey))
isBad = True
if n.br.splitKey != splitKey:
print("checkSplitKeys node %2i splitKey: existing %s, calculated %s" % (n.nodeNum, n.br.splitKey, splitKey))
isBad = True
if glitch and isBad:
raise Glitch(gm)
if verbose and not isBad:
print("checkSplitKeys(). ok")
def taxSetIsASplit(self, taxSetName):
assert self.nexusSets
assert self.taxNames
assert self.nexusSets.taxSets
lowArgTaxSetName = string.lower(taxSetName)
theTS = None
for ts in self.nexusSets.taxSets:
if ts.lowName == lowArgTaxSetName:
theTS = ts
break
assert theTS, "Could not find the taxSet named %s" % taxSetName
#theTS.dump()
assert theTS.mask
rawSplitKey = 0
for i in range(len(theTS.mask)):
#print i, theTS.mask[i]
if theTS.mask[i] == '1':
rawSplitKey += (1 << i)
if 1 & rawSplitKey: # Ie "Does rawSplitKey contain a 1?" or "Is rawSplitKey odd?"
allOnes = 2**(self.nTax) - 1
splitKey = allOnes ^ rawSplitKey # "^" is xor, a bit-flipper.
else:
splitKey = rawSplitKey
#print "got splitKey %s" % splitKey
for n in self.nodes:
if n.br and not n.isLeaf:
#print " %2i %s %s" % (n.nodeNum, n.br.splitKey, splitKey)
if n.br.splitKey == splitKey:
#self.draw()
#return n.nodeNum
return n
return None # Was -1 before, when n.nodeNum was returned for hits. Now a node is returned.
def checkTaxNames(self):
"""Check that all taxNames are in the tree, and vice versa."""
        # If var.allowTreesWithDifferingTaxonSets is set to True we will not check
        # the taxNames. This is primarily used to read trees for supertree and
        # leaf stability calculations.
# Peter comments that Tobias added this, but it is not needed,
# and messes other things up -- so comment out until it is
# sorted.
#if var.allowTreesWithDifferingTaxonSets:
# return
# self.taxNames is a property, so when it is set, it calls this method
if self.name:
gm = ['Tree.checkTaxNames() tree %s' % self.name]
else:
gm = ['Tree.checkTaxNames()']
if not self.taxNames:
gm.append('No taxNames.')
raise Glitch(gm)
tax = []
for n in self.iterNodes():
if n.isLeaf and n.name:
tax.append(n.name)
# This next line should not be needed, as root leaves should be leaves.
elif n == self.root and n.name and n.getNChildren() < 2: # terminal root that has a taxName
tax.append(n.name)
#print 'tax from tree = %s' % tax
#print 'self.taxNames = %s' % self.taxNames
# Check for same number of taxa
if len(tax) != len(self.taxNames):
#self.draw()
#self.dump(node=1)
gm.append('Mismatch. Length of self.taxNames is wrong.')
gm.append('The tree has %i leaves with names, but len(self.taxNames) = %i' % (
len(tax), len(self.taxNames)))
gm.append('leaves on the tree = %s' % tax)
gm.append('self.taxNames = %s' % self.taxNames)
gm.append('symmetric_difference is %s' % set(tax).symmetric_difference(set(self.taxNames)))
#print ' Setting invalid taxNames to an empty list.'
#self.taxNames = []
raise Glitch(gm, 'tree_badTaxNamesLength')
# Check for mis-matched taxNames
isBad = 0
taxSet = set(tax)
selfTaxNamesSet = set(self.taxNames)
s = taxSet.difference(selfTaxNamesSet)
if len(s):
isBad = 1
print(gm[0])
print('TaxName mismatch between the tree and self.taxNames.')
print('These taxa are found in the tree but not in self.taxNames:')
print(s)
s = selfTaxNamesSet.difference(taxSet)
if len(s):
isBad = 1
print(gm[0])
print('TaxName mismatch between the tree and self.taxNames.')
print('These taxa are found in the self.taxNames but not in the tree:')
print(s)
if isBad:
raise Glitch(gm, 'tree_taxNamesMisMatch')
################################################
# Copy and Verify
def copyToTree(self, otherTree):
gm = ['Tree.copyToTree()']
if len(self.nodes) != len(otherTree.nodes):
gm.append('Different number of nodes.')
raise Glitch(gm)
# node relations (parent, child, sib)
for nNum in range(len(self.nodes)):
selfNode = self.nodes[nNum]
otherNode = otherTree.nodes[nNum]
# parent
if selfNode.parent:
otherNode.parent = otherTree.nodes[selfNode.parent.nodeNum]
else:
#print "otherNode.parent", otherNode.parent
otherNode.parent = None
# leftChild
if selfNode.leftChild:
otherNode.leftChild = otherTree.nodes[selfNode.leftChild.nodeNum]
else:
#print "otherNode.leftChild", otherNode.leftChild
otherNode.leftChild = None
# sibling
if selfNode.sibling:
otherNode.sibling = otherTree.nodes[selfNode.sibling.nodeNum]
else:
#print "otherNode.sibling", otherNode.sibling
otherNode.sibling = None
# root
otherTree.root.br = otherTree.nodes[self.root.nodeNum].br
otherTree.nodes[self.root.nodeNum].br = None
otherTree.root = otherTree.nodes[self.root.nodeNum]
# brLens and splitKeys
for nNum in range(len(self.nodes)):
if self.nodes[nNum] != self.root:
otherTree.nodes[nNum].br.len = self.nodes[nNum].br.len
otherTree.nodes[nNum].br.lenChanged = self.nodes[nNum].br.lenChanged
#otherTree.nodes[nNum].br.flag = self.nodes[nNum].flag
otherTree.nodes[nNum].br.splitKey = self.nodes[nNum].br.splitKey
otherTree.nodes[nNum].br.rawSplitKey = self.nodes[nNum].br.rawSplitKey
# model usage numbers
if self.model:
for nNum in range(len(self.nodes)):
selfNode = self.nodes[nNum]
otherNode = otherTree.nodes[nNum]
for pNum in range(self.model.nParts):
otherNode.parts[pNum].compNum = selfNode.parts[pNum].compNum
if selfNode != self.root:
otherNode.br.parts[pNum].rMatrixNum = selfNode.br.parts[pNum].rMatrixNum
otherNode.br.parts[pNum].gdasrvNum = selfNode.br.parts[pNum].gdasrvNum
# pre- and postOrder
for i in range(len(self.preOrder)):
otherTree.preOrder[i] = self.preOrder[i]
otherTree.postOrder[i] = self.postOrder[i]
# partLikes
if self.model:
for pNum in range(self.model.nParts):
otherTree.partLikes[pNum] = self.partLikes[pNum]
otherTree._nInternalNodes = self._nInternalNodes
def verifyIdentityWith(self, otherTree, doSplitKeys):
"""For MCMC debugging. Verifies that two trees are identical."""
complaintHead = '\nTree.verifyIdentityWith()' # keep
if len(self.nodes) != len(otherTree.nodes):
print(complaintHead)
print(' Different number of nodes.')
return var.DIFFERENT
# check node relations (parent, child, sib)
isBad = 0
for nNum in range(len(self.nodes)):
selfNode = self.nodes[nNum]
otherNode = otherTree.nodes[nNum]
# parent
if selfNode.parent:
if otherNode.parent:
if otherNode.parent.nodeNum != selfNode.parent.nodeNum:
isBad = 1
else:
isBad = 1
else:
if otherNode.parent:
isBad = 1
# leftChild
if selfNode.leftChild:
if otherNode.leftChild:
if otherNode.leftChild.nodeNum != selfNode.leftChild.nodeNum:
isBad = 1
else:
isBad = 1
else:
if otherNode.leftChild:
isBad = 1
# sibling
if selfNode.sibling:
if otherNode.sibling:
if otherNode.sibling.nodeNum != selfNode.sibling.nodeNum:
isBad = 1
else:
isBad = 1
else:
if otherNode.sibling:
isBad = 1
if isBad:
print(complaintHead)
print(' Node %i, relations differ.' % nNum)
self.write()
otherTree.write()
return var.DIFFERENT
if self.root.nodeNum != otherTree.root.nodeNum:
print(complaintHead)
print(' Roots differ.')
return var.DIFFERENT
# brLens, lenChanged, and node.flag. and splitKeys
for nNum in range(len(self.nodes)):
if self.nodes[nNum] != self.root:
#if self.nodes[nNum].br.len != otherTree.nodes[nNum].br.len:
if math.fabs(self.nodes[nNum].br.len - otherTree.nodes[nNum].br.len) > 1.e-8:
print(complaintHead)
print(' BrLens differ.')
return var.DIFFERENT
if self.nodes[nNum].br.lenChanged != otherTree.nodes[nNum].br.lenChanged:
print(complaintHead)
print(' br.lenChanged differs.')
return var.DIFFERENT
if self.nodes[nNum].flag != otherTree.nodes[nNum].flag:
print(complaintHead)
print(' flag differs, nodeNum %i. %s vs %s' % (nNum, self.nodes[nNum].flag, otherTree.nodes[nNum].flag))
return var.DIFFERENT
if doSplitKeys:
if self.nodes[nNum].br.splitKey != otherTree.nodes[nNum].br.splitKey:
print(complaintHead)
print(' SplitKeys differ.')
return var.DIFFERENT
if self.nodes[nNum].br.rawSplitKey != otherTree.nodes[nNum].br.rawSplitKey:
print(complaintHead)
print(' rawSplitKeys differ.')
return var.DIFFERENT
# model usage numbers
isBad = 0
for pNum in range(self.model.nParts):
for nNum in range(len(self.nodes)):
selfNode = self.nodes[nNum]
otherNode = otherTree.nodes[nNum]
if selfNode.parts[pNum].compNum != otherNode.parts[pNum].compNum:
isBad = 1
if self.nodes[nNum] != self.root:
if selfNode.br.parts[pNum].rMatrixNum != otherNode.br.parts[pNum].rMatrixNum:
isBad = 1
elif selfNode.br.parts[pNum].gdasrvNum != otherNode.br.parts[pNum].gdasrvNum:
isBad = 1
if isBad:
print(complaintHead)
print(' Node %i, model usage info does not match.' % nNum)
return var.DIFFERENT
# pre- and postOrder
isBad = 0
for i in range(len(self.preOrder)):
if self.preOrder[i] != otherTree.preOrder[i]:
isBad = 1
break
elif self.postOrder[i] != otherTree.postOrder[i]:
isBad = 1
break
if isBad:
print(complaintHead)
print(' Pre- or postOrder do not match.')
return var.DIFFERENT
if self._nInternalNodes != otherTree._nInternalNodes:
print(complaintHead)
print(' _nInternalNodes differ.')
return var.DIFFERENT
# partLikes
for pNum in range(self.model.nParts):
#if otherTree.partLikes[pNum] != self.partLikes[pNum]:
if math.fabs(otherTree.partLikes[pNum] - self.partLikes[pNum]) > 1.e-8:
print(complaintHead)
print(" partLikes differ. (%.5f, (%g) %.5f (%g)" % (
otherTree.partLikes[pNum], otherTree.partLikes[pNum], self.partLikes[pNum], self.partLikes[pNum]))
return var.DIFFERENT
if 0: # some more
for nNum in range(len(self.nodes)):
selfNode = self.nodes[nNum]
otherNode = otherTree.nodes[nNum]
if selfNode.nodeNum != otherNode.nodeNum:
print(complaintHead)
print(' nodeNum differs')
return var.DIFFERENT
if selfNode.seqNum != otherNode.seqNum:
print(complaintHead)
print(' seqNum differs')
return var.DIFFERENT
if selfNode.name != otherNode.name:
print(complaintHead)
print(' name differs')
return var.DIFFERENT
if selfNode.isLeaf != otherNode.isLeaf:
print(complaintHead)
print(' isLeaf differs')
return var.DIFFERENT
return var.SAME
############################################
def isFullyBifurcating(self, verbose=False):
"""Returns True if the tree is fully bifurcating. Else False. """
if self.root and self.root.leftChild and self.root.leftChild.sibling and self.root.leftChild.sibling.sibling:
if self.root.leftChild.sibling.sibling.sibling:
if verbose:
print("isFullyBifurcating() returning False, due to root with 4 or more children.")
return False
elif self.root and self.root.isLeaf:
pass
else:
if verbose:
print("isFullyBifurcating() returning False, due to (non-leaf) root not having 3 children.")
return False
for n in self.iterInternalsNoRoot():
if n.leftChild and n.leftChild.sibling:
if n.leftChild.sibling.sibling:
if verbose:
print("isFullyBifurcating() returning False, due to node %i having 3 or more children." % n.nodeNum)
return False
else:
if verbose:
print("isFullyBifurcating() returning False, due to non-leaf node %i having 1 or fewer children." % n.nodeNum)
return False
return True
# These next two are for the eTBR implementation that I got from Jason Evans' Crux. Thanks Jason!
def getDegree(self, nodeSpecifier):
n = self.node(nodeSpecifier)
if n.isLeaf:
if n.parent:
return 1
else:
return 0
else:
#assert n.leftChild
deg = 1 # the leftChild
if n.parent:
deg += 1
s = n.leftChild.sibling
while s:
deg += 1
s = s.sibling
return deg
def nextNode(self, spokeSpecifier, hubSpecifier):
"""Get next node cycling around a hub node.
A bit of a hack to make a p4 Node behave sorta like a
Felsenstein node. Imagine cycling around the branches
emanating from a node like spokes on a hub, starting from
anywhere, with no end.
The hub node would usually be the parent of the spoke, or the
spoke would be the hub itself. Usually
self.nextNode(spoke,hub) delivers spoke.sibling. What happens
when the siblings run out is that
self.nextNode(rightmostSibling, hub) delivers hub itself, and
of course its branch (spoke) points toward the hub.parent.
(Unless hub is the root, of course, in which case
self.nextNode(rightmostSibling, hub) delivers hub.leftChild.)
In the usual case of the hub not being the root, the next node
to be delivered by nextNode(spokeIsHub, hub) is usually the
leftChild of the hub. Round and round, clockwise.
"""
spoke = self.node(spokeSpecifier)
hub = self.node(hubSpecifier)
if spoke.parent == hub or hub == spoke:
if spoke == hub:
if spoke.leftChild:
return spoke.leftChild
else:
return hub
else:
if spoke.sibling:
return spoke.sibling
else:
if hub.parent:
return hub
else:
return hub.leftChild
else:
print("*=" * 25)
self.draw()
gm = ["Tree.nextNode() spoke=%i, hub=%i" % (spoke.nodeNum, hub.nodeNum)]
gm.append("Need to have either spoke.parent == hub or hub == spoke.")
raise Glitch(gm)
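    # Hypothetical usage, assuming the example tree in the makeSplitKeys
    # docstring, where node 5 has children 6 and 7 and its parent is the
    # root (node 0); t is a Tree instance:
    #
    #   t.nextNode(6, 5)   # -> node 7, the sibling of node 6
    #   t.nextNode(7, 5)   # -> node 5, the hub itself (siblings exhausted)
    #   t.nextNode(5, 5)   # -> node 6, the hub's leftChild
    #
    # Round and round, clockwise, as described in the docstring.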
def topologyDistance(self, tree2, metric='sd', resetSplitKeySet=False):
"""Compares the topology of self with tree2.
The complete list of metrics is given in var.topologyDistanceMetrics
For most metrics using this method, taxNames needs to be set,
        and needs to be the same in the two trees. If the taxNames are
        missing, or differ between the two trees, a Glitch is raised.
The 'metric' can be one of 'sd' (symmetric difference), 'wrf'
(weighted Robinson-Foulds), 'bld' (Felsenstein's branch-
length distance), or 'diffs'. The
unwighted Robinson-Foulds metric would be the same as the
symmetric difference.
There is also an experimental scqdist, but that needs the
scqdist.so module, in the QDist directory.
See Felsenstein 2004 Inferring Phylogenies, Pg 529.
The default metric is the very simple 'sd', symmetric
difference. Using this metric, if the 2 trees share the same
set of splits, they are deemed to be the same topology; branch
lengths are not compared. This method returns the number of
splits that are in self that are not in tree2 plus the number
of splits that are in tree2 that are not in self. So it would
return 0 for trees that are the same.
The 'wrf' and 'bld' metrics take branch lengths into account.
Bifurcating roots complicate things, so they are not allowed
for weighted distance calculations.
In the unweighted case (ie metric='sd'), whether the trees
compared have bifurcating roots or not is ignored. So the
trees (A,B,(C,D)) and ((A,B),(C,D)) will be deemed to have the
same topology, since they have the same splits.
        The 'diffs' metric returns a tuple of 2 numbers --
both are set differences. The first is the number of splits
in self that are not in tree2, and the second is the number of
        splits in tree2 that are not in self. (Consider it as the
symmetric difference split into its 2 parts.)
If you calculate a distance and then make a topology change, a
subsequent sd topologyDistance calculation will be wrong, as it
uses previous splits. So then you need to 'resetSplitKeySet'.
The 'scqdist' metric also gives quartet distances. It was
written by Anders Kabell Kristensen for his Masters degree at
Aarhus University, 2010. http://www.cs.au.dk/~dalko/thesis/
It has two versions -- a pure Python version (that needs
scipy) that I do not include here, and a fast C++ version,
        that I wrapped in python. It's speedy -- the 'sc' in 'scqdist'
is for 'sub-cubic', ie better than O(n^3).
"""
gm = ['Tree.topologyDistance()']
if metric not in var.topologyDistanceMetrics:
gm.append("Got a request for unknown metric '%s'" % metric)
gm.append("The 'metric' arg should be one of %s" % var.topologyDistanceMetrics)
raise Glitch(gm)
if metric == 'scqdist': # no need for taxNames
try:
import scqdist
except ImportError:
gm.append("Could not find the experimental 'scqdist' module needed for this metric.")
raise Glitch(gm)
tsSelf = self.writeNewick(toString=True)
tsTree2 = tree2.writeNewick(toString=True)
return scqdist.qdist(tsSelf, tsTree2)
if not self.taxNames or not tree2.taxNames:
gm.append("This method requires taxNames to be set.")
raise Glitch(gm)
if self.taxNames != tree2.taxNames:
gm.append("The taxNames are different for the two trees.")
gm.append("Self: %s" % self.taxNames)
gm.append("tree2: %s" % tree2.taxNames)
raise Glitch(gm)
        if (self.root.getNChildren() == 2 or tree2.root.getNChildren() == 2) and (metric in ['wrf', 'bld']):
gm.append('One of the input trees has a bifurcating root.')
gm.append('Weighted tree distance calculations do not work on bi-rooted trees.')
raise Glitch(gm)
# Tobias' old stuff. The quartet distances did not give correct distances. Not clear about the triplet distances.
# # Building a triplet set before the splitset is build to avoid doing extra work
# if metric == 'triplet':
# from p4.LeafSupport import TripletStripper
# # Create at stripper to get triplets from trees
# tripletStripper = TripletStripper()
# # Strip the triplets from one self
# if not hasattr(self, 'triplets'):
# self.triplets = tripletStripper.getQuartetSet(True, self)
# if not hasattr(tree2, 'triplets'):
# # Strip triplets from the other tree
# tree2.triplets = tripletStripper.getQuartetSet(True, tree2)
# # The difference between the triplet sets
# # if both trees have the same set of taxa this should be a decent measure
# # otherwise it is of questionalbe use
# tripletDifference = len(self.triplets.symmetric_difference(tree2.triplets))
# # return quartetDifference, len(self.quartets) + len(tree2.quartets)
# return tripletDifference
# # Building quartet sets before the splitset is built to avoid extra work
# if metric == 'quartet':
# from p4.LeafSupport import QuartetStripper
# # Create a stripper to get quartets from trees
# quartetStripper = QuartetStripper()
# # Strip quartets from self
# if not hasattr(self, 'quartets'):
# self.quartets = quartetStripper.getQuartetSet(True, self)
# # Strip quartets from tree2
# if not hasattr(tree2, 'quartets'):
# tree2.quartets = quartetStripper.getQuartetSet(True, tree2)
# # The symmetric difference between the quartet sets
# # if both trees have the same set of taxa this should be a resonable distance measure
# # if not the usefullness is questionalbe.
# #print len(self.quartets)
# #print len(tree2.quartets)
# if 0:
# selfHasButTree2DoesNot = self.quartets.difference(tree2.quartets)
# tree2HasButSelfDoesNot = tree2.quartets.difference(self.quartets)
# print len(selfHasButTree2DoesNot),len(tree2HasButSelfDoesNot)
# quartetDifference = len(self.quartets.symmetric_difference(tree2.quartets))
# # return quartetDifference, len(self.quartets) + len(tree2.quartets)
# return quartetDifference
# if metric == 'thquartet':
# from p4.QuartetDistance import QuartetDistance
# qd = QuartetDistance([self, tree2])
# ret = qd.calcQuartetDistance()
# return ret
# I might be doing a lot of these, and I don't want to make
# splitKeys and make the splitKeys set over and over again.
# So store it as a Tree.attribute.
if resetSplitKeySet:
del(self.splitKeySet)
del(tree2.splitKeySet)
if not hasattr(self, 'splitKeySet'):
self.makeSplitKeys()
self.splitKeySet = set([n.br.splitKey for n in self.iterNodesNoRoot()])
if not hasattr(tree2, 'splitKeySet'):
tree2.makeSplitKeys()
tree2.splitKeySet = set([n.br.splitKey for n in tree2.iterNodesNoRoot()])
if metric == 'sd':
# Symmetric difference. The symmetric_difference method
# returns all elements that are in exactly one of the sets.
theSD = len(self.splitKeySet.symmetric_difference(tree2.splitKeySet))
#del(self.splitKeyHash)
#del(tree2.splitKeyHash)
return theSD
# return theSD, len(self.splitKeySet) + len(tree2.splitKeySet)
# The difference method returns the difference of two sets as
# a new Set. I.e. all elements that are in self and not in
# the other.
selfHasButTree2DoesNot = self.splitKeySet.difference(tree2.splitKeySet)
tree2HasButSelfDoesNot = tree2.splitKeySet.difference(self.splitKeySet)
if metric == 'diffs':
return len(selfHasButTree2DoesNot),len(tree2HasButSelfDoesNot)
if metric in ['wrf', 'bld']:
self.splitKeyHash = {}
for n in self.iterNodesNoRoot():
self.splitKeyHash[n.br.splitKey] = n
tree2.splitKeyHash = {}
for n in tree2.iterNodesNoRoot():
tree2.splitKeyHash[n.br.splitKey] = n
if metric == 'wrf':
theSum = 0.0
for k in self.splitKeySet.intersection(tree2.splitKeySet):
#print '%s - %s' % (self.splitKeyHash[k].br.len, tree2.splitKeyHash[k].br.len)
theSum += abs(self.splitKeyHash[k].br.len - tree2.splitKeyHash[k].br.len)
for k in selfHasButTree2DoesNot:
#print 'x %s' % self.splitKeyHash[k].br.len
theSum += self.splitKeyHash[k].br.len
for k in tree2HasButSelfDoesNot:
#print 'y %s' % tree2.splitKeyHash[k].br.len
theSum += tree2.splitKeyHash[k].br.len
return theSum
elif metric == 'bld':
theSum = 0.0
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Classes for representing multi-dimensional data with metadata.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
from xml.dom.minidom import Document
import collections
import copy
import datetime
import operator
import warnings
import zlib
import biggus
import numpy as np
import numpy.ma as ma
import iris.analysis
from iris.analysis.cartography import wrap_lons
import iris.analysis.maths
import iris.analysis.interpolate
import iris.aux_factory
import iris.coord_systems
import iris.coords
import iris._concatenate
import iris._constraints
import iris._merge
import iris.exceptions
import iris.util
from iris._cube_coord_common import CFVariableMixin
from functools import reduce
__all__ = ['Cube', 'CubeList', 'CubeMetadata']
class CubeMetadata(collections.namedtuple('CubeMetadata',
['standard_name',
'long_name',
'var_name',
'units',
'attributes',
'cell_methods'])):
"""
Represents the phenomenon metadata for a single :class:`Cube`.
"""
def name(self, default='unknown'):
"""
Returns a human-readable name.
First it tries self.standard_name, then it tries the 'long_name'
attribute, then the 'var_name' attribute, before falling back to
the value of `default` (which itself defaults to 'unknown').
"""
return self.standard_name or self.long_name or self.var_name or default
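    # For example (values are illustrative only):
    #
    #   CubeMetadata(None, 'air temp', None, 'K', {}, ()).name()  # 'air temp'
    #   CubeMetadata(None, None, None, 'K', {}, ()).name()        # 'unknown'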
# The XML namespace to use for CubeML documents
XML_NAMESPACE_URI = "urn:x-iris:cubeml-0.2"
class _CubeFilter(object):
"""
A constraint, paired with a list of cubes matching that constraint.
"""
def __init__(self, constraint, cubes=None):
self.constraint = constraint
if cubes is None:
cubes = CubeList()
self.cubes = cubes
def __len__(self):
return len(self.cubes)
def add(self, cube):
"""
Adds the appropriate (sub)cube to the list of cubes where it
matches the constraint.
"""
sub_cube = self.constraint.extract(cube)
if sub_cube is not None:
self.cubes.append(sub_cube)
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilter` by merging the list of
cubes.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilter(self.constraint, self.cubes.merge(unique))
class _CubeFilterCollection(object):
"""
A list of _CubeFilter instances.
"""
@staticmethod
def from_cubes(cubes, constraints=None):
"""
Creates a new collection from an iterable of cubes, and some
optional constraints.
"""
constraints = iris._constraints.list_of_constraints(constraints)
pairs = [_CubeFilter(constraint) for constraint in constraints]
collection = _CubeFilterCollection(pairs)
for cube in cubes:
collection.add_cube(cube)
return collection
def __init__(self, pairs):
self.pairs = pairs
def add_cube(self, cube):
"""
Adds the given :class:`~iris.cube.Cube` to all of the relevant
constraint pairs.
"""
for pair in self.pairs:
pair.add(cube)
def cubes(self):
"""
Returns all the cubes in this collection concatenated into a
single :class:`CubeList`.
"""
result = CubeList()
for pair in self.pairs:
result.extend(pair.cubes)
return result
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilterCollection` by merging all the cube
lists of this collection.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilterCollection([pair.merged(unique) for pair in
self.pairs])
class CubeList(list):
"""
All the functionality of a standard :class:`list` with added "Cube"
context.
"""
def __new__(cls, list_of_cubes=None):
"""Given a :class:`list` of cubes, return a CubeList instance."""
cube_list = list.__new__(cls, list_of_cubes)
# Check that all items in the incoming list are cubes. Note that this
# checking does not guarantee that a CubeList instance *always* has
# just cubes in its list as the append & __getitem__ methods have not
# been overridden.
if not all([isinstance(cube, Cube) for cube in cube_list]):
raise ValueError('All items in list_of_cubes must be Cube '
'instances.')
return cube_list
def __str__(self):
"""Runs short :meth:`Cube.summary` on every cube."""
result = ['%s: %s' % (i, cube.summary(shorten=True)) for i, cube in
enumerate(self)]
if result:
result = '\n'.join(result)
else:
result = '< No cubes >'
return result
def __repr__(self):
"""Runs repr on every cube."""
return '[%s]' % ',\n'.join([repr(cube) for cube in self])
# TODO #370 Which operators need overloads?
def __add__(self, other):
return CubeList(list.__add__(self, other))
def __getitem__(self, keys):
"""x.__getitem__(y) <==> x[y]"""
result = super(CubeList, self).__getitem__(keys)
if isinstance(result, list):
result = CubeList(result)
return result
def __getslice__(self, start, stop):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
result = super(CubeList, self).__getslice__(start, stop)
result = CubeList(result)
return result
def xml(self, checksum=False, order=True, byteorder=True):
"""Return a string of the XML that this list of cubes represents."""
doc = Document()
cubes_xml_element = doc.createElement("cubes")
cubes_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
for cube_obj in self:
cubes_xml_element.appendChild(
cube_obj._xml_element(
doc, checksum=checksum, order=order, byteorder=byteorder))
doc.appendChild(cubes_xml_element)
# return our newly created XML string
return doc.toprettyxml(indent=" ")
def extract(self, constraints, strict=False):
"""
Filter each of the cubes which can be filtered by the given
constraints.
This method iterates over each constraint given, and subsets each of
the cubes in this CubeList where possible. Thus, a CubeList of length
**n** when filtered with **m** constraints can generate a maximum of
**m * n** cubes.
Keywords:
* strict - boolean
If strict is True, then there must be exactly one cube which is
filtered per constraint.
"""
return self._extract_and_merge(self, constraints, strict,
merge_unique=None)
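    # Hypothetical usage: extracting with two constraints from a list of
    # three cubes can return anywhere between zero and six cubes:
    #
    #   cubes.extract([iris.Constraint(name='air_temperature'),
    #                  iris.Constraint(name='air_pressure')])
    #
    # With strict=True, exactly one cube must match each constraint.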
@staticmethod
def _extract_and_merge(cubes, constraints, strict, merge_unique=False):
# * merge_unique - if None: no merging, if false: non unique merging,
# else unique merging (see merge)
constraints = iris._constraints.list_of_constraints(constraints)
# group the resultant cubes by constraints in a dictionary
constraint_groups = dict([(constraint, CubeList()) for constraint in
constraints])
for cube in cubes:
for constraint, cube_list in constraint_groups.iteritems():
sub_cube = constraint.extract(cube)
if sub_cube is not None:
cube_list.append(sub_cube)
if merge_unique is not None:
for constraint, cubelist in constraint_groups.iteritems():
constraint_groups[constraint] = cubelist.merge(merge_unique)
result = CubeList()
for constraint in constraints:
constraint_cubes = constraint_groups[constraint]
if strict and len(constraint_cubes) != 1:
msg = 'Got %s cubes for constraint %r, ' \
'expecting 1.' % (len(constraint_cubes), constraint)
raise iris.exceptions.ConstraintMismatchError(msg)
result.extend(constraint_cubes)
if strict and len(constraints) == 1:
result = result[0]
return result
def extract_strict(self, constraints):
"""
Calls :meth:`CubeList.extract` with the strict keyword set to True.
"""
return self.extract(constraints, strict=True)
def extract_overlapping(self, coord_names):
"""
Returns a :class:`CubeList` of cubes extracted over regions
where the coordinates overlap, for the coordinates
in coord_names.
Args:
* coord_names:
A string or list of strings of the names of the coordinates
over which to perform the extraction.
"""
if isinstance(coord_names, basestring):
coord_names = [coord_names]
def make_overlap_fn(coord_name):
def overlap_fn(cell):
return all(cell in cube.coord(coord_name).cells()
for cube in self)
return overlap_fn
coord_values = {coord_name: make_overlap_fn(coord_name)
for coord_name in coord_names}
return self.extract(iris.Constraint(coord_values=coord_values))
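    # Hypothetical usage: keep only the time span common to every cube in the
    # list (cube_a and cube_b are assumed to share a 'time' coordinate):
    #
    #   common = iris.cube.CubeList([cube_a, cube_b]).extract_overlapping('time')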
def merge_cube(self):
"""
Return the merged contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to merge the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.MergeError` will be raised
describing the reason for the failure.
For example:
>>> cube_1 = iris.cube.Cube([1, 2])
>>> cube_1.add_aux_coord(iris.coords.AuxCoord(0, long_name='x'))
>>> cube_2 = iris.cube.Cube([3, 4])
>>> cube_2.add_aux_coord(iris.coords.AuxCoord(1, long_name='x'))
>>> cube_2.add_dim_coord(
... iris.coords.DimCoord([0, 1], long_name='z'), 0)
>>> single_cube = iris.cube.CubeList([cube_1, cube_2]).merge_cube()
Traceback (most recent call last):
...
iris.exceptions.MergeError: failed to merge into a single cube.
Coordinates in cube.dim_coords differ: z.
Coordinate-to-dimension mapping differs for cube.dim_coords.
"""
if not self:
raise ValueError("can't merge an empty CubeList")
# Register each of our cubes with a single ProtoCube.
proto_cube = iris._merge.ProtoCube(self[0])
for cube in self[1:]:
proto_cube.register(cube, error_on_mismatch=True)
# Extract the merged cube from the ProtoCube.
merged_cube, = proto_cube.merge()
return merged_cube
def merge(self, unique=True):
"""
Returns the :class:`CubeList` resulting from merging this
:class:`CubeList`.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
This combines cubes with different values of an auxiliary scalar
coordinate, by constructing a new dimension.
.. testsetup::
import iris
c1 = iris.cube.Cube([0,1,2], long_name='some_parameter')
xco = iris.coords.DimCoord([11, 12, 13], long_name='x_vals')
c1.add_dim_coord(xco, 0)
c1.add_aux_coord(iris.coords.AuxCoord([100], long_name='y_vals'))
c2 = c1.copy()
c2.coord('y_vals').points = [200]
For example::
>>> print(c1)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 100
>>> print(c2)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 200
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.merge()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 2; x_vals: 3)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[100 200]
>>>
Contrast this with :meth:`iris.cube.CubeList.concatenate`, which joins
cubes along an existing dimension.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be merged. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be merged.
"""
# Register each of our cubes with its appropriate ProtoCube.
proto_cubes_by_name = {}
for cube in self:
name = cube.standard_name
proto_cubes = proto_cubes_by_name.setdefault(name, [])
proto_cube = None
for target_proto_cube in proto_cubes:
if target_proto_cube.register(cube):
proto_cube = target_proto_cube
break
if proto_cube is None:
proto_cube = iris._merge.ProtoCube(cube)
proto_cubes.append(proto_cube)
# Extract all the merged cubes from the ProtoCubes.
merged_cubes = CubeList()
for name in sorted(proto_cubes_by_name):
for proto_cube in proto_cubes_by_name[name]:
merged_cubes.extend(proto_cube.merge(unique=unique))
return merged_cubes
def concatenate_cube(self):
"""
Return the concatenated contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to concatenate the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.ConcatenateError` will be raised
describing the reason for the failure.
"""
if not self:
raise ValueError("can't concatenate an empty CubeList")
names = [cube.metadata.name() for cube in self]
unique_names = list(collections.OrderedDict.fromkeys(names))
if len(unique_names) == 1:
res = iris._concatenate.concatenate(self, error_on_mismatch=True)
n_res_cubes = len(res)
if n_res_cubes == 1:
return res[0]
else:
msgs = []
msgs.append('An unexpected problem prevented concatenation.')
msgs.append('Expected only a single cube, '
'found {}.'.format(n_res_cubes))
raise iris.exceptions.ConcatenateError(msgs)
else:
msgs = []
msgs.append('Cube names differ: {} != {}'.format(names[0],
names[1]))
raise iris.exceptions.ConcatenateError(msgs)
def concatenate(self):
"""
Concatenate the cubes over their common dimensions.
Returns:
A new :class:`iris.cube.CubeList` of concatenated
:class:`iris.cube.Cube` instances.
This combines cubes with a common dimension coordinate, but occupying
different regions of the coordinate value. The cubes are joined across
that dimension.
.. testsetup::
import iris
import numpy as np
xco = iris.coords.DimCoord([11, 12, 13, 14], long_name='x_vals')
yco1 = iris.coords.DimCoord([4, 5], long_name='y_vals')
yco2 = iris.coords.DimCoord([7, 9, 10], long_name='y_vals')
c1 = iris.cube.Cube(np.zeros((2,4)), long_name='some_parameter')
c1.add_dim_coord(xco, 1)
c1.add_dim_coord(yco1, 0)
c2 = iris.cube.Cube(np.zeros((3,4)), long_name='some_parameter')
c2.add_dim_coord(xco, 1)
c2.add_dim_coord(yco2, 0)
For example::
>>> print(c1)
some_parameter / (unknown) (y_vals: 2; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c1.coord('y_vals').points)
[4 5]
>>> print(c2)
some_parameter / (unknown) (y_vals: 3; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c2.coord('y_vals').points)
[ 7 9 10]
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.concatenate()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 5; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[ 4 5 7 9 10]
>>>
Contrast this with :meth:`iris.cube.CubeList.merge`, which makes a new
dimension from values of an auxiliary scalar coordinate.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be concatenated. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be concatenated.
.. warning::
This routine will load your data payload!
"""
return iris._concatenate.concatenate(self)
class Cube(CFVariableMixin):
"""
A single Iris cube of data and metadata.
Typically obtained from :func:`iris.load`, :func:`iris.load_cube`,
:func:`iris.load_cubes`, or from the manipulation of existing cubes.
For example:
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube)
air_temperature / (K) (latitude: 73; longitude: 96)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours
forecast_reference_time: 1998-03-01 03:00:00
pressure: 1000.0 hPa
time: 1998-12-01 00:00:00, \
bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00)
Attributes:
STASH: m01s16i203
source: Data from Met Office Unified Model
Cell methods:
mean within years: time
mean over years: time
See the :doc:`user guide</userguide/index>` for more information.
"""
#: Indicates to client code that the object supports
#: "orthogonal indexing", which means that slices that are 1d arrays
#: or lists slice along each dimension independently. This behavior
#: is similar to Fortran or Matlab, but different than numpy.
__orthogonal_indexing__ = True
def __init__(self, data, standard_name=None, long_name=None,
var_name=None, units=None, attributes=None,
cell_methods=None, dim_coords_and_dims=None,
aux_coords_and_dims=None, aux_factories=None):
"""
Creates a cube with data and optional metadata.
Not typically used - normally cubes are obtained by loading data
(e.g. :func:`iris.load`) or from manipulating existing cubes.
Args:
* data
This object defines the shape of the cube and the phenomenon
value in each cell.
It can be a biggus array, a numpy array, a numpy array
subclass (such as :class:`numpy.ma.MaskedArray`), or an
*array_like* as described in :func:`numpy.asarray`.
See :attr:`Cube.data<iris.cube.Cube.data>`.
Kwargs:
* standard_name
The standard name for the Cube's data.
* long_name
An unconstrained description of the cube.
* var_name
The CF variable name for the cube.
* units
The unit of the cube, e.g. ``"m s-1"`` or ``"kelvin"``.
* attributes
A dictionary of cube attributes
* cell_methods
A tuple of CellMethod objects, generally set by Iris, e.g.
``(CellMethod("mean", coords='latitude'), )``.
* dim_coords_and_dims
A list of coordinates with scalar dimension mappings, e.g
``[(lat_coord, 0), (lon_coord, 1)]``.
* aux_coords_and_dims
A list of coordinates with dimension mappings,
e.g ``[(lat_coord, 0), (lon_coord, (0, 1))]``.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
* aux_factories
A list of auxiliary coordinate factories. See
:mod:`iris.aux_factory`.
For example::
>>> from iris.coords import DimCoord
>>> from iris.cube import Cube
>>> latitude = DimCoord(np.linspace(-90, 90, 4),
... standard_name='latitude',
... units='degrees')
>>> longitude = DimCoord(np.linspace(45, 360, 8),
... standard_name='longitude',
... units='degrees')
>>> cube = Cube(np.zeros((4, 8), np.float32),
... dim_coords_and_dims=[(latitude, 0),
... (longitude, 1)])
"""
# Temporary error while we transition the API.
if isinstance(data, basestring):
raise TypeError('Invalid data type: {!r}.'.format(data))
if not isinstance(data, (biggus.Array, ma.MaskedArray)):
data = np.asarray(data)
self._my_data = data
#: The "standard name" for the Cube's phenomenon.
self.standard_name = standard_name
#: An instance of :class:`iris.unit.Unit` describing the Cube's data.
self.units = units
#: The "long name" for the Cube's phenomenon.
self.long_name = long_name
#: The CF variable name for the Cube.
self.var_name = var_name
self.cell_methods = cell_methods
#: A dictionary, with a few restricted keys, for arbitrary
#: Cube metadata.
self.attributes = attributes
# Coords
self._dim_coords_and_dims = []
self._aux_coords_and_dims = []
self._aux_factories = []
identities = set()
if dim_coords_and_dims:
dims = set()
for coord, dim in dim_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities and dim not in dims:
self._add_unique_dim_coord(coord, dim)
else:
self.add_dim_coord(coord, dim)
identities.add(identity)
dims.add(dim)
if aux_coords_and_dims:
for coord, dims in aux_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities:
self._add_unique_aux_coord(coord, dims)
else:
self.add_aux_coord(coord, dims)
identities.add(identity)
if aux_factories:
for factory in aux_factories:
self.add_aux_factory(factory)
@property
def metadata(self):
"""
An instance of :class:`CubeMetadata` describing the phenomenon.
This property can be updated with any of:
- another :class:`CubeMetadata` instance,
- a tuple/dict which can be used to make a :class:`CubeMetadata`,
- or any object providing the attributes exposed by
:class:`CubeMetadata`.
"""
return CubeMetadata(self.standard_name, self.long_name, self.var_name,
self.units, self.attributes, self.cell_methods)
@metadata.setter
def metadata(self, value):
try:
value = CubeMetadata(**value)
except TypeError:
try:
value = CubeMetadata(*value)
except TypeError:
missing_attrs = [field for field in CubeMetadata._fields
if not hasattr(value, field)]
if missing_attrs:
raise TypeError('Invalid/incomplete metadata')
for name in CubeMetadata._fields:
setattr(self, name, getattr(value, name))
def is_compatible(self, other, ignore=None):
"""
Return whether the cube is compatible with another.
Compatibility is determined by comparing :meth:`iris.cube.Cube.name()`,
:attr:`iris.cube.Cube.units`, :attr:`iris.cube.Cube.cell_methods` and
:attr:`iris.cube.Cube.attributes` that are present in both objects.
Args:
* other:
An instance of :class:`iris.cube.Cube` or
:class:`iris.cube.CubeMetadata`.
* ignore:
A single attribute key or iterable of attribute keys to ignore when
comparing the cubes. Default is None. To ignore all attributes set
this to other.attributes.
Returns:
Boolean.
.. seealso::
:meth:`iris.util.describe_diff()`
.. note::
This function does not indicate whether the two cubes can be
merged, instead it checks only the four items quoted above for
equality. Determining whether two cubes will merge requires
additional logic that is beyond the scope of this method.
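        For example (an illustrative sketch; ``cube`` and ``other_cube`` are
        assumed to be existing :class:`iris.cube.Cube` instances)::
            if not cube.is_compatible(other_cube, ignore='history'):
                iris.util.describe_diff(cube, other_cube)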
"""
compatible = (self.name() == other.name() and
self.units == other.units and
self.cell_methods == other.cell_methods)
if compatible:
common_keys = set(self.attributes).intersection(other.attributes)
if ignore is not None:
if isinstance(ignore, basestring):
ignore = (ignore,)
common_keys = common_keys.difference(ignore)
for key in common_keys:
if np.any(self.attributes[key] != other.attributes[key]):
compatible = False
break
return compatible
def convert_units(self, unit):
"""
Change the cube's units, converting the values in the data array.
For example, if a cube's :attr:`~iris.cube.Cube.units` are
kelvin then::
cube.convert_units('celsius')
will change the cube's :attr:`~iris.cube.Cube.units` attribute to
celsius and subtract 273.15 from each value in
:attr:`~iris.cube.Cube.data`.
.. warning::
Calling this method will trigger any deferred loading, causing
the cube's data array to be loaded into memory.
"""
# If the cube has units convert the data.
if not self.units.is_unknown():
self.data = self.units.convert(self.data, unit)
self.units = unit
def add_cell_method(self, cell_method):
"""Add a CellMethod to the Cube."""
self.cell_methods += (cell_method, )
def add_aux_coord(self, coord, data_dims=None):
"""
Adds a CF auxiliary coordinate to the cube.
Args:
* coord
The :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
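        For example (an illustrative sketch; ``surface_temp_data`` is assumed
        to be an array spanning the cube's first two data dimensions)::
            surface_temp = iris.coords.AuxCoord(surface_temp_data,
                                                long_name='surface_temperature',
                                                units='K')
            cube.add_aux_coord(surface_temp, data_dims=(0, 1))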
"""
if self.coords(coord): # TODO: just fail on duplicate object
raise ValueError('Duplicate coordinates are not permitted.')
self._add_unique_aux_coord(coord, data_dims)
def _add_unique_aux_coord(self, coord, data_dims):
# Convert to a tuple of integers
if data_dims is None:
data_dims = tuple()
elif isinstance(data_dims, collections.Container):
data_dims = tuple(int(d) for d in data_dims)
else:
data_dims = (int(data_dims),)
if data_dims:
if len(data_dims) != coord.ndim:
msg = 'Invalid data dimensions: {} given, {} expected for ' \
'{!r}.'.format(len(data_dims), coord.ndim, coord.name())
raise ValueError(msg)
# Check compatibility with the shape of the data
for i, dim in enumerate(data_dims):
if coord.shape[i] != self.shape[dim]:
msg = 'Unequal lengths. Cube dimension {} => {};' \
' coord {!r} dimension {} => {}.'
raise ValueError(msg.format(dim, self.shape[dim],
coord.name(), i,
coord.shape[i]))
elif coord.shape != (1,):
raise ValueError('Missing data dimensions for multi-valued'
' coordinate {!r}'.format(coord.name()))
self._aux_coords_and_dims.append([coord, data_dims])
def add_aux_factory(self, aux_factory):
"""
Adds an auxiliary coordinate factory to the cube.
Args:
* aux_factory
The :class:`iris.aux_factory.AuxCoordFactory` instance to add.
"""
if not isinstance(aux_factory, iris.aux_factory.AuxCoordFactory):
raise TypeError('Factory must be a subclass of '
'iris.aux_factory.AuxCoordFactory.')
self._aux_factories.append(aux_factory)
def add_dim_coord(self, dim_coord, data_dim):
"""
Add a CF coordinate to the cube.
Args:
* dim_coord
The :class:`iris.coords.DimCoord` instance to add to the cube.
* data_dim
Integer giving the data dimension spanned by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube or if a coord already exists for the
given dimension.
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
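        For example (an illustrative sketch; assumes the cube's first data
        dimension has length 3)::
            level = iris.coords.DimCoord([100, 200, 300],
                                         long_name='pressure_level',
                                         units='hPa')
            cube.add_dim_coord(level, 0)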
"""
if self.coords(dim_coord):
raise ValueError('The coordinate already exists on the cube. '
'Duplicate coordinates are not permitted.')
# Check dimension is available
if self.coords(dimensions=data_dim, dim_coords=True):
raise ValueError('A dim_coord is already associated with '
'dimension %d.' % data_dim)
self._add_unique_dim_coord(dim_coord, data_dim)
def _add_unique_dim_coord(self, dim_coord, data_dim):
if isinstance(dim_coord, iris.coords.AuxCoord):
raise ValueError('The dim_coord may not be an AuxCoord instance.')
# Convert data_dim to a single integer
if isinstance(data_dim, collections.Container):
if len(data_dim) != 1:
raise ValueError('The supplied data dimension must be a'
' single number.')
data_dim = int(list(data_dim)[0])
else:
data_dim = int(data_dim)
# Check data_dim value is valid
if data_dim < 0 or data_dim >= self.ndim:
raise ValueError('The cube does not have the specified dimension '
'(%d)' % data_dim)
# Check compatibility with the shape of the data
if dim_coord.shape[0] != self.shape[data_dim]:
msg = 'Unequal lengths. Cube dimension {} => {}; coord {!r} => {}.'
raise ValueError(msg.format(data_dim, self.shape[data_dim],
dim_coord.name(),
len(dim_coord.points)))
self._dim_coords_and_dims.append([dim_coord, int(data_dim)])
def remove_aux_factory(self, aux_factory):
"""Removes the given auxiliary coordinate factory from the cube."""
self._aux_factories.remove(aux_factory)
def _remove_coord(self, coord):
self._dim_coords_and_dims = [(coord_, dim) for coord_, dim in
self._dim_coords_and_dims if coord_
is not coord]
self._aux_coords_and_dims = [(coord_, dims) for coord_, dims in
self._aux_coords_and_dims if coord_
is not coord]
def remove_coord(self, coord):
"""
Removes a coordinate from the cube.
Args:
* coord (string or coord)
The (name of the) coordinate to remove from the cube.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
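        For example (an illustrative sketch; assumes the cube carries a
        'forecast_period' coordinate)::
            cube.remove_coord('forecast_period')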
"""
coord = self.coord(coord)
self._remove_coord(coord)
for factory in self.aux_factories:
factory.update(coord)
def replace_coord(self, new_coord):
"""
Replace the coordinate whose metadata matches the given coordinate.
"""
old_coord = self.coord(new_coord)
dims = self.coord_dims(old_coord)
was_dimensioned = old_coord in self.dim_coords
self._remove_coord(old_coord)
if was_dimensioned and isinstance(new_coord, iris.coords.DimCoord):
self.add_dim_coord(new_coord, dims[0])
else:
self.add_aux_coord(new_coord, dims)
for factory in self.aux_factories:
factory.update(old_coord, new_coord)
def coord_dims(self, coord):
"""
Returns a tuple of the data dimensions relevant to the given
coordinate.
When searching for the given coordinate in the cube the comparison is
made using coordinate metadata equality. Hence the given coordinate
instance need not exist on the cube, and may contain different
coordinate values.
Args:
* coord (string or coord)
The (name of the) coord to look for.
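        For example (an illustrative sketch; assumes the cube has a 'time'
        coordinate)::
            time_dims = cube.coord_dims('time')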
"""
coord = self.coord(coord)
# Search for existing coordinate (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [(dim,) for coord_, dim in self._dim_coords_and_dims if
coord_ is coord]
if not matches:
matches = [dims for coord_, dims in self._aux_coords_and_dims if
coord_ is coord]
# Search derived aux coords
target_defn = coord._as_defn()
if not matches:
match = lambda factory: factory._as_defn() == target_defn
factories = filter(match, self._aux_factories)
matches = [factory.derived_dims(self.coord_dims) for factory in
factories]
# Deprecate name based searching
# -- Search by coord name, if have no match
# XXX Where did this come from? And why isn't it reflected in the
# docstring?
if not matches:
warnings.warn('name based coord matching is deprecated and will '
'be removed in a future release.',
stacklevel=2)
matches = [(dim,) for coord_, dim in self._dim_coords_and_dims if
coord_.name() == coord.name()]
# Finish deprecate name based searching
if not matches:
raise iris.exceptions.CoordinateNotFoundError(coord.name())
return matches[0]
def aux_factory(self, name=None, standard_name=None, long_name=None,
var_name=None):
"""
Returns the single coordinate factory that matches the criteria,
or raises an error if not found.
Kwargs:
* name
If not None, matches against factory.name().
* standard_name
The CF standard name of the desired coordinate factory.
If None, does not check for standard name.
* long_name
An unconstrained description of the coordinate factory.
If None, does not check for long_name.
* var_name
The CF variable name of the desired coordinate factory.
If None, does not check for var_name.
.. note::
If the arguments given do not result in precisely 1 coordinate
factory being matched, an
:class:`iris.exceptions.CoordinateNotFoundError` is raised.
"""
factories = self.aux_factories
if name is not None:
factories = [factory for factory in factories if
factory.name() == name]
if standard_name is not None:
factories = [factory for factory in factories if
factory.standard_name == standard_name]
if long_name is not None:
factories = [factory for factory in factories if
factory.long_name == long_name]
if var_name is not None:
factories = [factory for factory in factories if
factory.var_name == var_name]
if len(factories) > 1:
factory_names = (factory.name() for factory in factories)
msg = 'Expected to find exactly one coordinate factory, but ' \
'found {}. They were: {}.'.format(len(factories),
', '.join(factory_names))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(factories) == 0:
msg = 'Expected to find exactly one coordinate factory, but ' \
'found none.'
raise iris.exceptions.CoordinateNotFoundError(msg)
return factories[0]
def coords(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord=None,
coord_system=None, dim_coords=None, name=None):
"""
Return a list of coordinates in this cube fitting the given criteria.
Kwargs:
* name_or_coord
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris._cube_coord_common.CFVariableMixin`.
(b) a coordinate instance with metadata equal to that of
the desired coordinates. Accepts either a
:class:`iris.coords.DimCoord`, :class:`iris.coords.AuxCoord`,
:class:`iris.aux_factory.AuxCoordFactory`
or :class:`iris.coords.CoordDefn`.
* name
.. deprecated:: 1.6. Please use the name_or_coord kwarg.
* standard_name
The CF standard name of the desired coordinate. If None, does not
check for standard name.
* long_name
An unconstrained description of the coordinate. If None, does not
check for long_name.
* var_name
The CF variable name of the desired coordinate. If None, does not
check for var_name.
* attributes
A dictionary of attributes desired on the coordinates. If None,
does not check for attributes.
* axis
The desired coordinate axis, see
:func:`iris.util.guess_coord_axis`. If None, does not check for
axis. Accepts the values 'X', 'Y', 'Z' and 'T' (case-insensitive).
* contains_dimension
The desired coordinate contains the data dimension. If None, does
not check for the dimension.
* dimensions
The exact data dimensions of the desired coordinate. Coordinates
with no data dimension can be found with an empty tuple or list
(i.e. ``()`` or ``[]``). If None, does not check for dimensions.
* coord
.. deprecated:: 1.6. Please use the name_or_coord kwarg.
* coord_system
Whether the desired coordinates have coordinate systems equal to
the given coordinate system. If None, no check is done.
* dim_coords
Set to True to only return coordinates that are the cube's
dimension coordinates. Set to False to only return coordinates
that are the cube's auxiliary and derived coordinates. If None,
returns all coordinates.
See also :meth:`Cube.coord()<iris.cube.Cube.coord>`.
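        For example (an illustrative sketch)::
            # All of the cube's dimension coordinates.
            dim_coords = cube.coords(dim_coords=True)
            # Any coordinates that span data dimension 0.
            spanning = cube.coords(contains_dimension=0)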
"""
# Handle deprecated kwargs
if name is not None:
name_or_coord = name
warnings.warn('the name kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
if coord is not None:
name_or_coord = coord
warnings.warn('the coord kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
# Finish handling deprecated kwargs
name = None
coord = None
if isinstance(name_or_coord, basestring):
name = name_or_coord
else:
coord = name_or_coord
coords_and_factories = []
if dim_coords in [True, None]:
coords_and_factories += list(self.dim_coords)
if dim_coords in [False, None]:
coords_and_factories += list(self.aux_coords)
coords_and_factories += list(self.aux_factories)
if name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.name() == name]
if standard_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.standard_name == standard_name]
if long_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.long_name == long_name]
if var_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.var_name == var_name]
if axis is not None:
axis = axis.upper()
guess_axis = iris.util.guess_coord_axis
coords_and_factories = [coord_ for coord_ in coords_and_factories
if guess_axis(coord_) == axis]
if attributes is not None:
if not isinstance(attributes, collections.Mapping):
msg = 'The attributes keyword was expecting a dictionary ' \
'type, but got a %s instead.' % type(attributes)
raise ValueError(msg)
attr_filter = lambda coord_: all(k in coord_.attributes and
coord_.attributes[k] == v for
k, v in attributes.iteritems())
coords_and_factories = [coord_ for coord_ in coords_and_factories
if attr_filter(coord_)]
if coord_system is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.coord_system == coord_system]
if coord is not None:
if isinstance(coord, iris.coords.CoordDefn):
defn = coord
else:
defn = coord._as_defn()
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_._as_defn() == defn]
if contains_dimension is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if contains_dimension in
self.coord_dims(coord_)]
if dimensions is not None:
if not isinstance(dimensions, collections.Container):
dimensions = [dimensions]
dimensions = tuple(dimensions)
coords_and_factories = [coord_ for coord_ in coords_and_factories
if self.coord_dims(coord_) == dimensions]
# If any factories remain after the above filters we have to make the
# coords so they can be returned
def extract_coord(coord_or_factory):
if isinstance(coord_or_factory, iris.aux_factory.AuxCoordFactory):
coord = coord_or_factory.make_coord(self.coord_dims)
elif isinstance(coord_or_factory, iris.coords.Coord):
coord = coord_or_factory
else:
msg = 'Expected Coord or AuxCoordFactory, got ' \
'{!r}.'.format(type(coord_or_factory))
raise ValueError(msg)
return coord
coords = [extract_coord(coord_or_factory) for coord_or_factory in
coords_and_factories]
return coords
def coord(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord=None,
coord_system=None, dim_coords=None, name=None):
"""
Return a single coord given the same arguments as :meth:`Cube.coords`.
.. note::
If the arguments given do not result in precisely 1 coordinate
being matched, an :class:`iris.exceptions.CoordinateNotFoundError`
is raised.
.. seealso::
:meth:`Cube.coords()<iris.cube.Cube.coords>` for full keyword
documentation.
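        For example (an illustrative sketch; assumes the cube has exactly one
        latitude coordinate)::
            latitude = cube.coord('latitude')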
"""
# Handle deprecated kwargs
if name is not None:
name_or_coord = name
warnings.warn('the name kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
if coord is not None:
name_or_coord = coord
warnings.warn('the coord kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
# Finish handling deprecated kwargs
coords = self.coords(name_or_coord=name_or_coord,
standard_name=standard_name,
long_name=long_name, var_name=var_name,
attributes=attributes, axis=axis,
contains_dimension=contains_dimension,
dimensions=dimensions,
coord_system=coord_system,
dim_coords=dim_coords)
if len(coords) > 1:
msg = 'Expected to find exactly 1 coordinate, but found %s. ' \
'They were: %s.' % (len(coords), ', '.join(coord.name() for
coord in coords))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(coords) == 0:
bad_name = name or standard_name or long_name or \
(coord and coord.name()) or ''
msg = 'Expected to find exactly 1 %s coordinate, but found ' \
'none.' % bad_name
raise iris.exceptions.CoordinateNotFoundError(msg)
return coords[0]
def coord_system(self, spec=None):
"""
Find the coordinate system of the given type.
If no target coordinate system is provided then find
any available coordinate system.
Kwargs:
* spec:
            The name or type of a coordinate system subclass.
E.g. ::
cube.coord_system("GeogCS")
cube.coord_system(iris.coord_systems.GeogCS)
If spec is provided as a type it can be a superclass of
any coordinate system found.
If spec is None, then find any available coordinate
systems within the :class:`iris.cube.Cube`.
Returns:
The :class:`iris.coord_systems.CoordSystem` or None.
"""
if isinstance(spec, basestring) or spec is None:
spec_name = spec
else:
msg = "type %s is not a subclass of CoordSystem" % spec
assert issubclass(spec, iris.coord_systems.CoordSystem), msg
spec_name = spec.__name__
# Gather a temporary list of our unique CoordSystems.
coord_systems = ClassDict(iris.coord_systems.CoordSystem)
for coord in self.coords():
if coord.coord_system:
coord_systems.add(coord.coord_system, replace=True)
result = None
if spec_name is None:
for key in sorted(coord_systems.keys()):
result = coord_systems[key]
break
else:
result = coord_systems.get(spec_name)
return result
@property
def cell_methods(self):
"""
Tuple of :class:`iris.coords.CellMethod` representing the processing
done on the phenomenon.
"""
return self._cell_methods
@cell_methods.setter
def cell_methods(self, cell_methods):
self._cell_methods = tuple(cell_methods) if cell_methods else tuple()
@property
def shape(self):
"""The shape of the data of this cube."""
shape = self.lazy_data().shape
return shape
@property
def dtype(self):
"""The :class:`numpy.dtype` of the data of this cube."""
return self.lazy_data().dtype
@property
def ndim(self):
"""The number of dimensions in the data of this cube."""
return len(self.shape)
def lazy_data(self, array=None):
"""
Return a :class:`biggus.Array` representing the
multi-dimensional data of the Cube, and optionally provide a
new array of values.
Accessing this method will never cause the data to be loaded.
Similarly, calling methods on, or indexing, the returned Array
will not cause the Cube to have loaded data.
If the data have already been loaded for the Cube, the returned
Array will be a :class:`biggus.NumpyArrayAdapter` which wraps
the numpy array from `self.data`.
Kwargs:
* array (:class:`biggus.Array` or None):
When this is not None it sets the multi-dimensional data of
the cube to the given value.
Returns:
A :class:`biggus.Array` representing the multi-dimensional
data of the Cube.
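        For example (an illustrative sketch)::
            lazy = cube.lazy_data()        # no data values are loaded
            shape = lazy.shape             # metadata only, still deferred
            values = lazy.masked_array()   # realising the values loads them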
"""
if array is not None:
if not isinstance(array, biggus.Array):
raise TypeError('new values must be a biggus.Array')
if self.shape != array.shape:
# The _ONLY_ data reshape permitted is converting a
# 0-dimensional array into a 1-dimensional array of
# length one.
# i.e. self.shape = () and array.shape == (1,)
if self.shape or array.shape != (1,):
raise ValueError('Require cube data with shape %r, got '
'%r.' % (self.shape, array.shape))
self._my_data = array
else:
array = self._my_data
if not isinstance(array, biggus.Array):
array = biggus.NumpyArrayAdapter(array)
return array
@property
def data(self):
"""
The :class:`numpy.ndarray` representing the multi-dimensional data of
the cube.
.. note::
Cubes obtained from netCDF, PP, and FieldsFile files will only
populate this attribute on its first use.
To obtain the shape of the data without causing it to be loaded,
use the Cube.shape attribute.
Example::
>>> fname = iris.sample_data_path('air_temp.pp')
>>> cube = iris.load_cube(fname, 'air_temperature')
>>> # cube.data does not yet have a value.
...
>>> print(cube.shape)
(73, 96)
>>> # cube.data still does not have a value.
...
>>> cube = cube[:10, :20]
>>> # cube.data still does not have a value.
...
>>> data = cube.data
>>> # Only now is the data loaded.
...
>>> print(data.shape)
(10, 20)
"""
data = self._my_data
if not isinstance(data, np.ndarray):
try:
data = data.masked_array()
except MemoryError:
msg = "Failed to create the cube's data as there was not" \
" enough memory available.\n" \
"The array shape would have been {0!r} and the data" \
" type {1}.\n" \
"Consider freeing up variables or indexing the cube" \
" before getting its data."
msg = msg.format(self.shape, data.dtype)
raise MemoryError(msg)
# Unmask the array only if it is filled.
if ma.count_masked(data) == 0:
data = data.data
self._my_data = data
return data
@data.setter
def data(self, value):
data = np.asanyarray(value)
if self.shape != data.shape:
# The _ONLY_ data reshape permitted is converting a 0-dimensional
# array i.e. self.shape == () into a 1-dimensional array of length
# one i.e. data.shape == (1,)
if self.shape or data.shape != (1,):
raise ValueError('Require cube data with shape %r, got '
'%r.' % (self.shape, data.shape))
self._my_data = data
def has_lazy_data(self):
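        """Return True if the cube's data is a deferred (lazy) biggus array."""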
return isinstance(self._my_data, biggus.Array)
@property
def dim_coords(self):
"""
Return a tuple of all the dimension coordinates, ordered by dimension.
.. note::
The length of the returned tuple is not necessarily the same as
:attr:`Cube.ndim` as there may be dimensions on the cube without
dimension coordinates. It is therefore unreliable to use the
resulting tuple to identify the dimension coordinates for a given
dimension - instead use the :meth:`Cube.coord` method with the
``dimensions`` and ``dim_coords`` keyword arguments.
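        For example (an illustrative sketch; assumes data dimension 0 has a
        dimension coordinate), the recommended lookup for a given dimension
        is::
            x_coord = cube.coord(dimensions=0, dim_coords=True)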
"""
return tuple((coord for coord, dim in
sorted(self._dim_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def aux_coords(self):
"""
Return a tuple of all the auxiliary coordinates, ordered by
dimension(s).
"""
return tuple((coord for coord, dims in
sorted(self._aux_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def derived_coords(self):
"""
Return a tuple of all the coordinates generated by the coordinate
factories.
"""
return tuple(factory.make_coord(self.coord_dims) for factory in
sorted(self.aux_factories,
key=lambda factory: factory.name()))
@property
def aux_factories(self):
"""Return a tuple of all the coordinate factories."""
return tuple(self._aux_factories)
def _summary_coord_extra(self, coord, indent):
# Returns the text needed to ensure this coordinate can be
# distinguished from all others with the same name.
extra = ''
similar_coords = self.coords(coord.name())
if len(similar_coords) > 1:
# Find all the attribute keys
keys = set()
for similar_coord in similar_coords:
keys.update(similar_coord.attributes.iterkeys())
# Look for any attributes that vary
vary = set()
attributes = {}
for key in keys:
for similar_coord in similar_coords:
if key not in similar_coord.attributes:
vary.add(key)
break
value = similar_coord.attributes[key]
if attributes.setdefault(key, value) != value:
vary.add(key)
break
keys = sorted(vary & coord.attributes.viewkeys())
bits = ['{}={!r}'.format(key, coord.attributes[key]) for key in
keys]
if bits:
extra = indent + ', '.join(bits)
return extra
def _summary_extra(self, coords, summary, indent):
# Where necessary, inserts extra lines into the summary to ensure
# coordinates can be distinguished.
new_summary = []
for coord, summary in zip(coords, summary):
new_summary.append(summary)
extra = self._summary_coord_extra(coord, indent)
if extra:
new_summary.append(extra)
return new_summary
def summary(self, shorten=False, name_padding=35):
"""
        Unicode string summary of the Cube, giving its name, a list of dim
        coord names versus length and, optionally, relevant coordinate
        information.
"""
# Create a set to contain the axis names for each data dimension.
dim_names = [set() for dim in range(len(self.shape))]
# Add the dim_coord names that participate in the associated data
# dimensions.
for dim in range(len(self.shape)):
dim_coords = self.coords(contains_dimension=dim, dim_coords=True)
if dim_coords:
dim_names[dim].add(dim_coords[0].name())
else:
dim_names[dim].add('-- ')
# Convert axes sets to lists and sort.
dim_names = [sorted(names, key=sorted_axes) for names in dim_names]
# Generate textual summary of the cube dimensionality.
if self.shape == ():
dimension_header = 'scalar cube'
else:
dimension_header = '; '.join(
[', '.join(dim_names[dim]) +
': %d' % dim_shape for dim, dim_shape in
enumerate(self.shape)])
nameunit = '{name} / ({units})'.format(name=self.name(),
units=self.units)
cube_header = '{nameunit!s:{length}} ({dimension})'.format(
length=name_padding,
nameunit=nameunit,
dimension=dimension_header)
summary = ''
# Generate full cube textual summary.
if not shorten:
indent = 10
extra_indent = ' ' * 13
# Cache the derived coords so we can rely on consistent
# object IDs.
derived_coords = self.derived_coords
# Determine the cube coordinates that are scalar (single-valued)
# AND non-dimensioned.
dim_coords = self.dim_coords
aux_coords = self.aux_coords
all_coords = dim_coords + aux_coords + derived_coords
scalar_coords = [coord for coord in all_coords if not
self.coord_dims(coord) and coord.shape == (1,)]
# Determine the cube coordinates that are not scalar BUT
# dimensioned.
scalar_coord_ids = set(map(id, scalar_coords))
vector_dim_coords = [coord for coord in dim_coords if id(coord) not
in scalar_coord_ids]
vector_aux_coords = [coord for coord in aux_coords if id(coord) not
in scalar_coord_ids]
vector_derived_coords = [coord for coord in derived_coords if
id(coord) not in scalar_coord_ids]
# Determine the cube coordinates that don't describe the cube and
# are most likely erroneous.
vector_coords = vector_dim_coords + vector_aux_coords + \
vector_derived_coords
ok_coord_ids = scalar_coord_ids.union(set(map(id, vector_coords)))
invalid_coords = [coord for coord in all_coords if id(coord) not
in ok_coord_ids]
# Sort scalar coordinates by name.
scalar_coords.sort(key=lambda coord: coord.name())
# Sort vector coordinates by data dimension and name.
vector_dim_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_aux_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_derived_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
# Sort other coordinates by name.
invalid_coords.sort(key=lambda coord: coord.name())
#
# Generate textual summary of cube vector coordinates.
#
def vector_summary(vector_coords, cube_header, max_line_offset):
"""
Generates a list of suitably aligned strings containing coord
names and dimensions indicated by one or more 'x' symbols.
.. note::
The function may need to update the cube header so this is
returned with the list of strings.
"""
vector_summary = []
if vector_coords:
# Identify offsets for each dimension text marker.
alignment = np.array([index for index, value in
enumerate(cube_header) if
value == ':'])
# Generate basic textual summary for each vector coordinate
# - WITHOUT dimension markers.
for coord in vector_coords:
vector_summary.append('%*s%s' % (
indent, ' ', iris.util.clip_string(coord.name())))
min_alignment = min(alignment)
# Determine whether the cube header requires realignment
# due to one or more longer vector coordinate summaries.
if max_line_offset >= min_alignment:
delta = max_line_offset - min_alignment + 5
cube_header = '%-*s (%s)' % (int(name_padding + delta),
self.name() or 'unknown',
dimension_header)
alignment += delta
# Generate full textual summary for each vector coordinate
# - WITH dimension markers.
for index, coord in enumerate(vector_coords):
dims = self.coord_dims(coord)
for dim in range(len(self.shape)):
width = alignment[dim] - len(vector_summary[index])
char = 'x' if dim in dims else '-'
line = '{pad:{width}}{char}'.format(pad=' ',
width=width,
char=char)
vector_summary[index] += line
# Interleave any extra lines that are needed to distinguish
# the coordinates.
vector_summary = self._summary_extra(vector_coords,
vector_summary,
extra_indent)
return vector_summary, cube_header
# Calculate the maximum line offset.
max_line_offset = 0
for coord in all_coords:
max_line_offset = max(max_line_offset, len('%*s%s' % (
indent, ' ', iris.util.clip_string(str(coord.name())))))
if vector_dim_coords:
dim_coord_summary, cube_header = vector_summary(
vector_dim_coords, cube_header, max_line_offset)
summary += '\n Dimension coordinates:\n' + \
'\n'.join(dim_coord_summary)
if vector_aux_coords:
aux_coord_summary, cube_header = vector_summary(
vector_aux_coords, cube_header, max_line_offset)
summary += '\n Auxiliary coordinates:\n' + \
'\n'.join(aux_coord_summary)
if vector_derived_coords:
derived_coord_summary, cube_header = vector_summary(
vector_derived_coords, cube_header, max_line_offset)
summary += '\n Derived coordinates:\n' + \
'\n'.join(derived_coord_summary)
#
# Generate textual summary of cube scalar coordinates.
#
scalar_summary = []
if scalar_coords:
for coord in scalar_coords:
if (coord.units in ['1', 'no_unit', 'unknown'] or
coord.units.is_time_reference()):
unit = ''
else:
unit = ' {!s}'.format(coord.units)
# Format cell depending on type of point and whether it
# has a bound
with iris.FUTURE.context(cell_datetime_objects=False):
coord_cell = coord.cell(0)
if isinstance(coord_cell.point, basestring):
# Indent string type coordinates
coord_cell_split = [iris.util.clip_string(str(item))
for item in
coord_cell.point.split('\n')]
line_sep = '\n{pad:{width}}'.format(
pad=' ', width=indent + len(coord.name()) + 2)
coord_cell_str = line_sep.join(coord_cell_split) + unit
else:
# Human readable times
if coord.units.is_time_reference():
coord_cell_cpoint = coord.units.num2date(
coord_cell.point)
if coord_cell.bound is not None:
coord_cell_cbound = coord.units.num2date(
coord_cell.bound)
else:
coord_cell_cpoint = coord_cell.point
coord_cell_cbound = coord_cell.bound
coord_cell_str = '{!s}{}'.format(coord_cell_cpoint,
unit)
if coord_cell.bound is not None:
bound = '({})'.format(', '.join(str(val) for
val in coord_cell_cbound))
coord_cell_str += ', bound={}{}'.format(bound,
unit)
scalar_summary.append('{pad:{width}}{name}: {cell}'.format(
pad=' ', width=indent, name=coord.name(),
cell=coord_cell_str))
# Interleave any extra lines that are needed to distinguish
# the coordinates.
scalar_summary = self._summary_extra(scalar_coords,
scalar_summary,
extra_indent)
summary += '\n Scalar coordinates:\n' + '\n'.join(
scalar_summary)
#
# Generate summary of cube's invalid coordinates.
#
if invalid_coords:
invalid_summary = []
for coord in invalid_coords:
invalid_summary.append(
'%*s%s' % (indent, ' ', coord.name()))
# Interleave any extra lines that are needed to distinguish the
# coordinates.
invalid_summary = self._summary_extra(
invalid_coords, invalid_summary, extra_indent)
summary += '\n Invalid coordinates:\n' + \
'\n'.join(invalid_summary)
#
# Generate summary of cube attributes.
#
if self.attributes:
attribute_lines = []
for name, value in sorted(self.attributes.iteritems()):
value = iris.util.clip_string(unicode(value))
line = u'{pad:{width}}{name}: {value}'.format(pad=' ',
width=indent,
name=name,
value=value)
attribute_lines.append(line)
summary += '\n Attributes:\n' + '\n'.join(attribute_lines)
#
# Generate summary of cube cell methods
#
if self.cell_methods:
summary += '\n Cell methods:\n'
cm_lines = []
for cm in self.cell_methods:
cm_lines.append('%*s%s' % (indent, ' ', str(cm)))
summary += '\n'.join(cm_lines)
# Construct the final cube summary.
summary = cube_header + summary
return summary
def assert_valid(self):
"""Raise an exception if the cube is invalid; otherwise return None."""
warnings.warn('Cube.assert_valid() has been deprecated.')
def __str__(self):
return self.summary().encode(errors='replace')
def __unicode__(self):
return self.summary()
def __repr__(self):
return "<iris 'Cube' of %s>" % self.summary(shorten=True,
name_padding=1)
def __iter__(self):
raise TypeError('Cube is not iterable')
def __getitem__(self, keys):
"""
Cube indexing (through use of square bracket notation) has been
implemented at the data level. That is, the indices provided to this
method should be aligned to the data of the cube, and thus the indices
requested must be applicable directly to the cube.data attribute. All
metadata will be subsequently indexed appropriately.
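        For example (an illustrative sketch; assumes ``cube`` has at least two
        data dimensions)::
            first_field = cube[0, :]    # the leading dimension becomes scalar
            window = cube[:, 10:20]     # slice the second dimension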
"""
# turn the keys into a full slice spec (all dims)
full_slice = iris.util._build_full_slice_given_keys(keys,
len(self.shape))
# make indexing on the cube column based by using the
# column_slices_generator (potentially requires slicing the data
# multiple times)
dimension_mapping, slice_gen = iris.util.column_slices_generator(
full_slice, len(self.shape))
new_coord_dims = lambda coord_: [dimension_mapping[d] for d in
self.coord_dims(coord_) if
dimension_mapping[d] is not None]
try:
first_slice = next(slice_gen)
except StopIteration:
first_slice = None
if first_slice is not None:
data = self._my_data[first_slice]
else:
data = copy.deepcopy(self._my_data)
for other_slice in slice_gen:
data = data[other_slice]
# We don't want a view of the data, so take a copy of it if it's
# not already our own.
if isinstance(data, biggus.Array) or not data.flags['OWNDATA']:
data = copy.deepcopy(data)
# We can turn a masked array into a normal array if it's full.
if isinstance(data, ma.core.MaskedArray):
if ma.count_masked(data) == 0:
data = data.filled()
# Make the new cube slice
cube = Cube(data)
cube.metadata = copy.deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
# Slice the coords
for coord in self.aux_coords:
coord_keys = tuple([full_slice[dim] for dim in
self.coord_dims(coord)])
try:
new_coord = coord[coord_keys]
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_coord_dims(coord))
coord_mapping[id(coord)] = new_coord
for coord in self.dim_coords:
coord_keys = tuple([full_slice[dim] for dim in
self.coord_dims(coord)])
new_dims = new_coord_dims(coord)
# Try/Catch to handle slicing that makes the points/bounds
# non-monotonic
try:
new_coord = coord[coord_keys]
if not new_dims:
# If the associated dimension has been sliced so the coord
# is a scalar move the coord to the aux_coords container
cube.add_aux_coord(new_coord, new_dims)
else:
cube.add_dim_coord(new_coord, new_dims)
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_dims)
coord_mapping[id(coord)] = new_coord
for factory in self.aux_factories:
cube.add_aux_factory(factory.updated(coord_mapping))
return cube
def subset(self, coord):
"""
Get a subset of the cube by providing the desired resultant coordinate.
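        For example (an illustrative sketch; assumes the cube has a 1d
        'model_level_number' coordinate)::
            levels_of_interest = cube.coord('model_level_number')[:3]
            sub_cube = cube.subset(levels_of_interest)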
"""
if not isinstance(coord, iris.coords.Coord):
raise ValueError('coord_to_extract must be a valid Coord.')
# Get the coord to extract from the cube
coord_to_extract = self.coord(coord)
if len(self.coord_dims(coord_to_extract)) > 1:
msg = "Currently, only 1D coords can be used to subset a cube"
raise iris.exceptions.CoordinateMultiDimError(msg)
# Identify the dimension of the cube which this coordinate references
coord_to_extract_dim = self.coord_dims(coord_to_extract)[0]
# Identify the indices which intersect the requested coord and
# coord_to_extract
coordinate_indices = coord_to_extract.intersect(coord,
return_indices=True)
# Build up a slice which spans the whole of the cube
full_slice = [slice(None, None)] * len(self.shape)
# Update the full slice to only extract specific indices which were
# identified above
full_slice[coord_to_extract_dim] = coordinate_indices
full_slice = tuple(full_slice)
return self[full_slice]
def extract(self, constraint):
"""
Filter the cube by the given constraint using
:meth:`iris.Constraint.extract` method.
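        For example (an illustrative sketch; assumes the cube has a latitude
        coordinate with a point at exactly 0)::
            equator_cube = cube.extract(iris.Constraint(latitude=0))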
"""
# Cast the constraint into a proper constraint if it is not so already
constraint = iris._constraints.as_constraint(constraint)
return constraint.extract(self)
def intersection(self, *args, **kwargs):
"""
Return the intersection of the cube with specified coordinate
ranges.
Coordinate ranges can be specified as:
(a) instances of :class:`iris.coords.CoordExtent`.
(b) keyword arguments, where the keyword name specifies the name
of the coordinate (as defined in :meth:`iris.cube.Cube.coords()`)
and the value defines the corresponding range of coordinate
values as a tuple. The tuple must contain two, three, or four
items corresponding to: (minimum, maximum, min_inclusive,
max_inclusive). Where the items are defined as:
* minimum
The minimum value of the range to select.
* maximum
The maximum value of the range to select.
* min_inclusive
If True, coordinate values equal to `minimum` will be included
in the selection. Default is True.
* max_inclusive
If True, coordinate values equal to `maximum` will be included
in the selection. Default is True.
To perform an intersection that ignores any bounds on the coordinates,
set the optional keyword argument *ignore_bounds* to True. Defaults to
False.
.. note::
For ranges defined over "circular" coordinates (i.e. those
where the `units` attribute has a modulus defined) the cube
            will be "rolled" to fit where necessary.
.. warning::
Currently this routine only works with "circular"
coordinates (as defined in the previous note.)
For example::
>>> import iris
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube.coord('longitude').points[::10])
[ 0. 37.49999237 74.99998474 112.49996948 \
149.99996948
187.49995422 224.99993896 262.49993896 299.99993896 \
337.49990845]
>>> subset = cube.intersection(longitude=(30, 50))
>>> print(subset.coord('longitude').points)
[ 33.74999237 37.49999237 41.24998856 44.99998856 48.74998856]
>>> subset = cube.intersection(longitude=(-10, 10))
>>> print(subset.coord('longitude').points)
[-7.50012207 -3.75012207 0. 3.75 7.5 ]
Returns:
A new :class:`~iris.cube.Cube` giving the subset of the cube
which intersects with the requested coordinate intervals.
"""
result = self
ignore_bounds = kwargs.pop('ignore_bounds', False)
for arg in args:
result = result._intersect(*arg, ignore_bounds=ignore_bounds)
for name, value in kwargs.iteritems():
result = result._intersect(name, *value,
ignore_bounds=ignore_bounds)
return result
def _intersect(self, name_or_coord, minimum, maximum,
min_inclusive=True, max_inclusive=True,
ignore_bounds=False):
coord = self.coord(name_or_coord)
if coord.ndim != 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
if coord.nbounds not in (0, 2):
raise ValueError('expected 0 or 2 bound values per cell')
if minimum > maximum:
raise ValueError('minimum greater than maximum')
modulus = coord.units.modulus
if modulus is None:
raise ValueError('coordinate units with no modulus are not yet'
' supported')
subsets, points, bounds = self._intersect_modulus(coord,
minimum, maximum,
min_inclusive,
max_inclusive,
ignore_bounds)
# By this point we have either one or two subsets along the relevant
# dimension. If it's just one subset (which might be a slice or an
# unordered collection of indices) we can simply index the cube
# and we're done. If it's two subsets we need to stitch the two
# pieces together.
def make_chunk(key):
chunk = self[key_tuple_prefix + (key,)]
chunk_coord = chunk.coord(coord)
chunk_coord.points = points[(key,)]
if chunk_coord.has_bounds():
chunk_coord.bounds = bounds[(key,)]
return chunk
dim, = self.coord_dims(coord)
key_tuple_prefix = (slice(None),) * dim
chunks = [make_chunk(key) for key in subsets]
if len(chunks) == 1:
result = chunks[0]
else:
if self.has_lazy_data():
data = biggus.LinearMosaic([chunk.lazy_data()
for chunk in chunks],
dim)
else:
module = ma if ma.isMaskedArray(self.data) else np
data = module.concatenate([chunk.data for chunk in chunks],
dim)
result = iris.cube.Cube(data)
result.metadata = copy.deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
def create_coords(src_coords, add_coord):
# Add copies of the source coordinates, selecting
# the appropriate subsets out of coordinates which
# share the intersection dimension.
preserve_circular = (min_inclusive and max_inclusive and
abs(maximum - minimum) == modulus)
for src_coord in src_coords:
dims = self.coord_dims(src_coord)
if dim in dims:
dim_within_coord = dims.index(dim)
points = np.concatenate([chunk.coord(src_coord).points
for chunk in chunks],
dim_within_coord)
if src_coord.has_bounds():
bounds = np.concatenate(
[chunk.coord(src_coord).bounds
for chunk in chunks],
dim_within_coord)
else:
bounds = None
result_coord = src_coord.copy(points=points,
bounds=bounds)
circular = getattr(result_coord, 'circular', False)
if circular and not preserve_circular:
result_coord.circular = False
else:
result_coord = src_coord.copy()
add_coord(result_coord, dims)
coord_mapping[id(src_coord)] = result_coord
create_coords(self.dim_coords, result.add_dim_coord)
create_coords(self.aux_coords, result.add_aux_coord)
for factory in self.aux_factories:
result.add_aux_factory(factory.updated(coord_mapping))
return result
def _intersect_modulus(self, coord, minimum, maximum, min_inclusive,
max_inclusive, ignore_bounds):
modulus = coord.units.modulus
if maximum > minimum + modulus:
raise ValueError("requested range greater than coordinate's"
" unit's modulus")
if coord.has_bounds():
values = coord.bounds
else:
values = coord.points
if values.max() > values.min() + modulus:
raise ValueError("coordinate's range greater than coordinate's"
" unit's modulus")
min_comp = np.less_equal if min_inclusive else np.less
max_comp = np.less_equal if max_inclusive else np.less
if coord.has_bounds():
bounds = wrap_lons(coord.bounds, minimum, modulus)
if ignore_bounds:
points = wrap_lons(coord.points, minimum, modulus)
inside_indices, = np.where(
np.logical_and(min_comp(minimum, points),
max_comp(points, maximum)))
else:
inside = np.logical_and(min_comp(minimum, bounds),
max_comp(bounds, maximum))
inside_indices, = np.where(np.any(inside, axis=1))
# To ensure that bounds (and points) of matching cells aren't
# "scrambled" by the wrap operation we detect split cells that
# straddle the wrap point and choose a new wrap point which avoids
# split cells.
# For example: the cell [349.875, 350.4375] wrapped at -10 would
# become [349.875, -9.5625] which is no longer valid. The lower
# cell bound value (and possibly associated point) are
# recalculated so that they are consistent with the extended
# wrapping scheme which moves the wrap point to the correct lower
# bound value (-10.125) thus resulting in the cell no longer
# being split. For bounds which may extend exactly the length of
# the modulus, we simply preserve the point to bound difference,
# and call the new bounds = the new points + the difference.
pre_wrap_delta = np.diff(coord.bounds[inside_indices])
post_wrap_delta = np.diff(bounds[inside_indices])
close_enough = np.allclose(pre_wrap_delta, post_wrap_delta)
if not close_enough:
split_cell_indices, _ = np.where(pre_wrap_delta !=
post_wrap_delta)
# Recalculate the extended minimum.
indices = inside_indices[split_cell_indices]
cells = bounds[indices]
cells_delta = np.diff(coord.bounds[indices])
# Watch out for ascending/descending bounds
if cells_delta[0, 0] > 0:
cells[:, 0] = cells[:, 1] - cells_delta[:, 0]
minimum = np.min(cells[:, 0])
else:
cells[:, 1] = cells[:, 0] + cells_delta[:, 0]
minimum = np.min(cells[:, 1])
points = wrap_lons(coord.points, minimum, modulus)
bound_diffs = coord.points[:, np.newaxis] - coord.bounds
bounds = points[:, np.newaxis] - bound_diffs
else:
points = wrap_lons(coord.points, minimum, modulus)
bounds = None
inside_indices, = np.where(
np.logical_and(min_comp(minimum, points),
max_comp(points, maximum)))
if isinstance(coord, iris.coords.DimCoord):
delta = coord.points[inside_indices] - points[inside_indices]
step = np.rint(np.diff(delta) / modulus)
non_zero_step_indices = np.nonzero(step)[0]
if non_zero_step_indices.size:
# A contiguous block at the start and another at the
# end. (NB. We can't have more than two blocks
# because we've already restricted the coordinate's
# range to its modulus).
end_of_first_chunk = non_zero_step_indices[0]
subsets = [slice(inside_indices[end_of_first_chunk + 1], None),
slice(None, inside_indices[end_of_first_chunk] + 1)]
else:
# A single, contiguous block.
subsets = [slice(inside_indices[0], inside_indices[-1] + 1)]
else:
# An AuxCoord could have its values in an arbitrary
# order, and hence a range of values can select an
# arbitrary subset. Also, we want to preserve the order
# from the original AuxCoord. So we just use the indices
# directly.
subsets = [inside_indices]
return subsets, points, bounds
def _as_list_of_coords(self, names_or_coords):
"""
Convert a name, coord, or list of names/coords to a list of coords.
"""
# If not iterable, convert to list of a single item
if not hasattr(names_or_coords, '__iter__'):
names_or_coords = [names_or_coords]
coords = []
for name_or_coord in names_or_coords:
if (isinstance(name_or_coord, basestring) or
isinstance(name_or_coord, iris.coords.Coord)):
coords.append(self.coord(name_or_coord))
else:
# Don't know how to handle this type
msg = "Don't know how to handle coordinate of type %s. " \
"Ensure all coordinates are of type basestring or " \
"iris.coords.Coord." % type(name_or_coord)
raise TypeError(msg)
return coords
def slices_over(self, ref_to_slice):
"""
Return an iterator of all subcubes along a given coordinate or
dimension index, or multiple of these.
Args:
* ref_to_slice (string, coord, dimension index or a list of these):
Determines which dimensions will be iterated along (i.e. the
dimensions that are not returned in the subcubes).
A mix of input types can also be provided.
Returns:
An iterator of subcubes.
For example, to get all subcubes along the time dimension::
for sub_cube in cube.slices_over('time'):
print(sub_cube)
.. seealso:: :meth:`iris.cube.Cube.slices`.
.. note::
The order of dimension references to slice along does not affect
the order of returned items in the iterator; instead the ordering
is based on the fastest-changing dimension.
"""
# Required to handle a mix between types.
if not hasattr(ref_to_slice, '__iter__'):
ref_to_slice = [ref_to_slice]
slice_dims = set()
for ref in ref_to_slice:
try:
coord, = self._as_list_of_coords(ref)
except TypeError:
dim = int(ref)
if dim < 0 or dim > self.ndim:
msg = ('Requested an iterator over a dimension ({}) '
'which does not exist.'.format(dim))
raise ValueError(msg)
# Convert coord index to a single-element list to prevent a
# TypeError when `slice_dims.update` is called with it.
dims = [dim]
else:
dims = self.coord_dims(coord)
slice_dims.update(dims)
all_dims = set(range(self.ndim))
opposite_dims = list(all_dims - slice_dims)
return self.slices(opposite_dims, ordered=False)
def slices(self, ref_to_slice, ordered=True):
"""
Return an iterator of all subcubes given the coordinates or dimension
indices desired to be present in each subcube.
Args:
* ref_to_slice (string, coord, dimension index or a list of these):
Determines which dimensions will be returned in the subcubes (i.e.
the dimensions that are not iterated over).
A mix of input types can also be provided. They must all be
orthogonal (i.e. point to different dimensions).
Kwargs:
* ordered: if True, the order which the coords to slice or data_dims
are given will be the order in which they represent the data in
the resulting cube slices. If False, the order will follow that of
the source cube. Default is True.
Returns:
An iterator of subcubes.
For example, to get all 2d longitude/latitude subcubes from a
multi-dimensional cube::
for sub_cube in cube.slices(['longitude', 'latitude']):
print(sub_cube)
.. seealso:: :meth:`iris.cube.Cube.slices_over`.
"""
if not isinstance(ordered, bool):
raise TypeError("'ordered' argument to slices must be boolean.")
# Required to handle a mix between types
if not hasattr(ref_to_slice, '__iter__'):
ref_to_slice = [ref_to_slice]
dim_to_slice = []
for ref in ref_to_slice:
try:
# attempt to handle as coordinate
coord = self._as_list_of_coords(ref)[0]
dims = self.coord_dims(coord)
if not dims:
msg = ('Requested an iterator over a coordinate ({}) '
'which does not describe a dimension.')
msg = msg.format(coord.name())
raise ValueError(msg)
dim_to_slice.extend(dims)
except TypeError:
try:
# attempt to handle as dimension index
dim = int(ref)
except ValueError:
raise ValueError('{} Incompatible type {} for '
'slicing'.format(ref, type(ref)))
if dim < 0 or dim > self.ndim:
msg = ('Requested an iterator over a dimension ({}) '
'which does not exist.'.format(dim))
raise ValueError(msg)
dim_to_slice.append(dim)
if len(set(dim_to_slice)) != len(dim_to_slice):
msg = 'The requested coordinates are not orthogonal.'
raise ValueError(msg)
        # Create a list with the shape of our data
dims_index = list(self.shape)
# Set the dimensions which have been requested to length 1
for d in dim_to_slice:
dims_index[d] = 1
return _SliceIterator(self, dims_index, dim_to_slice, ordered)
def transpose(self, new_order=None):
"""
Re-order the data dimensions of the cube in-place.
new_order - list of ints, optional
By default, reverse the dimensions, otherwise permute the
axes according to the values given.
.. note:: If defined, new_order must span all of the data dimensions.
Example usage::
            # Put the second dimension first, followed by the third dimension,
            # and finally put the first dimension third.
            cube.transpose([1, 2, 0])
"""
if new_order is None:
new_order = np.arange(self.data.ndim)[::-1]
elif len(new_order) != self.data.ndim:
raise ValueError('Incorrect number of dimensions.')
# The data needs to be copied, otherwise this view of the transposed
# data will not be contiguous. Ensure not to assign via the cube.data
# setter property since we are reshaping the cube payload in-place.
self._my_data = np.transpose(self.data, new_order).copy()
dim_mapping = {src: dest for dest, src in enumerate(new_order)}
def remap_dim_coord(coord_and_dim):
coord, dim = coord_and_dim
return coord, dim_mapping[dim]
self._dim_coords_and_dims = list(map(remap_dim_coord,
self._dim_coords_and_dims))
def remap_aux_coord(coord_and_dims):
coord, dims = coord_and_dims
return coord, tuple(dim_mapping[dim] for dim in dims)
self._aux_coords_and_dims = list(map(remap_aux_coord,
self._aux_coords_and_dims))
def xml(self, checksum=False, order=True, byteorder=True):
"""
Returns a fully valid CubeML string representation of the Cube.
"""
doc = Document()
cube_xml_element = self._xml_element(doc, checksum=checksum,
order=order,
byteorder=byteorder)
cube_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
doc.appendChild(cube_xml_element)
# Print our newly created XML
return doc.toprettyxml(indent=" ")
def _xml_element(self, doc, checksum=False, order=True, byteorder=True):
cube_xml_element = doc.createElement("cube")
if self.standard_name:
cube_xml_element.setAttribute('standard_name', self.standard_name)
if self.long_name:
cube_xml_element.setAttribute('long_name', self.long_name)
if self.var_name:
cube_xml_element.setAttribute('var_name', self.var_name)
cube_xml_element.setAttribute('units', str(self.units))
if self.attributes:
attributes_element = doc.createElement('attributes')
for name in sorted(self.attributes.iterkeys()):
attribute_element = doc.createElement('attribute')
attribute_element.setAttribute('name', name)
value = str(self.attributes[name])
attribute_element.setAttribute('value', value)
attributes_element.appendChild(attribute_element)
cube_xml_element.appendChild(attributes_element)
coords_xml_element = doc.createElement("coords")
for coord in sorted(self.coords(), key=lambda coord: coord.name()):
# make a "cube coordinate" element which holds the dimensions (if
# appropriate) which itself will have a sub-element of the
# coordinate instance itself.
cube_coord_xml_element = doc.createElement("coord")
coords_xml_element.appendChild(cube_coord_xml_element)
dims = list(self.coord_dims(coord))
if dims:
cube_coord_xml_element.setAttribute("datadims", repr(dims))
coord_xml_element = coord.xml_element(doc)
cube_coord_xml_element.appendChild(coord_xml_element)
cube_xml_element.appendChild(coords_xml_element)
# cell methods (no sorting!)
cell_methods_xml_element = doc.createElement("cellMethods")
for cm in self.cell_methods:
cell_method_xml_element = cm.xml_element(doc)
cell_methods_xml_element.appendChild(cell_method_xml_element)
cube_xml_element.appendChild(cell_methods_xml_element)
data_xml_element = doc.createElement("data")
data_xml_element.setAttribute("shape", str(self.shape))
# NB. Getting a checksum triggers any deferred loading,
# in which case it also has the side-effect of forcing the
# byte order to be native.
if checksum:
data = self.data
# Ensure consistent memory layout for checksums.
def normalise(data):
data = np.ascontiguousarray(data)
if data.dtype.newbyteorder('<') != data.dtype:
data = data.byteswap(False)
data.dtype = data.dtype.newbyteorder('<')
return data
if isinstance(data, ma.MaskedArray):
# Fill in masked values to avoid the checksum being
# sensitive to unused numbers. Use a fixed value so
# a change in fill_value doesn't affect the
# checksum.
crc = '0x%08x' % (
zlib.crc32(normalise(data.filled(0))) & 0xffffffff, )
data_xml_element.setAttribute("checksum", crc)
if ma.is_masked(data):
crc = '0x%08x' % (
zlib.crc32(normalise(data.mask)) & 0xffffffff, )
else:
crc = 'no-masked-elements'
data_xml_element.setAttribute("mask_checksum", crc)
data_xml_element.setAttribute('fill_value',
str(data.fill_value))
else:
crc = '0x%08x' % (zlib.crc32(normalise(data)) & 0xffffffff, )
data_xml_element.setAttribute("checksum", crc)
elif self.has_lazy_data():
data_xml_element.setAttribute("state", "deferred")
else:
data_xml_element.setAttribute("state", "loaded")
# Add the dtype, and also the array and mask orders if the
# data is loaded.
if not self.has_lazy_data():
data = self.data
dtype = data.dtype
def _order(array):
order = ''
if array.flags['C_CONTIGUOUS']:
order = 'C'
elif array.flags['F_CONTIGUOUS']:
order = 'F'
return order
if order:
data_xml_element.setAttribute('order', _order(data))
# NB. dtype.byteorder can return '=', which is bad for
# cross-platform consistency - so we use dtype.str
# instead.
if byteorder:
array_byteorder = {'>': 'big', '<': 'little'}.get(dtype.str[0])
if array_byteorder is not None:
data_xml_element.setAttribute('byteorder', array_byteorder)
if order and isinstance(data, ma.core.MaskedArray):
data_xml_element.setAttribute('mask_order',
_order(data.mask))
else:
dtype = self.lazy_data().dtype
data_xml_element.setAttribute('dtype', dtype.name)
cube_xml_element.appendChild(data_xml_element)
return cube_xml_element
def copy(self, data=None):
"""
Returns a deep copy of this cube.
Kwargs:
* data:
Replace the data of the cube copy with provided data payload.
Returns:
A copy instance of the :class:`Cube`.
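        For example (an illustrative sketch)::
            doubled = cube.copy(data=cube.data * 2.0)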
"""
return self._deepcopy({}, data)
def __copy__(self):
"""Shallow copying is disallowed for Cubes."""
raise copy.Error("Cube shallow-copy not allowed. Use deepcopy() or "
"Cube.copy()")
def __deepcopy__(self, memo):
return self._deepcopy(memo)
def _deepcopy(self, memo, data=None):
if data is None:
# Use a copy of the source cube data.
if self.has_lazy_data():
# Use copy.copy, as lazy arrays don't have a copy method.
new_cube_data = copy.copy(self.lazy_data())
else:
# Do *not* use copy.copy, as NumPy 0-d arrays do that wrong.
new_cube_data = self.data.copy()
else:
# Use the provided data (without copying it).
if not isinstance(data, biggus.Array):
data = np.asanyarray(data)
if data.shape != self.shape:
msg = 'Cannot copy cube with new data of a different shape ' \
'(slice or subset the cube first).'
raise ValueError(msg)
new_cube_data = data
new_dim_coords_and_dims = copy.deepcopy(self._dim_coords_and_dims,
memo)
new_aux_coords_and_dims = copy.deepcopy(self._aux_coords_and_dims,
memo)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
for old_pair, new_pair in zip(self._dim_coords_and_dims,
new_dim_coords_and_dims):
coord_mapping[id(old_pair[0])] = new_pair[0]
for old_pair, new_pair in zip(self._aux_coords_and_dims,
new_aux_coords_and_dims):
coord_mapping[id(old_pair[0])] = new_pair[0]
new_cube = Cube(new_cube_data,
dim_coords_and_dims=new_dim_coords_and_dims,
aux_coords_and_dims=new_aux_coords_and_dims)
new_cube.metadata = copy.deepcopy(self.metadata, memo)
for factory in self.aux_factories:
new_cube.add_aux_factory(factory.updated(coord_mapping))
return new_cube
# START OPERATOR OVERLOADS
def __eq__(self, other):
result = NotImplemented
if isinstance(other, Cube):
result = self.metadata == other.metadata
# having checked the metadata, now check the coordinates
if result:
coord_comparison = iris.analysis.coord_comparison(self, other)
# if there are any coordinates which are not equal
result = not (coord_comparison['not_equal'] or
coord_comparison['non_equal_data_dimension'])
# having checked everything else, check approximate data
# equality - loading the data if has not already been loaded.
if result:
result = np.all(np.abs(self.data - other.data) < 1e-8)
return result
# Must supply __ne__, Python does not defer to __eq__ for negative equality
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
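    # Equality therefore checks metadata, then coordinates, then the data to
    # an absolute tolerance of 1e-8.  A minimal sketch (``cube`` stands for
    # any loaded cube with floating point data):
    #
    #     other = cube.copy()
    #     other.data = other.data + 1e-9
    #     cube == other    # True - the perturbation is below the tolerance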
def __add__(self, other):
return iris.analysis.maths.add(self, other, ignore=True)
__radd__ = __add__
def __sub__(self, other):
return iris.analysis.maths.subtract(self, other, ignore=True)
__mul__ = iris.analysis.maths.multiply
__rmul__ = iris.analysis.maths.multiply
__div__ = iris.analysis.maths.divide
__truediv__ = iris.analysis.maths.divide
__pow__ = iris.analysis.maths.exponentiate
# END OPERATOR OVERLOADS
def add_history(self, string):
"""
Add the given string to the cube's history.
        If a history attribute does not exist, then one will be created.
.. deprecated:: 1.6
Add/modify history metadata within
            :attr:`~iris.cube.Cube.attributes` as needed.
"""
warnings.warn("Cube.add_history() has been deprecated - "
"please modify/create cube.attributes['history'] "
"as needed.")
timestamp = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S")
string = '%s Iris: %s' % (timestamp, string)
try:
history = self.attributes['history']
self.attributes['history'] = '%s\n%s' % (history, string)
except KeyError:
self.attributes['history'] = string
# START ANALYSIS ROUTINES
regridded = iris.util._wrap_function_for_method(
iris.analysis.interpolate.regrid,
"""
Returns a new cube with values derived from this cube on the
horizontal grid specified by the grid_cube.
""")
# END ANALYSIS ROUTINES
def collapsed(self, coords, aggregator, **kwargs):
"""
Collapse one or more dimensions over the cube given the coordinate/s
and an aggregation.
Examples of aggregations that may be used include
:data:`~iris.analysis.COUNT` and :data:`~iris.analysis.MAX`.
Weighted aggregations (:class:`iris.analysis.WeightedAggregator`) may
also be supplied. These include :data:`~iris.analysis.MEAN` and
        :data:`~iris.analysis.SUM`.
Weighted aggregations support an optional *weights* keyword argument.
If set, this should be supplied as an array of weights whose shape
matches the cube. Values for latitude-longitude area weights may be
calculated using :func:`iris.analysis.cartography.area_weights`.
Some Iris aggregators support "lazy" evaluation, meaning that
cubes resulting from this method may represent data arrays which are
not computed until the data is requested (e.g. via ``cube.data`` or
``iris.save``). If lazy evaluation exists for the given aggregator
it will be used wherever possible when this cube's data is itself
a deferred array.
Args:
* coords (string, coord or a list of strings/coords):
Coordinate names/coordinates over which the cube should be
collapsed.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied for collapse operation.
Kwargs:
* kwargs:
Aggregation function keyword arguments.
Returns:
Collapsed cube.
For example:
>>> import iris
>>> import iris.analysis
>>> path = iris.sample_data_path('ostia_monthly.nc')
>>> cube = iris.load_cube(path)
>>> new_cube = cube.collapsed('longitude', iris.analysis.MEAN)
>>> print(new_cube)
surface_temperature / (K) (time: 54; latitude: 18)
Dimension coordinates:
time x -
latitude - x
Auxiliary coordinates:
forecast_reference_time x -
Scalar coordinates:
forecast_period: 0 hours
longitude: 180.0 degrees, bound=(0.0, 360.0) degrees
Attributes:
Conventions: CF-1.5
STASH: m01s00i024
Cell methods:
mean: month, year
mean: longitude
.. note::
Some aggregations are not commutative and hence the order of
processing is important i.e.::
tmp = cube.collapsed('realization', iris.analysis.VARIANCE)
result = tmp.collapsed('height', iris.analysis.VARIANCE)
is not necessarily the same result as::
tmp = cube.collapsed('height', iris.analysis.VARIANCE)
result2 = tmp.collapsed('realization', iris.analysis.VARIANCE)
Conversely operations which operate on more than one coordinate
at the same time are commutative as they are combined internally
into a single operation. Hence the order of the coordinates
supplied in the list does not matter::
cube.collapsed(['longitude', 'latitude'],
iris.analysis.VARIANCE)
is the same (apart from the logically equivalent cell methods that
may be created etc.) as::
cube.collapsed(['latitude', 'longitude'],
iris.analysis.VARIANCE)
"""
# Convert any coordinate names to coordinates
coords = self._as_list_of_coords(coords)
if (isinstance(aggregator, iris.analysis.WeightedAggregator) and
not aggregator.uses_weighting(**kwargs)):
msg = "Collapsing spatial coordinate {!r} without weighting"
lat_match = [coord for coord in coords
if 'latitude' in coord.name()]
if lat_match:
for coord in lat_match:
warnings.warn(msg.format(coord.name()))
# Determine the dimensions we need to collapse (and those we don't)
if aggregator.cell_method == 'peak':
dims_to_collapse = [list(self.coord_dims(coord))
for coord in coords]
# Remove duplicate dimensions.
new_dims = collections.OrderedDict.fromkeys(
d for dim in dims_to_collapse for d in dim)
# Reverse the dimensions so the order can be maintained when
# reshaping the data.
dims_to_collapse = list(new_dims)[::-1]
else:
dims_to_collapse = set()
for coord in coords:
dims_to_collapse.update(self.coord_dims(coord))
if not dims_to_collapse:
msg = 'Cannot collapse a dimension which does not describe any ' \
'data.'
raise iris.exceptions.CoordinateCollapseError(msg)
untouched_dims = set(range(self.ndim)) - set(dims_to_collapse)
# Remove the collapsed dimension(s) from the metadata
indices = [slice(None, None)] * self.ndim
for dim in dims_to_collapse:
indices[dim] = 0
collapsed_cube = self[tuple(indices)]
# Collapse any coords that span the dimension(s) being collapsed
for coord in self.dim_coords + self.aux_coords:
coord_dims = self.coord_dims(coord)
if set(dims_to_collapse).intersection(coord_dims):
local_dims = [coord_dims.index(dim) for dim in
dims_to_collapse if dim in coord_dims]
collapsed_cube.replace_coord(coord.collapsed(local_dims))
untouched_dims = sorted(untouched_dims)
# Record the axis(s) argument passed to 'aggregation', so the same is
# passed to the 'update_metadata' function.
collapse_axis = -1
data_result = None
# Perform the actual aggregation.
if aggregator.cell_method == 'peak':
# The PEAK aggregator must collapse each coordinate separately.
untouched_shape = [self.shape[d] for d in untouched_dims]
collapsed_shape = [self.shape[d] for d in dims_to_collapse]
new_shape = untouched_shape + collapsed_shape
array_dims = untouched_dims + dims_to_collapse
unrolled_data = np.transpose(
self.data, array_dims).reshape(new_shape)
for dim in dims_to_collapse:
unrolled_data = aggregator.aggregate(unrolled_data,
axis=-1,
**kwargs)
data_result = unrolled_data
# Perform the aggregation in lazy form if possible.
elif (aggregator.lazy_func is not None
and len(dims_to_collapse) == 1 and self.has_lazy_data()):
# Use a lazy operation separately defined by the aggregator, based
# on the cube lazy array.
# NOTE: do not reform the data in this case, as 'lazy_aggregate'
# accepts multiple axes (unlike 'aggregate').
collapse_axis = dims_to_collapse
try:
data_result = aggregator.lazy_aggregate(self.lazy_data(),
collapse_axis,
**kwargs)
except TypeError:
# TypeError - when unexpected keywords passed through (such as
# weights to mean)
pass
# If we weren't able to complete a lazy aggregation, compute it
# directly now.
if data_result is None:
# Perform the (non-lazy) aggregation over the cube data
# First reshape the data so that the dimensions being aggregated
# over are grouped 'at the end' (i.e. axis=-1).
dims_to_collapse = sorted(dims_to_collapse)
end_size = reduce(operator.mul, (self.shape[dim] for dim in
dims_to_collapse))
untouched_shape = [self.shape[dim] for dim in untouched_dims]
new_shape = untouched_shape + [end_size]
dims = untouched_dims + dims_to_collapse
unrolled_data = np.transpose(self.data, dims).reshape(new_shape)
# Perform the same operation on the weights if applicable
if kwargs.get("weights") is not None:
weights = kwargs["weights"].view()
kwargs["weights"] = np.transpose(weights,
dims).reshape(new_shape)
data_result = aggregator.aggregate(unrolled_data,
axis=-1,
**kwargs)
aggregator.update_metadata(collapsed_cube, coords, axis=collapse_axis,
**kwargs)
result = aggregator.post_process(collapsed_cube, data_result, coords,
**kwargs)
return result
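    # A minimal sketch of the weighted collapse described in the docstring,
    # assuming a cube with bounded latitude and longitude coordinates:
    #
    #     import iris.analysis.cartography
    #     weights = iris.analysis.cartography.area_weights(cube)
    #     area_mean = cube.collapsed(['latitude', 'longitude'],
    #                                iris.analysis.MEAN, weights=weights)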
def aggregated_by(self, coords, aggregator, **kwargs):
"""
Perform aggregation over the cube given one or more "group
coordinates".
A "group coordinate" is a coordinate where repeating values represent a
single group, such as a month coordinate on a daily time slice.
TODO: It is not clear if repeating values must be consecutive to form a
group.
The group coordinates must all be over the same cube dimension. Each
common value group identified over all the group-by coordinates is
collapsed using the provided aggregator.
Args:
* coords (list of coord names or :class:`iris.coords.Coord` instances):
One or more coordinates over which group aggregation is to be
performed.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied to each group.
Kwargs:
* kwargs:
Aggregator and aggregation function keyword arguments.
Returns:
:class:`iris.cube.Cube`.
.. note::
This operation does not yet have support for lazy evaluation.
For example:
>>> import iris
>>> import iris.analysis
>>> import iris.coord_categorisation as cat
>>> fname = iris.sample_data_path('ostia_monthly.nc')
>>> cube = iris.load_cube(fname, 'surface_temperature')
>>> cat.add_year(cube, 'time', name='year')
>>> new_cube = cube.aggregated_by('year', iris.analysis.MEAN)
>>> print(new_cube)
surface_temperature / (K) \
(time: 5; latitude: 18; longitude: 432)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_reference_time \
x - -
year \
x - -
Scalar coordinates:
forecast_period: 0 hours
Attributes:
Conventions: CF-1.5
STASH: m01s00i024
Cell methods:
mean: month, year
mean: year
"""
groupby_coords = []
dimension_to_groupby = None
# We can't handle weights
if isinstance(aggregator, iris.analysis.WeightedAggregator) and \
aggregator.uses_weighting(**kwargs):
raise ValueError('Invalid Aggregation, aggregated_by() cannot use'
' weights.')
coords = self._as_list_of_coords(coords)
for coord in sorted(coords, key=lambda coord: coord._as_defn()):
if coord.ndim > 1:
msg = 'Cannot aggregate_by coord %s as it is ' \
'multidimensional.' % coord.name()
raise iris.exceptions.CoordinateMultiDimError(msg)
dimension = self.coord_dims(coord)
if not dimension:
msg = 'Cannot group-by the coordinate "%s", as its ' \
'dimension does not describe any data.' % coord.name()
raise iris.exceptions.CoordinateCollapseError(msg)
if dimension_to_groupby is None:
dimension_to_groupby = dimension[0]
if dimension_to_groupby != dimension[0]:
msg = 'Cannot group-by coordinates over different dimensions.'
raise iris.exceptions.CoordinateCollapseError(msg)
groupby_coords.append(coord)
# Determine the other coordinates that share the same group-by
# coordinate dimension.
shared_coords = list(filter(
lambda coord_: coord_ not in groupby_coords,
self.coords(dimensions=dimension_to_groupby)))
# Create the aggregation group-by instance.
groupby = iris.analysis._Groupby(groupby_coords, shared_coords)
# Create the resulting aggregate-by cube and remove the original
# coordinates that are going to be groupedby.
key = [slice(None, None)] * self.ndim
# Generate unique index tuple key to maintain monotonicity.
key[dimension_to_groupby] = tuple(range(len(groupby)))
key = tuple(key)
aggregateby_cube = self[key]
for coord in groupby_coords + shared_coords:
aggregateby_cube.remove_coord(coord)
# Determine the group-by cube data shape.
data_shape = list(self.shape + aggregator.aggregate_shape(**kwargs))
data_shape[dimension_to_groupby] = len(groupby)
# Aggregate the group-by data.
cube_slice = [slice(None, None)] * len(data_shape)
for i, groupby_slice in enumerate(groupby.group()):
# Slice the cube with the group-by slice to create a group-by
# sub-cube.
cube_slice[dimension_to_groupby] = groupby_slice
groupby_sub_cube = self[tuple(cube_slice)]
# Perform the aggregation over the group-by sub-cube and
# repatriate the aggregated data into the aggregate-by cube data.
cube_slice[dimension_to_groupby] = i
result = aggregator.aggregate(groupby_sub_cube.data,
axis=dimension_to_groupby,
**kwargs)
# Determine aggregation result data type for the aggregate-by cube
# data on first pass.
if i == 0:
if isinstance(self.data, ma.MaskedArray):
aggregateby_data = ma.zeros(data_shape, dtype=result.dtype)
else:
aggregateby_data = np.zeros(data_shape, dtype=result.dtype)
aggregateby_data[tuple(cube_slice)] = result
# Add the aggregation meta data to the aggregate-by cube.
aggregator.update_metadata(aggregateby_cube,
groupby_coords,
aggregate=True, **kwargs)
# Replace the appropriate coordinates within the aggregate-by cube.
dim_coord, = self.coords(dimensions=dimension_to_groupby,
dim_coords=True) or [None]
for coord in groupby.coords:
if dim_coord is not None and \
dim_coord._as_defn() == coord._as_defn() and \
isinstance(coord, iris.coords.DimCoord):
aggregateby_cube.add_dim_coord(coord.copy(),
dimension_to_groupby)
else:
aggregateby_cube.add_aux_coord(coord.copy(),
dimension_to_groupby)
# Attach the aggregate-by data into the aggregate-by cube.
aggregateby_cube = aggregator.post_process(aggregateby_cube,
aggregateby_data,
coords, **kwargs)
return aggregateby_cube
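    # A minimal sketch of grouping by a derived categorisation, assuming a
    # cube with a 'time' coordinate (mirrors the 'year' example above):
    #
    #     import iris.coord_categorisation as cat
    #     cat.add_season(cube, 'time', name='clim_season')
    #     seasonal_mean = cube.aggregated_by('clim_season', iris.analysis.MEAN)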
def rolling_window(self, coord, aggregator, window, **kwargs):
"""
Perform rolling window aggregation on a cube given a coordinate, an
aggregation method and a window size.
Args:
* coord (string/:class:`iris.coords.Coord`):
The coordinate over which to perform the rolling window
aggregation.
* aggregator (:class:`iris.analysis.Aggregator`):
Aggregator to be applied to the data.
* window (int):
Size of window to use.
Kwargs:
* kwargs:
Aggregator and aggregation function keyword arguments. The weights
argument to the aggregator, if any, should be a 1d array with the
same length as the chosen window.
Returns:
:class:`iris.cube.Cube`.
.. note::
This operation does not yet have support for lazy evaluation.
For example:
>>> import iris, iris.analysis
>>> fname = iris.sample_data_path('GloSea4', 'ensemble_010.pp')
>>> air_press = iris.load_cube(fname, 'surface_temperature')
>>> print(air_press)
surface_temperature / (K) \
(time: 6; latitude: 145; longitude: 192)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_period \
x - -
Scalar coordinates:
forecast_reference_time: 2011-07-23 00:00:00
realization: 10
Attributes:
STASH: m01s00i024
source: Data from Met Office Unified Model
um_version: 7.6
Cell methods:
mean: time (1 hour)
>>> print(air_press.rolling_window('time', iris.analysis.MEAN, 3))
surface_temperature / (K) \
(time: 4; latitude: 145; longitude: 192)
Dimension coordinates:
time \
x - -
latitude \
- x -
longitude \
- - x
Auxiliary coordinates:
forecast_period \
x - -
Scalar coordinates:
forecast_reference_time: 2011-07-23 00:00:00
realization: 10
Attributes:
STASH: m01s00i024
source: Data from Met Office Unified Model
um_version: 7.6
Cell methods:
mean: time (1 hour)
mean: time
        Notice that the time dimension (and the coordinates that span it,
        such as forecast_period) now represents the 4 possible windows of
        size 3 from the original cube.
"""
coord = self._as_list_of_coords(coord)[0]
if getattr(coord, 'circular', False):
raise iris.exceptions.NotYetImplementedError(
'Rolling window over a circular coordinate.')
if window < 2:
raise ValueError('Cannot perform rolling window '
'with a window size less than 2.')
if coord.ndim > 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
dimension = self.coord_dims(coord)
if len(dimension) != 1:
raise iris.exceptions.CoordinateCollapseError(
'Cannot perform rolling window with coordinate "%s", '
'must map to one data dimension.' % coord.name())
dimension = dimension[0]
# Use indexing to get a result-cube of the correct shape.
# NB. This indexes the data array which is wasted work.
# As index-to-get-shape-then-fiddle is a common pattern, perhaps
# some sort of `cube.prepare()` method would be handy to allow
# re-shaping with given data, and returning a mapping of
# old-to-new-coords (to avoid having to use metadata identity)?
key = [slice(None, None)] * self.ndim
key[dimension] = slice(None, self.shape[dimension] - window + 1)
new_cube = self[tuple(key)]
# take a view of the original data using the rolling_window function
# this will add an extra dimension to the data at dimension + 1 which
# represents the rolled window (i.e. will have a length of window)
rolling_window_data = iris.util.rolling_window(self.data,
window=window,
axis=dimension)
# now update all of the coordinates to reflect the aggregation
for coord_ in self.coords(dimensions=dimension):
if coord_.has_bounds():
warnings.warn('The bounds of coordinate %r were ignored in '
'the rolling window operation.' % coord_.name())
if coord_.ndim != 1:
raise ValueError('Cannot calculate the rolling '
'window of %s as it is a multidimensional '
'coordinate.' % coord_.name())
new_bounds = iris.util.rolling_window(coord_.points, window)
if np.issubdtype(new_bounds.dtype, np.str):
# Handle case where the AuxCoord contains string. The points
# are the serialized form of the points contributing to each
# window and the bounds are the first and last points in the
# window as with numeric coordinates.
new_points = np.apply_along_axis(lambda x: '|'.join(x), -1,
new_bounds)
new_bounds = new_bounds[:, (0, -1)]
else:
# Take the first and last element of the rolled window (i.e.
# the bounds) and the new points are the midpoints of these
# bounds.
new_bounds = new_bounds[:, (0, -1)]
new_points = np.mean(new_bounds, axis=-1)
# wipe the coords points and set the bounds
new_coord = new_cube.coord(coord_)
new_coord.points = new_points
new_coord.bounds = new_bounds
# update the metadata of the cube itself
aggregator.update_metadata(
new_cube, [coord],
action='with a rolling window of length %s over' % window,
**kwargs)
# and perform the data transformation, generating weights first if
# needed
if isinstance(aggregator, iris.analysis.WeightedAggregator) and \
aggregator.uses_weighting(**kwargs):
if 'weights' in kwargs:
weights = kwargs['weights']
if weights.ndim > 1 or weights.shape[0] != window:
raise ValueError('Weights for rolling window aggregation '
'must be a 1d array with the same length '
'as the window.')
| kwargs = dict(kwargs) | 11,922 | lcc_e | python | null | 17306687a40f0d095672642ef1a138abe1b6a336b902c82f |
|
# orm/events.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""ORM event interfaces.
"""
from .. import event, exc, util
from .base import _mapper_or_none
import inspect
import weakref
from . import interfaces
from . import mapperlib, instrumentation
from .session import Session, sessionmaker
from .scoping import scoped_session
from .attributes import QueryableAttribute
from .query import Query
from sqlalchemy.util.compat import inspect_getargspec
class InstrumentationEvents(event.Events):
"""Events related to class instrumentation events.
The listeners here support being established against
any new style class, that is any object that is a subclass
of 'type'. Events will then be fired off for events
against that class. If the "propagate=True" flag is passed
to event.listen(), the event will fire off for subclasses
of that class as well.
The Python ``type`` builtin is also accepted as a target,
which when used has the effect of events being emitted
for all classes.
Note the "propagate" flag here is defaulted to ``True``,
unlike the other class level events where it defaults
to ``False``. This means that new subclasses will also
be the subject of these events, when a listener
is established on a superclass.
.. versionchanged:: 0.8 - events here will emit based
on comparing the incoming class to the type of class
passed to :func:`.event.listen`. Previously, the
event would fire for any class unconditionally regardless
of what class was sent for listening, despite
documentation which stated the contrary.
"""
_target_class_doc = "SomeBaseClass"
_dispatch_target = instrumentation.InstrumentationFactory
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
return _InstrumentationEventsHold(target)
else:
return None
@classmethod
def _listen(cls, event_key, propagate=True, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
def listen(target_cls, *arg):
listen_cls = target()
if propagate and issubclass(target_cls, listen_cls):
return fn(target_cls, *arg)
elif not propagate and target_cls is listen_cls:
return fn(target_cls, *arg)
        def remove(ref):
            # Weakref callback: when the listened-for class is garbage
            # collected, drop the wrapper listener from the factory dispatch.
            key = event.registry._EventKey(
                None, identifier, listen,
                instrumentation._instrumentation_factory)
            getattr(instrumentation._instrumentation_factory.dispatch,
                    identifier).remove(key)

        # Keep only a weak reference to the target class so listening does
        # not keep it alive; 'remove' runs once the class is collected.
        target = weakref.ref(target.class_, remove)
event_key.\
with_dispatch_target(instrumentation._instrumentation_factory).\
with_wrapper(listen).base_listen(**kw)
@classmethod
def _clear(cls):
super(InstrumentationEvents, cls)._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls):
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def class_uninstrument(self, cls):
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def attribute_instrument(self, cls, key, inst):
"""Called when an attribute is instrumented."""
class _InstrumentationEventsHold(object):
"""temporary marker object used to transfer from _accept_with() to
_listen() on the InstrumentationEvents class.
"""
def __init__(self, class_):
self.class_ = class_
dispatch = event.dispatcher(InstrumentationEvents)
class InstanceEvents(event.Events):
"""Define events specific to object lifecycle.
e.g.::
from sqlalchemy import event
def my_load_listener(target, context):
print "on load!"
event.listen(SomeClass, 'load', my_load_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 instance events can be associated with
unmapped superclasses of mapped classes.
Instance events are closely related to mapper events, but
are more specific to the instance and its instrumentation,
rather than its system of persistence.
When using :class:`.InstanceEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting classes as well as the
class which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
"""
_target_class_doc = "SomeClass"
_dispatch_target = instrumentation.ClassManager
@classmethod
def _new_classmanager_instance(cls, class_, classmanager):
_InstanceEventsHold.populate(class_, classmanager)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if isinstance(target, instrumentation.ClassManager):
return target
elif isinstance(target, mapperlib.Mapper):
return target.class_manager
elif target is orm.mapper:
return instrumentation.ClassManager
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return instrumentation.ClassManager
else:
manager = instrumentation.manager_of_class(target)
if manager:
return manager
else:
return _InstanceEventsHold(target)
return None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if not raw:
def wrap(state, *arg, **kw):
return fn(state.obj(), *arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate, **kw)
if propagate:
for mgr in target.subclass_managers(True):
event_key.with_dispatch_target(mgr).base_listen(
propagate=True)
@classmethod
def _clear(cls):
super(InstanceEvents, cls)._clear()
_InstanceEventsHold._clear()
def first_init(self, manager, cls):
"""Called when the first instance of a particular mapping is called.
This event is called when the ``__init__`` method of a class
is called the first time for that particular class. The event
invokes before ``__init__`` actually proceeds as well as before
the :meth:`.InstanceEvents.init` event is invoked.
"""
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
"""
def init_failure(self, target, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is loaded
from the database.
The event is invoked after an exception raised by the ``__init__``
method is caught. After the event
is invoked, the original exception is re-raised outwards, so that
the construction of the object still raises an exception. The
actual exception and stack trace raised should be present in
``sys.exc_info()``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments that were passed to the ``__init__``
method.
:param kwargs: keyword arguments that were passed to the ``__init__``
method.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.load`
"""
def load(self, target, context):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
The :meth:`.InstanceEvents.load` event is also available in a
class-method decorator format called :func:`.orm.reconstructor`.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress. This argument may be
``None`` if the load does not correspond to a :class:`.Query`,
such as during :meth:`.Session.merge`.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.refresh`
:meth:`.SessionEvents.loaded_as_persistent`
:ref:`mapping_constructors`
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed from a query.
Contrast this to the :meth:`.InstanceEvents.load` method, which
is invoked when the object is first loaded from a query.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress.
:param attrs: sequence of attribute names which
were populated, or None if all column-mapped, non-deferred
attributes were populated.
.. seealso::
:meth:`.InstanceEvents.load`
"""
def refresh_flush(self, target, flush_context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed within the persistence of the object.
This event is the same as :meth:`.InstanceEvents.refresh` except
it is invoked within the unit of work flush process, and the values
here typically come from the process of handling an INSERT or
UPDATE, such as via the RETURNING clause or from Python-side default
values.
.. versionadded:: 1.0.5
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param attrs: sequence of attribute names which
were populated.
"""
def expire(self, target, attrs):
"""Receive an object instance after its attributes or some subset
have been expired.
        ``attrs`` is a list of attribute names. If None, the entire
state was expired.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param attrs: sequence of attribute
names which were expired, or None if all attributes were
expired.
"""
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary returned by
:class:`.InstanceState.__getstate__`, containing the state
to be pickled.
"""
def unpickle(self, target, state_dict):
"""Receive an object instance after its associated state has
been unpickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary sent to
:class:`.InstanceState.__setstate__`, containing the state
dictionary which was pickled.
"""
class _EventsHold(event.RefCollection):
"""Hold onto listeners against unmapped, uninstrumented classes.
Establish _listen() for that class' mapper/instrumentation when
those objects are created for that class.
"""
def __init__(self, class_):
self.class_ = class_
@classmethod
def _clear(cls):
cls.all_holds.clear()
class HoldEvents(object):
_dispatch_target = None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if target.class_ in target.all_holds:
collection = target.all_holds[target.class_]
else:
collection = target.all_holds[target.class_] = {}
event.registry._stored_in_collection(event_key, target)
collection[event_key._key] = (event_key, raw, propagate)
if propagate:
stack = list(target.class_.__subclasses__())
while stack:
subclass = stack.pop(0)
stack.extend(subclass.__subclasses__())
subject = target.resolve(subclass)
if subject is not None:
# we are already going through __subclasses__()
# so leave generic propagate flag False
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False, **kw)
def remove(self, event_key):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if isinstance(target, _EventsHold):
collection = target.all_holds[target.class_]
del collection[event_key._key]
@classmethod
def populate(cls, class_, subject):
for subclass in class_.__mro__:
if subclass in cls.all_holds:
collection = cls.all_holds[subclass]
for event_key, raw, propagate in collection.values():
if propagate or subclass is class_:
# since we can't be sure in what order different
# classes in a hierarchy are triggered with
# populate(), we rely upon _EventsHold for all event
# assignment, instead of using the generic propagate
# flag.
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False)
class _InstanceEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return instrumentation.manager_of_class(class_)
class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
pass
dispatch = event.dispatcher(HoldInstanceEvents)
class MapperEvents(event.Events):
"""Define events specific to mappings.
e.g.::
from sqlalchemy import event
def my_before_insert_listener(mapper, connection, target):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeClass,
# to execute during the "before_insert" hook
event.listen(
SomeClass, 'before_insert', my_before_insert_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 mapper events can be associated with
unmapped superclasses of mapped classes.
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
persistence methods :meth:`~.MapperEvents.before_insert`,
and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted - however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
:meth:`.SessionEvents.before_flush` and
:meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers and/or the mappers of
inheriting classes, as well as any
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
* ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
processing normally.
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners.
"""
_target_class_doc = "SomeClass"
_dispatch_target = mapperlib.Mapper
@classmethod
def _new_mapper_instance(cls, class_, mapper):
_MapperEventsHold.populate(class_, mapper)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if target is orm.mapper:
return mapperlib.Mapper
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return target
else:
mapper = _mapper_or_none(target)
if mapper is not None:
return mapper
else:
return _MapperEventsHold(target)
else:
return target
@classmethod
def _listen(
cls, event_key, raw=False, retval=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if identifier in ("before_configured", "after_configured") and \
target is not mapperlib.Mapper:
util.warn(
"'before_configured' and 'after_configured' ORM events "
"only invoke with the mapper() function or Mapper class "
"as the target.")
if not raw or not retval:
if not raw:
meth = getattr(cls, identifier)
try:
target_index = \
inspect_getargspec(meth)[0].index('target') - 1
except ValueError:
target_index = None
def wrap(*arg, **kw):
if not raw and target_index is not None:
arg = list(arg)
arg[target_index] = arg[target_index].obj()
if not retval:
fn(*arg, **kw)
return interfaces.EXT_CONTINUE
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
if propagate:
for mapper in target.self_and_descendants:
event_key.with_dispatch_target(mapper).base_listen(
propagate=True, **kw)
else:
event_key.base_listen(**kw)
@classmethod
def _clear(cls):
super(MapperEvents, cls)._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
r"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
def mapper_configured(self, mapper, class_):
r"""Called when a specific mapper has completed its own configuration
within the scope of the :func:`.configure_mappers` call.
The :meth:`.MapperEvents.mapper_configured` event is invoked
for each mapper that is encountered when the
:func:`.orm.configure_mappers` function proceeds through the current
list of not-yet-configured mappers.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
When the event is called, the mapper should be in its final
state, but **not including backrefs** that may be invoked from
other mappers; they might still be pending within the
configuration operation. Bidirectional relationships that
are instead configured via the
:paramref:`.orm.relationship.back_populates` argument
*will* be fully available, since this style of relationship does not
rely upon other possibly-not-configured mappers to know that they
exist.
For an event that is guaranteed to have **all** mappers ready
to go including backrefs that are defined only on other
mappings, use the :meth:`.MapperEvents.after_configured`
event; this event invokes only after all known mappings have been
fully configured.
The :meth:`.MapperEvents.mapper_configured` event, unlike
:meth:`.MapperEvents.before_configured` or
:meth:`.MapperEvents.after_configured`,
is called for each mapper/class individually, and the mapper is
passed to the event itself. It also is called exactly once for
a particular mapper. The event is therefore useful for
configurational steps that benefit from being invoked just once
on a specific mapper basis, which don't require that "backref"
configurations are necessarily ready yet.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
"""
# TODO: need coverage for this event
def before_configured(self):
"""Called before a series of mappers have been configured.
The :meth:`.MapperEvents.before_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, before the function has done any of its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured")
def go():
# ...
Contrast this event to :meth:`.MapperEvents.after_configured`,
which is invoked after the series of mappers has been configured,
as well as :meth:`.MapperEvents.mapper_configured`, which is invoked
on a per-mapper basis as each one is configured to the extent possible.
Theoretically this event is called once per
application, but is actually called any time new mappers
are to be affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured", once=True)
def go():
# ...
.. versionadded:: 0.9.3
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.after_configured`
"""
def after_configured(self):
"""Called after a series of mappers have been configured.
The :meth:`.MapperEvents.after_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, after the function has completed its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
Contrast this event to the :meth:`.MapperEvents.mapper_configured`
event, which is called on a per-mapper basis while the configuration
operation proceeds; unlike that event, when this event is invoked,
all cross-configurations (e.g. backrefs) will also have been made
available for any mappers that were pending.
Also contrast to :meth:`.MapperEvents.before_configured`,
which is invoked before the series of mappers has been configured.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured")
def go():
# ...
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured", once=True)
def go():
# ...
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.before_configured`
"""
def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class before their INSERT statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class after their INSERT statements have been
emitted at once in a previous step. In the extremely
rare case that this is not desirable, the
:func:`.mapper` can be configured with ``batch=False``,
which will cause batches of instances to be broken up
into individual (and more poorly performing)
event->persist->event steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
exist.
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class before their UPDATE statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
To detect if the column-based attributes on the object have net
changes, and therefore resulted in an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class after their UPDATE statements have been emitted at
once in a previous step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
class _MapperEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return _mapper_or_none(class_)
class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
pass
dispatch = event.dispatcher(HoldMapperEvents)
class SessionEvents(event.Events):
"""Define events specific to :class:`.Session` lifecycle.
e.g.::
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def my_before_commit(session):
print "before commit!"
Session = sessionmaker()
event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
of :class:`~.sessionmaker()` and :class:`~.scoped_session()`.
Additionally, it accepts the :class:`.Session` class which
will apply listeners to all :class:`.Session` instances
globally.
"""
_target_class_doc = "SomeSessionOrFactory"
_dispatch_target = Session
@classmethod
def _accept_with(cls, target):
if isinstance(target, scoped_session):
target = target.session_factory
if not isinstance(target, sessionmaker) and \
(
not isinstance(target, type) or
not issubclass(target, Session)
):
raise exc.ArgumentError(
"Session event listen on a scoped_session "
"requires that its creation callable "
"is associated with the Session class.")
if isinstance(target, sessionmaker):
return target.class_
elif isinstance(target, type):
if issubclass(target, scoped_session):
return Session
elif issubclass(target, Session):
return target
elif isinstance(target, Session):
return target
else:
return None
def after_transaction_create(self, session, transaction):
"""Execute when a new :class:`.SessionTransaction` is created.
This event differs from :meth:`~.SessionEvents.after_begin`
in that it occurs for each :class:`.SessionTransaction`
overall, as opposed to when transactions are begun
on individual database connections. It is also invoked
for nested transactions and subtransactions, and is always
matched by a corresponding
:meth:`~.SessionEvents.after_transaction_end` event
(assuming normal operation of the :class:`.Session`).
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_transaction_end(self, session, transaction):
"""Execute when the span of a :class:`.SessionTransaction` ends.
This event differs from :meth:`~.SessionEvents.after_commit`
in that it corresponds to all :class:`.SessionTransaction`
objects in use, including those for nested transactions
and subtransactions, and is always matched by a corresponding
:meth:`~.SessionEvents.after_transaction_create` event.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_create`
"""
def before_commit(self, session):
"""Execute before commit is called.
.. note::
The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_commit(self, session):
"""Execute after a commit has occurred.
.. note::
The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
.. note::
The :class:`.Session` is not in an active transaction
when the :meth:`~.SessionEvents.after_commit` event is invoked,
and therefore can not emit SQL. To emit SQL corresponding to
every transaction, use the :meth:`~.SessionEvents.before_commit`
event.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
:attr:`.Session.is_active` flag.
:param session: The target :class:`.Session`.
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction`
transactional marker object which was just closed. The current
:class:`.SessionTransaction` for the given :class:`.Session` is
available via the :attr:`.Session.transaction` attribute.
.. versionadded:: 0.7.3
"""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param instances: Usually ``None``, this is the collection of
objects which can be passed to the :meth:`.Session.flush` method
(note this usage is deprecated).
.. seealso::
:meth:`~.SessionEvents.after_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush`
:ref:`session_persistence_events`
"""
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`~.engine.Connection` object
which will be used for SQL statements.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def before_attach(self, session, instance):
"""Execute before an instance is attached to a session.
This is called before an add, delete or merge causes
the object to be part of the session.
.. versionadded:: 0.8. Note that :meth:`~.SessionEvents.after_attach`
now fires off after the item is part of the session.
:meth:`.before_attach` is provided for those cases where
the item should not yet be part of the session state.
.. seealso::
:meth:`~.SessionEvents.after_attach`
:ref:`session_lifecycle_events`
"""
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge.
.. note::
As of 0.8, this event fires off *after* the item
has been fully associated with the session, which is
different than previous releases. For event
handlers that require the object not yet
be part of session state (such as handlers which
may autoflush while the target object is not
yet complete) consider the
new :meth:`.before_attach` event.
.. seealso::
:meth:`~.SessionEvents.before_attach`
:ref:`session_lifecycle_events`
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda update_context: (
update_context.session,
update_context.query,
update_context.context,
update_context.result))
def after_bulk_update(self, update_context):
"""Execute after a bulk update operation to the session.
This is called as a result of the :meth:`.Query.update` method.
:param update_context: an "update context" object which contains
details about the update, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` - the :class:`.Query` object that this update operation
was called upon.
* ``context`` - the :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` - the :class:`.ResultProxy` returned as a result of the
bulk UPDATE operation.
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda delete_context: (
delete_context.session,
delete_context.query,
delete_context.context,
delete_context.result))
def after_bulk_delete(self, delete_context):
"""Execute after a bulk delete operation to the session.
This is called as a result of the :meth:`.Query.delete` method.
:param delete_context: a "delete context" object which contains
details about the delete, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` - the :class:`.Query` object that this delete operation
was called upon.
* ``context`` - the :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` - the :class:`.ResultProxy` returned as a result of the
bulk DELETE operation.
"""
def transient_to_pending(self, session, instance):
"""Intercept the "transient to pending" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def pending_to_transient(self, session, instance):
"""Intercept the "pending to transient" transition for a specific object.
This less common transition occurs when a pending object that has
not been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction,
or when the :meth:`.Session.expunge` method is used.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_transient(self, session, instance):
"""Intercept the "persistent to transient" transition for a specific object.
This less common transition occurs when a pending object that
has been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def pending_to_persistent(self, session, instance):
"""Intercept the "pending to persistent"" transition for a specific object.
This event is invoked within the flush process, and is
similar to scanning the :attr:`.Session.new` collection within
the :meth:`.SessionEvents.after_flush` event. However, in this
case the object has already been moved to the persistent state
when the event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def detached_to_persistent(self, session, instance):
"""Intercept the "detached to persistent" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call, as well as during the
:meth:`.Session.delete` call if the object was not previously
associated with the
:class:`.Session` (note that an object marked as "deleted" remains
in the "persistent" state until the flush proceeds).
.. note::
If the object becomes persistent as part of a call to
:meth:`.Session.delete`, the object is **not** yet marked as
deleted when this event is called. To detect deleted objects,
check the ``deleted`` flag sent to the
:meth:`.SessionEvents.persistent_to_detached` event after the
flush proceeds, or check the :attr:`.Session.deleted` collection
within the :meth:`.SessionEvents.before_flush` event if deleted
objects need to be intercepted before the flush.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def loaded_as_persistent(self, session, instance):
"""Intercept the "loaded as persistent" transition for a specific object.
This event is invoked within the ORM loading process, and is invoked
very similarly to the :meth:`.InstanceEvents.load` event. However,
the event here is linkable to a :class:`.Session` class or instance,
rather than to a mapper or class hierarchy, and integrates
with the other session lifecycle events smoothly. The object
is guaranteed to be present in the session's identity map when
this event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_deleted(self, session, instance):
"""Intercept the "persistent to deleted" transition for a specific object.
This event is invoked when a persistent object's identity
is deleted from the database within a flush, however the object
still remains associated with the :class:`.Session` until the
transaction completes.
If the transaction is rolled back, the object moves again
to the persistent state, and the
:meth:`.SessionEvents.deleted_to_persistent` event is called.
If the transaction is committed, the object becomes detached,
which will emit the :meth:`.SessionEvents.deleted_to_detached`
event.
Note that while the :meth:`.Session.delete` method is the primary
public interface to mark an object as deleted, many objects
get deleted due to cascade rules, which are not always determined
until flush time. Therefore, there's no way to catch
every object that will be deleted until the flush has proceeded.
The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
invoked at the end of a flush.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_persistent(self, session, instance):
"""Intercept the "deleted to persistent" transition for a specific object.
This transition occurs only when an object that's been deleted
successfully in a flush is restored due to a call to
:meth:`.Session.rollback`. The event is not called under
any other circumstances.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_detached(self, session, instance):
"""Intercept the "deleted to detached" transition for a specific object.
This event is invoked when a deleted object is evicted
from the session. The typical case when this occurs is when
the transaction for a :class:`.Session` in which the object
was deleted is committed; the object moves from the deleted
state to the detached state.
It is also invoked for objects that were deleted in a flush
when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
events are called, as well as if the object is individually
expunged from its deleted state via :meth:`.Session.expunge`.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_detached(self, session, instance):
"""Intercept the "persistent to detached" transition for a specific object.
This event is invoked when a persistent object is evicted
from the session. There are many conditions that cause this
to happen, including:
* using a method such as :meth:`.Session.expunge`
or :meth:`.Session.close`
* Calling the :meth:`.Session.rollback` method, when the object
was part of an INSERT statement for that session's transaction
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
:param deleted: boolean. If True, indicates this object moved
to the detached state because it was marked as deleted and flushed.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
class AttributeEvents(event.Events):
"""Define events for object attributes.
These are typically defined on the class-bound descriptor for the
target class.
e.g.::
from sqlalchemy import event
def my_append_listener(target, value, initiator):
print "received append event for target: %s" % target
event.listen(MyClass.collection, 'append', my_append_listener)
Listeners have the option to return a possibly modified version
of the value, when the ``retval=True`` flag is passed
to :func:`~.event.listen`::
def validate_phone(target, value, oldvalue, initiator):
"Strip non-numeric characters from a phone number"
return re.sub(r'\D', '', value)
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :exc:`ValueError` to halt the operation.
Several modifiers are available to the :func:`~.event.listen` function.
:param active_history=False: When True, indicates that the
"set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
:func:`.relationship`.
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
"""
_target_class_doc = "SomeClass.some_attribute"
_dispatch_target = QueryableAttribute
@staticmethod
def _set_dispatch(cls, dispatch_cls):
dispatch = event.Events._set_dispatch(cls, dispatch_cls)
dispatch_cls._active_history = False
return dispatch
@classmethod
def _accept_with(cls, target):
# TODO: coverage
if isinstance(target, interfaces.MapperProperty):
return getattr(target.parent.class_, target.key)
else:
return target
@classmethod
def _listen(cls, event_key, active_history=False,
raw=False, retval=False,
propagate=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if active_history:
target.dispatch._active_history = True
if not raw or not retval:
def wrap(target, *arg):
if not raw:
target = target.obj()
if not retval:
if arg:
value = arg[0]
else:
value = None
fn(target, *arg)
return value
else:
return fn(target, *arg)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate)
if propagate:
manager = instrumentation.manager_of_class(target.class_)
for mgr in manager.subclass_managers(True):
event_key.with_dispatch_target(
mgr[target.key]).base_listen(propagate=True)
def append(self, target, value, initiator):
"""Receive a collection append event.
The append event is invoked for each element as it is appended
to the collection. This occurs for single-item appends as well
as for a "bulk replace" operation.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation, as well as be inspected for information
about the source of the event.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
.. seealso::
:meth:`.AttributeEvents.bulk_replace`
"""
def bulk_replace(self, target, values, initiator):
"""Receive a collection 'bulk replace' event.
This event is invoked for a sequence of values as they are incoming
to a bulk collection set operation, which can be
modified in place before the values are treated as ORM objects.
This is an "early hook" that runs before the bulk replace routine
attempts to reconcile which objects are already present in the
collection and which are being removed by the net replace operation.
It is typical that this method be combined with use of the
:meth:`.AttributeEvents.append` event. When using both of these
events, note that a bulk replace operation will invoke
the :meth:`.AttributeEvents.append` event for all new items,
even after :meth:`.AttributeEvents.bulk_replace` has been invoked
for the collection as a whole. In order to determine if an
:meth:`.AttributeEvents.append` event is part of a bulk replace,
use the symbol :attr:`~.attributes.OP_BULK_REPLACE` to test the
incoming initiator::
from sqlalchemy.orm.attributes import OP_BULK_REPLACE
@event.listens_for(SomeObject.collection, "bulk_replace")
def process_collection(target, values, initiator):
values[:] = [_make_value(value) for value in values]
@event.listens_for(SomeObject.collection, "append", retval=True)
def process_collection(target, value, initiator):
# make sure bulk_replace didn't already do it
if initiator is None or initiator.op is not OP_BULK_REPLACE:
return _make_value(value)
else:
return value
.. versionadded:: 1.2
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param values: a sequence (e.g. a list) of the values being set. The
handler can modify this list in place.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event.
"""
def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: No return value is defined for this event.
"""
def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
or expired.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
"""
def init_scalar(self, target, value, dict_):
"""Receive a scalar "init" event.
This event is invoked when an uninitialized, unpersisted scalar
attribute is accessed. A value of ``None`` is typically returned
in this case; no changes are made to the object's state.
The event handler can alter this behavior in two ways.
One is that a value other than ``None`` may be returned. The other
is that the value may be established as part of the object's state,
which will also have the effect that it is persisted.
Typical use is to establish a specific default value of an attribute
upon access::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
def _init_some_attribute(target, value, dict_):
dict_['some_attribute'] = SOME_CONSTANT
return SOME_CONSTANT
Above, we initialize the attribute ``MyClass.some_attribute`` to the
value of ``SOME_CONSTANT``. The above code includes the following
features:
* By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
we indicate that the value is to be persisted to the database.
**The given value is only persisted to the database if we
explicitly associate it with the object**. The ``dict_`` given
is the ``__dict__`` element of the mapped object, assuming the
default attribute instrumentation system is in place.
* By establishing the ``retval=True`` flag, the value we return
from the function will be returned by the attribute getter.
Without this flag, the event is assumed to be a passive observer
and the return value of our function is ignored.
* The ``propagate=True`` flag is significant if the mapped class
includes inheriting subclasses, which would also make use of this
event listener. Without this flag, an inheriting subclass will
not use our event handler.
When we establish the value in the given dictionary, the value will
be used in the INSERT statement established by the unit of work.
Normally, the default returned value of ``None`` is not established as
part of the object, to avoid the issue of mutations occurring to the
object in response to a normally passive "get" operation, and also
sidesteps the issue of whether or not the :meth:`.AttributeEvents.set`
event should be awkwardly fired off during an attribute access
operation. This does not impact the INSERT operation since the
``None`` value matches the value of ``NULL`` that goes into the
database in any case; note that ``None`` is skipped during the INSERT
to ensure that column and SQL-level default functions can fire off.
The attribute set event :meth:`.AttributeEvents.set` as well as the
related validation feature provided by :obj:`.orm.validates` is
**not** invoked when we apply our value to the given ``dict_``. To
have these events invoked in response to our newly generated
value, apply the value to the given object as a normal attribute
set operation::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
def _init_some_attribute(target, value, dict_):
# will also fire off attribute set events
target.some_attribute = SOME_CONSTANT
return SOME_CONSTANT
When multiple listeners are set up, the generation of the value
is "chained" from one listener to the next by passing the value
returned by the previous listener that specifies ``retval=True``
as the ``value`` argument of the next listener.
The :meth:`.AttributeEvents.init_scalar` event may be used to
extract values from the default values and/or callables established on
mapped :class:`.Column` objects. See the "active column defaults"
example in :ref:`examples_instrumentation` for an example of this.
.. versionadded:: 1.1
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value that is to be returned before this event
listener is invoked. This value begins as the value ``None``,
however will be the return value of the previous event handler
function if multiple listeners are present.
:param dict_: the attribute dictionary of this mapped object.
This is normally the ``__dict__`` of the object, but in all cases
represents the destination that the attribute system uses to get
at the actual value of this attribute. Placing the value in this
dictionary has the effect that the value will be used in the
INSERT statement generated by the unit of work.
.. seealso::
:ref:`examples_instrumentation` - see the
``active_column_defaults.py`` example.
"""
def init_collection(self, target, collection, collection_adapter):
"""Receive a 'collection init' event.
This event is triggered for a collection-based attribute, when
the initial "empty collection" is first generated for a blank
attribute, as well as for when the collection is replaced with
a new one, such as via a set event.
E.g., given that ``User.addresses`` is a relationship-based
collection, the event is triggered here::
u1 = User()
u1.addresses.append(a1) # <- new collection
and also during replace operations::
u1.addresses = [a2, a3] # <- new collection
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param collection: the new collection. This will always be generated
from what was specified as
:paramref:`.RelationshipProperty.collection_class`, and will always
be empty.
:param collection_adapter: the :class:`.CollectionAdapter` that will
mediate internal access to the collection.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
def dispose_collection(self, target, collection, collection_adapter):
"""Receive a 'collection dispose' event.
This event is triggered for a collection-based attribute when
a collection is replaced, that is::
u1.addresses.append(a1)
u1.addresses = [a2, a3] # <- old collection is disposed
The old collection received will contain its previous contents.
.. versionchanged:: 1.2 The collection passed to
:meth:`.AttributeEvents.dispose_collection` will now have its
contents before the dispose intact; previously, the collection
would be empty.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
def modified(self, target, initiator):
"""Receive a 'modified' event.
This event is triggered when the :func:`.attributes.flag_modified`
function is used to trigger a modify event on an attribute without
any specific value being set.
.. versionadded:: 1.2
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event.
"""
class QueryEvents(event.Events):
"""Represent events within the construction of a :class:`.Query` object.
The events here are intended to be used with an as-yet-unreleased
inspection system for :class:`.Query`. Some very basic operations
are possible now; however, the inspection system is intended to allow
complex query manipulations to be automated.
.. versionadded:: 1.0.0
"""
_target_class_doc = "SomeQuery"
_dispatch_target = Query
def before_compile(self, query):
"""Receive the :class:`.Query` object before it is composed into a
core :class:`.Select` object.
This event is intended to allow changes to the query given::
@event.listens_for(Query, "before_compile", retval=True)
def no_deleted(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
The event should normally be listened with the ``retval=True``
parameter set, so that the modified query may be returned.
"""
@classmethod
def _listen(
cls, event_key, retval=False, **kw):
fn = event_key._listen_fn
if not retval:
def wrap(*arg, **kw):
if not retval:
query = arg[0]
fn(*arg, **kw)
return query
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(**kw)
#!/usr/bin/env python
#
# $Id: //projects/empy/em.py#146 $ $Date: 2003/10/27 $
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
A system for processing Python as markup embedded in text.
"""
__program__ = 'empy'
__version__ = '3.3'
__url__ = 'http://www.alcyone.com/software/empy/'
__author__ = 'Erik Max Francis <max@alcyone.com>'
__copyright__ = 'Copyright (C) 2002-2003 Erik Max Francis'
__license__ = 'LGPL'
import copy
import getopt
import os
import re
import string
import sys
import types
try:
# The equivalent of import cStringIO as StringIO.
import cStringIO
StringIO = cStringIO
del cStringIO
except ImportError:
import StringIO
# For backward compatibility, we can't assume these are defined.
False, True = 0, 1
# Some basic defaults.
FAILURE_CODE = 1
DEFAULT_PREFIX = '@'
DEFAULT_PSEUDOMODULE_NAME = 'empy'
DEFAULT_SCRIPT_NAME = '?'
SIGNIFICATOR_RE_SUFFIX = r"%(\S+)\s*(.*)\s*$"
SIGNIFICATOR_RE_STRING = DEFAULT_PREFIX + SIGNIFICATOR_RE_SUFFIX
BANGPATH = '#!'
DEFAULT_CHUNK_SIZE = 8192
DEFAULT_ERRORS = 'strict'
# Character information.
IDENTIFIER_FIRST_CHARS = '_abcdefghijklmnopqrstuvwxyz' \
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
IDENTIFIER_CHARS = IDENTIFIER_FIRST_CHARS + '0123456789.'
ENDING_CHARS = {'(': ')', '[': ']', '{': '}'}
# Environment variable names.
OPTIONS_ENV = 'EMPY_OPTIONS'
PREFIX_ENV = 'EMPY_PREFIX'
PSEUDO_ENV = 'EMPY_PSEUDO'
FLATTEN_ENV = 'EMPY_FLATTEN'
RAW_ENV = 'EMPY_RAW_ERRORS'
INTERACTIVE_ENV = 'EMPY_INTERACTIVE'
BUFFERED_ENV = 'EMPY_BUFFERED_OUTPUT'
NO_OVERRIDE_ENV = 'EMPY_NO_OVERRIDE'
UNICODE_ENV = 'EMPY_UNICODE'
INPUT_ENCODING_ENV = 'EMPY_UNICODE_INPUT_ENCODING'
OUTPUT_ENCODING_ENV = 'EMPY_UNICODE_OUTPUT_ENCODING'
INPUT_ERRORS_ENV = 'EMPY_UNICODE_INPUT_ERRORS'
OUTPUT_ERRORS_ENV = 'EMPY_UNICODE_OUTPUT_ERRORS'
# Interpreter options.
BANGPATH_OPT = 'processBangpaths' # process bangpaths as comments?
BUFFERED_OPT = 'bufferedOutput' # fully buffered output?
RAW_OPT = 'rawErrors' # raw errors?
EXIT_OPT = 'exitOnError' # exit on error?
FLATTEN_OPT = 'flatten' # flatten pseudomodule namespace?
OVERRIDE_OPT = 'override' # override sys.stdout with proxy?
CALLBACK_OPT = 'noCallbackError' # is no custom callback an error?
# Usage info.
OPTION_INFO = [
("-V --version", "Print version and exit"),
("-h --help", "Print usage and exit"),
("-H --extended-help", "Print extended usage and exit"),
("-k --suppress-errors", "Do not exit on errors; go interactive"),
("-p --prefix=<char>", "Change prefix to something other than @"),
(" --no-prefix", "Do not do any markup processing at all"),
("-m --module=<name>", "Change the internal pseudomodule name"),
("-f --flatten", "Flatten the members of pseudmodule to start"),
("-r --raw-errors", "Show raw Python errors"),
("-i --interactive", "Go into interactive mode after processing"),
("-n --no-override-stdout", "Do not override sys.stdout with proxy"),
("-o --output=<filename>", "Specify file for output as write"),
("-a --append=<filename>", "Specify file for output as append"),
("-b --buffered-output", "Fully buffer output including open"),
(" --binary", "Treat the file as a binary"),
(" --chunk-size=<chunk>", "Use this chunk size for reading binaries"),
("-P --preprocess=<filename>", "Interpret EmPy file before main processing"),
("-I --import=<modules>", "Import Python modules before processing"),
("-D --define=<definition>", "Execute Python assignment statement"),
("-E --execute=<statement>", "Execute Python statement before processing"),
("-F --execute-file=<filename>", "Execute Python file before processing"),
(" --pause-at-end", "Prompt at the ending of processing"),
(" --relative-path", "Add path of EmPy script to sys.path"),
(" --no-callback-error", "Custom markup without callback is error"),
(" --no-bangpath-processing", "Suppress bangpaths as comments"),
("-u --unicode", "Enable Unicode subsystem (Python 2+ only)"),
(" --unicode-encoding=<e>", "Set both input and output encodings"),
(" --unicode-input-encoding=<e>", "Set input encoding"),
(" --unicode-output-encoding=<e>", "Set output encoding"),
(" --unicode-errors=<E>", "Set both input and output error handler"),
(" --unicode-input-errors=<E>", "Set input error handler"),
(" --unicode-output-errors=<E>", "Set output error handler"),
]
USAGE_NOTES = """\
Notes: Whitespace immediately inside parentheses of @(...) is
ignored. Whitespace immediately inside braces of @{...} is ignored,
unless ... spans multiple lines. Use @{ ... }@ to suppress newline
following expansion. Simple expressions ignore trailing dots; `@x.'
means `@(x).'. A #! at the start of a file is treated as a @#
comment."""
MARKUP_INFO = [
("@# ... NL", "Comment; remove everything up to newline"),
("@? NAME NL", "Set the current context name"),
("@! INTEGER NL", "Set the current context line number"),
("@ WHITESPACE", "Remove following whitespace; line continuation"),
("@\\ ESCAPE_CODE", "A C-style escape sequence"),
("@@", "Literal @; @ is escaped (duplicated prefix)"),
("@), @], @}", "Literal close parenthesis, bracket, brace"),
("@ STRING_LITERAL", "Replace with string literal contents"),
("@( EXPRESSION )", "Evaluate expression and substitute with str"),
("@( TEST [? THEN [! ELSE]] )", "If test is true, evaluate then, otherwise else"),
("@( TRY $ CATCH )", "Expand try expression, or catch if it raises"),
("@ SIMPLE_EXPRESSION", "Evaluate simple expression and substitute;\n"
"e.g., @x, @x.y, @f(a, b), @l[i], etc."),
("@` EXPRESSION `", "Evaluate expression and substitute with repr"),
("@: EXPRESSION : [DUMMY] :", "Evaluates to @:...:expansion:"),
("@{ STATEMENTS }", "Statements are executed for side effects"),
("@[ CONTROL ]", "Control markups: if E; elif E; for N in E;\n"
"while E; try; except E, N; finally; continue;\n"
"break; end X"),
("@%% KEY WHITESPACE VALUE NL", "Significator form of __KEY__ = VALUE"),
("@< CONTENTS >", "Custom markup; meaning provided by user"),
]
ESCAPE_INFO = [
("@\\0", "NUL, null"),
("@\\a", "BEL, bell"),
("@\\b", "BS, backspace"),
("@\\dDDD", "three-digit decimal code DDD"),
("@\\e", "ESC, escape"),
("@\\f", "FF, form feed"),
("@\\h", "DEL, delete"),
("@\\n", "LF, linefeed, newline"),
("@\\N{NAME}", "Unicode character named NAME"),
("@\\oOOO", "three-digit octal code OOO"),
("@\\qQQQQ", "four-digit quaternary code QQQQ"),
("@\\r", "CR, carriage return"),
("@\\s", "SP, space"),
("@\\t", "HT, horizontal tab"),
("@\\uHHHH", "16-bit hexadecimal Unicode HHHH"),
("@\\UHHHHHHHH", "32-bit hexadecimal Unicode HHHHHHHH"),
("@\\v", "VT, vertical tab"),
("@\\xHH", "two-digit hexadecimal code HH"),
("@\\z", "EOT, end of transmission"),
]
PSEUDOMODULE_INFO = [
("VERSION", "String representing EmPy version"),
("SIGNIFICATOR_RE_STRING", "Regular expression matching significators"),
("SIGNIFICATOR_RE_SUFFIX", "The above stub, lacking the prefix"),
("interpreter", "Currently-executing interpreter instance"),
("argv", "The EmPy script name and command line arguments"),
("args", "The command line arguments only"),
("identify()", "Identify top context as name, line"),
("setContextName(name)", "Set the name of the current context"),
("setContextLine(line)", "Set the line number of the current context"),
("atExit(callable)", "Invoke no-argument function at shutdown"),
("getGlobals()", "Retrieve this interpreter's globals"),
("setGlobals(dict)", "Set this interpreter's globals"),
("updateGlobals(dict)", "Merge dictionary into interpreter's globals"),
("clearGlobals()", "Start globals over anew"),
("saveGlobals([deep])", "Save a copy of the globals"),
("restoreGlobals([pop])", "Restore the most recently saved globals"),
("defined(name, [loc])", "Find if the name is defined"),
("evaluate(expression, [loc])", "Evaluate the expression"),
("serialize(expression, [loc])", "Evaluate and serialize the expression"),
("execute(statements, [loc])", "Execute the statements"),
("single(source, [loc])", "Execute the 'single' object"),
("atomic(name, value, [loc])", "Perform an atomic assignment"),
("assign(name, value, [loc])", "Perform an arbitrary assignment"),
("significate(key, [value])", "Significate the given key, value pair"),
("include(file, [loc])", "Include filename or file-like object"),
("expand(string, [loc])", "Explicitly expand string and return"),
("string(data, [name], [loc])", "Process string-like object"),
("quote(string)", "Quote prefixes in provided string and return"),
("flatten([keys])", "Flatten module contents into globals namespace"),
("getPrefix()", "Get current prefix"),
("setPrefix(char)", "Set new prefix"),
("stopDiverting()", "Stop diverting; data sent directly to output"),
("createDiversion(name)", "Create a diversion but do not divert to it"),
("retrieveDiversion(name)", "Retrieve the actual named diversion object"),
("startDiversion(name)", "Start diverting to given diversion"),
("playDiversion(name)", "Recall diversion and then eliminate it"),
("replayDiversion(name)", "Recall diversion but retain it"),
("purgeDiversion(name)", "Erase diversion"),
("playAllDiversions()", "Stop diverting and play all diversions in order"),
("replayAllDiversions()", "Stop diverting and replay all diversions"),
("purgeAllDiversions()", "Stop diverting and purge all diversions"),
("getFilter()", "Get current filter"),
("resetFilter()", "Reset filter; no filtering"),
("nullFilter()", "Install null filter"),
("setFilter(shortcut)", "Install new filter or filter chain"),
("attachFilter(shortcut)", "Attach single filter to end of current chain"),
("areHooksEnabled()", "Return whether or not hooks are enabled"),
("enableHooks()", "Enable hooks (default)"),
("disableHooks()", "Disable hook invocation"),
("getHooks()", "Get all the hooks"),
("clearHooks()", "Clear all hooks"),
("addHook(hook, [i])", "Register the hook (optionally insert)"),
("removeHook(hook)", "Remove an already-registered hook from name"),
("invokeHook(name_, ...)", "Manually invoke hook"),
("getCallback()", "Get interpreter callback"),
("registerCallback(callback)", "Register callback with interpreter"),
("deregisterCallback()", "Deregister callback from interpreter"),
("invokeCallback(contents)", "Invoke the callback directly"),
("Interpreter", "The interpreter class"),
]
ENVIRONMENT_INFO = [
(OPTIONS_ENV, "Specified options will be included"),
(PREFIX_ENV, "Specify the default prefix: -p <value>"),
(PSEUDO_ENV, "Specify name of pseudomodule: -m <value>"),
(FLATTEN_ENV, "Flatten empy pseudomodule if defined: -f"),
(RAW_ENV, "Show raw errors if defined: -r"),
(INTERACTIVE_ENV, "Enter interactive mode if defined: -i"),
(BUFFERED_ENV, "Fully buffered output if defined: -b"),
(NO_OVERRIDE_ENV, "Do not override sys.stdout if defined: -n"),
(UNICODE_ENV, "Enable Unicode subsystem: -u"),
(INPUT_ENCODING_ENV, "Unicode input encoding"),
(OUTPUT_ENCODING_ENV, "Unicode output encoding"),
(INPUT_ERRORS_ENV, "Unicode input error handler"),
(OUTPUT_ERRORS_ENV, "Unicode output error handler"),
]
class Error(Exception):
"""The base class for all EmPy errors."""
pass
EmpyError = EmPyError = Error # DEPRECATED
class DiversionError(Error):
"""An error related to diversions."""
pass
class FilterError(Error):
"""An error related to filters."""
pass
class StackUnderflowError(Error):
"""A stack underflow."""
pass
class SubsystemError(Error):
"""An error associated with the Unicode subsystem."""
pass
class FlowError(Error):
"""An exception related to control flow."""
pass
class ContinueFlow(FlowError):
"""A continue control flow."""
pass
class BreakFlow(FlowError):
"""A break control flow."""
pass
class ParseError(Error):
"""A parse error occurred."""
pass
class TransientParseError(ParseError):
"""A parse error occurred which may be resolved by feeding more data.
Such an error reaching the toplevel is an unexpected EOF error."""
pass
class MetaError(Exception):
"""A wrapper around a real Python exception for including a copy of
the context."""
def __init__(self, contexts, exc):
Exception.__init__(self, exc)
self.contexts = contexts
self.exc = exc
def __str__(self):
backtrace = map(lambda x: str(x), self.contexts)
return "%s: %s (%s)" % (self.exc.__class__, self.exc, \
(string.join(backtrace, ', ')))
class Subsystem:
"""The subsystem class defers file creation so that it can create
Unicode-wrapped files if desired (and possible)."""
def __init__(self):
self.useUnicode = False
self.inputEncoding = None
self.outputEncoding = None
self.errors = None
def initialize(self, inputEncoding=None, outputEncoding=None, \
inputErrors=None, outputErrors=None):
self.useUnicode = True
try:
unicode
import codecs
except (NameError, ImportError):
raise SubsystemError, "Unicode subsystem unavailable"
defaultEncoding = sys.getdefaultencoding()
if inputEncoding is None:
inputEncoding = defaultEncoding
self.inputEncoding = inputEncoding
if outputEncoding is None:
outputEncoding = defaultEncoding
self.outputEncoding = outputEncoding
if inputErrors is None:
inputErrors = DEFAULT_ERRORS
self.inputErrors = inputErrors
if outputErrors is None:
outputErrors = DEFAULT_ERRORS
self.outputErrors = outputErrors
def assertUnicode(self):
if not self.useUnicode:
raise SubsystemError, "Unicode subsystem unavailable"
def open(self, name, mode=None):
if self.useUnicode:
return self.unicodeOpen(name, mode)
else:
return self.defaultOpen(name, mode)
def defaultOpen(self, name, mode=None):
if mode is None:
mode = 'r'
return open(name, mode)
def unicodeOpen(self, name, mode=None):
import codecs
if mode is None:
mode = 'rb'
if mode.find('w') >= 0 or mode.find('a') >= 0:
encoding = self.outputEncoding
errors = self.outputErrors
else:
encoding = self.inputEncoding
errors = self.inputErrors
return codecs.open(name, mode, encoding, errors)
theSubsystem = Subsystem()
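# Illustrative sketch (not part of the original source): once initialized,
# the shared subsystem opens codecs-wrapped files; otherwise it falls back
# to the builtin open. The filename is a placeholder.
#
#     theSubsystem.initialize(inputEncoding='utf-8', outputEncoding='utf-8')
#     template = theSubsystem.open('template.em')   # codecs.open(..., 'rb')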
class Stack:
"""A simple stack that behaves as a sequence (with 0 being the top
of the stack, not the bottom)."""
def __init__(self, seq=None):
if seq is None:
seq = []
self.data = seq
def top(self):
"""Access the top element on the stack."""
try:
return self.data[-1]
except IndexError:
raise StackUnderflowError, "stack is empty for top"
def pop(self):
"""Pop the top element off the stack and return it."""
try:
return self.data.pop()
except IndexError:
raise StackUnderflowError, "stack is empty for pop"
def push(self, object):
"""Push an element onto the top of the stack."""
self.data.append(object)
def filter(self, function):
"""Filter the elements of the stack through the function."""
self.data = filter(function, self.data)
def purge(self):
"""Purge the stack."""
self.data = []
def clone(self):
"""Create a duplicate of this stack."""
return self.__class__(self.data[:])
def __nonzero__(self): return len(self.data) != 0
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.data[-(index + 1)]
def __repr__(self):
return '<%s instance at 0x%x [%s]>' % \
(self.__class__, id(self), \
string.join(map(repr, self.data), ', '))
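# Illustrative sketch (not part of the original source): the Stack above is a
# sequence whose index 0 is the top element, per __getitem__.
#
#     s = Stack()
#     s.push('bottom'); s.push('top')
#     assert s.top() == 'top'
#     assert s[0] == 'top' and s[1] == 'bottom'
#     assert s.pop() == 'top'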
class AbstractFile:
"""An abstracted file that, when buffered, will totally buffer the
file, including even the file open."""
def __init__(self, filename, mode='w', buffered=False):
# The calls below might throw, so start off by marking this
# file as "done." This way destruction of a not-completely-
# initialized AbstractFile will generate no further errors.
self.done = True
self.filename = filename
self.mode = mode
self.buffered = buffered
if buffered:
self.bufferFile = StringIO.StringIO()
else:
self.bufferFile = theSubsystem.open(filename, mode)
# Okay, we got this far, so the AbstractFile is initialized.
# Flag it as "not done."
self.done = False
def __del__(self):
self.close()
def write(self, data):
self.bufferFile.write(data)
def writelines(self, data):
self.bufferFile.writelines(data)
def flush(self):
self.bufferFile.flush()
def close(self):
if not self.done:
self.commit()
self.done = True
def commit(self):
if self.buffered:
file = theSubsystem.open(self.filename, self.mode)
file.write(self.bufferFile.getvalue())
file.close()
else:
self.bufferFile.close()
def abort(self):
if self.buffered:
self.bufferFile = None
else:
self.bufferFile.close()
self.bufferFile = None
self.done = True
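# Illustrative sketch (not part of the original source): with buffered=True,
# nothing touches the filesystem (not even the open) until commit/close; the
# filename is a placeholder.
#
#     out = AbstractFile('result.txt', 'w', buffered=True)
#     out.write('hello\n')   # held in an in-memory StringIO buffer
#     out.close()            # opens result.txt and writes the buffer now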
class Diversion:
"""The representation of an active diversion. Diversions act as
(writable) file objects, and then can be recalled either as pure
strings or (readable) file objects."""
def __init__(self):
self.file = StringIO.StringIO()
# These methods define the writable file-like interface for the
# diversion.
def write(self, data):
self.file.write(data)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
self.file.flush()
def close(self):
self.file.close()
# These methods are specific to diversions.
def asString(self):
"""Return the diversion as a string."""
return self.file.getvalue()
def asFile(self):
"""Return the diversion as a file."""
return StringIO.StringIO(self.file.getvalue())
class Stream:
"""A wrapper around an (output) file object which supports
diversions and filtering."""
def __init__(self, file):
self.file = file
self.currentDiversion = None
self.diversions = {}
self.filter = file
self.done = False
def write(self, data):
if self.currentDiversion is None:
self.filter.write(data)
else:
self.diversions[self.currentDiversion].write(data)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
self.filter.flush()
def close(self):
if not self.done:
self.undivertAll(True)
self.filter.close()
self.done = True
def shortcut(self, shortcut):
"""Take a filter shortcut and translate it into a filter, returning
it. Sequences don't count here; these should be detected
independently."""
if shortcut == 0:
return NullFilter()
elif type(shortcut) is types.FunctionType or \
type(shortcut) is types.BuiltinFunctionType or \
type(shortcut) is types.BuiltinMethodType or \
type(shortcut) is types.LambdaType:
return FunctionFilter(shortcut)
elif type(shortcut) is types.StringType:
return StringFilter(shortcut)
elif type(shortcut) is types.DictType:
raise NotImplementedError, "mapping filters not yet supported"
else:
# Presume it's a plain old filter.
return shortcut
def last(self):
"""Find the last filter in the current filter chain, or None if
there are no filters installed."""
if self.filter is None:
return None
thisFilter, lastFilter = self.filter, None
while thisFilter is not None and thisFilter is not self.file:
lastFilter = thisFilter
thisFilter = thisFilter.next()
return lastFilter
def install(self, shortcut=None):
"""Install a new filter; None means no filter. Handle all the
special shortcuts for filters here."""
# Before starting, execute a flush.
self.filter.flush()
if shortcut is None or shortcut == [] or shortcut == ():
# Shortcuts for "no filter."
self.filter = self.file
else:
if type(shortcut) in (types.ListType, types.TupleType):
shortcuts = list(shortcut)
else:
shortcuts = [shortcut]
# Run through the shortcut filter names, replacing them with
# full-fledged instances of Filter.
filters = []
for shortcut in shortcuts:
filters.append(self.shortcut(shortcut))
if len(filters) > 1:
# If there's more than one filter provided, chain them
# together.
lastFilter = None
for filter in filters:
if lastFilter is not None:
lastFilter.attach(filter)
lastFilter = filter
lastFilter.attach(self.file)
self.filter = filters[0]
else:
# If there's only one filter, assume that it's alone or it's
# part of a chain that has already been manually chained;
# just find the end.
filter = filters[0]
lastFilter = filter.last()
lastFilter.attach(self.file)
self.filter = filter
def attach(self, shortcut):
"""Attached a solitary filter (no sequences allowed here) at the
end of the current filter chain."""
lastFilter = self.last()
if lastFilter is None:
# Just install it from scratch if there is no active filter.
self.install(shortcut)
else:
# Attach the last filter to this one, and this one to the file.
filter = self.shortcut(shortcut)
lastFilter.attach(filter)
filter.attach(self.file)
def revert(self):
"""Reset any current diversions."""
self.currentDiversion = None
def create(self, name):
"""Create a diversion if one does not already exist, but do not
divert to it yet."""
if name is None:
raise DiversionError, "diversion name must be non-None"
if not self.diversions.has_key(name):
self.diversions[name] = Diversion()
def retrieve(self, name):
"""Retrieve the given diversion."""
if name is None:
raise DiversionError, "diversion name must be non-None"
if self.diversions.has_key(name):
return self.diversions[name]
else:
raise DiversionError, "nonexistent diversion: %s" % name
def divert(self, name):
"""Start diverting."""
if name is None:
raise DiversionError, "diversion name must be non-None"
self.create(name)
self.currentDiversion = name
def undivert(self, name, purgeAfterwards=False):
"""Undivert a particular diversion."""
if name is None:
raise DiversionError, "diversion name must be non-None"
if self.diversions.has_key(name):
diversion = self.diversions[name]
self.filter.write(diversion.asString())
if purgeAfterwards:
self.purge(name)
else:
raise DiversionError, "nonexistent diversion: %s" % name
def purge(self, name):
"""Purge the specified diversion."""
if name is None:
raise DiversionError, "diversion name must be non-None"
if self.diversions.has_key(name):
del self.diversions[name]
if self.currentDiversion == name:
self.currentDiversion = None
def undivertAll(self, purgeAfterwards=True):
"""Undivert all pending diversions."""
if self.diversions:
self.revert() # revert before undiverting!
names = self.diversions.keys()
names.sort()
for name in names:
self.undivert(name)
if purgeAfterwards:
self.purge(name)
def purgeAll(self):
"""Eliminate all existing diversions."""
if self.diversions:
self.diversions = {}
self.currentDiversion = None
class NullFile:
"""A simple class that supports all the file-like object methods
but simply does nothing at all."""
def __init__(self): pass
def write(self, data): pass
def writelines(self, lines): pass
def flush(self): pass
def close(self): pass
class UncloseableFile:
"""A simple class which wraps around a delegate file-like object
and lets everything through except close calls."""
def __init__(self, delegate):
self.delegate = delegate
def write(self, data):
self.delegate.write(data)
def writelines(self, lines):
self.delegate.writelines(lines)
def flush(self):
self.delegate.flush()
def close(self):
"""Eat this one."""
pass
class ProxyFile:
"""The proxy file object that is intended to take the place of
sys.stdout. The proxy can manage a stack of file objects it is
writing to, and an underlying raw file object."""
def __init__(self, bottom):
self.stack = Stack()
self.bottom = bottom
def current(self):
"""Get the current stream to write to."""
if self.stack:
return self.stack[-1][1]
else:
return self.bottom
def push(self, interpreter):
self.stack.push((interpreter, interpreter.stream()))
def pop(self, interpreter):
result = self.stack.pop()
assert interpreter is result[0]
def clear(self, interpreter):
self.stack.filter(lambda x, i=interpreter: x[0] is not i)
def write(self, data):
self.current().write(data)
def writelines(self, lines):
self.current().writelines(lines)
def flush(self):
self.current().flush()
def close(self):
"""Close the current file. If the current file is the bottom, then
close it and dispose of it."""
current = self.current()
if current is self.bottom:
self.bottom = None
current.close()
def _testProxy(self): pass
class Filter:
"""An abstract filter."""
def __init__(self):
if self.__class__ is Filter:
raise NotImplementedError
self.sink = None
def next(self):
"""Return the next filter/file-like object in the sequence, or None."""
return self.sink
def write(self, data):
"""The standard write method; this must be overridden in subclasses."""
raise NotImplementedError
def writelines(self, lines):
"""Standard writelines wrapper."""
for line in lines:
self.write(line)
def _flush(self):
"""The _flush method should always flush the sink and should not
be overridden."""
self.sink.flush()
def flush(self):
"""The flush method can be overridden."""
self._flush()
def close(self):
"""Close the filter. Do an explicit flush first, then close the
sink."""
self.flush()
self.sink.close()
def attach(self, filter):
"""Attach a filter to this one."""
if self.sink is not None:
# If it's already attached, detach it first.
self.detach()
self.sink = filter
def detach(self):
"""Detach a filter from its sink."""
self.flush()
self._flush() # do a guaranteed flush just to be safe
self.sink = None
def last(self):
"""Find the last filter in this chain."""
this, last = self, self
while this is not None:
last = this
this = this.next()
return last
class NullFilter(Filter):
"""A filter that never sends any output to its sink."""
def write(self, data): pass
class FunctionFilter(Filter):
"""A filter that works simply by pumping its input through a
function which maps strings into strings."""
def __init__(self, function):
Filter.__init__(self)
self.function = function
def write(self, data):
self.sink.write(self.function(data))
class StringFilter(Filter):
"""A filter that takes a translation string (256 characters) and
filters any incoming data through it."""
def __init__(self, table):
if not (type(table) == types.StringType and len(table) == 256):
raise FilterError, "table must be 256-character string"
Filter.__init__(self)
self.table = table
def write(self, data):
self.sink.write(string.translate(data, self.table))
class BufferedFilter(Filter):
"""A buffered filter is one that doesn't modify the source data
sent to the sink, but instead holds it for a time. The standard
variety only sends the data along when it receives a flush
command."""
def __init__(self):
Filter.__init__(self)
self.buffer = ''
def write(self, data):
self.buffer = self.buffer + data
def flush(self):
if self.buffer:
self.sink.write(self.buffer)
self._flush()
class SizeBufferedFilter(BufferedFilter):
"""A size-buffered filter only in fixed size chunks (excepting the
final chunk)."""
def __init__(self, bufferSize):
BufferedFilter.__init__(self)
self.bufferSize = bufferSize
def write(self, data):
BufferedFilter.write(self, data)
while len(self.buffer) > self.bufferSize:
chunk, self.buffer = \
self.buffer[:self.bufferSize], self.buffer[self.bufferSize:]
self.sink.write(chunk)
class LineBufferedFilter(BufferedFilter):
"""A line-buffered filter only lets data through when it sees
whole lines."""
def __init__(self):
BufferedFilter.__init__(self)
def write(self, data):
BufferedFilter.write(self, data)
chunks = string.split(self.buffer, '\n')
for chunk in chunks[:-1]:
self.sink.write(chunk + '\n')
self.buffer = chunks[-1]
class MaximallyBufferedFilter(BufferedFilter):
"""A maximally-buffered filter only lets its data through on the final
close. It ignores flushes."""
def __init__(self):
BufferedFilter.__init__(self)
def flush(self): pass
def close(self):
if self.buffer:
BufferedFilter.flush(self)
self.sink.close()
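# Illustrative sketch (not part of the original module): a minimal custom
# filter built on the Filter base class above; the chain is terminated by
# attaching a real file-like object (sys is imported elsewhere in this
# module).
#
#   class UpperFilter(Filter):
#       def write(self, data):
#           self.sink.write(data.upper())
#
#   f = UpperFilter()
#   f.attach(sys.stdout)
#   f.write("hello\n")   # prints "HELLO"
#   f.flush()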
class Context:
"""An interpreter context, which encapsulates a name, an input
file object, and a parser object."""
DEFAULT_UNIT = 'lines'
def __init__(self, name, line=0, units=DEFAULT_UNIT):
self.name = name
self.line = line
self.units = units
self.pause = False
def bump(self, quantity=1):
if self.pause:
self.pause = False
else:
self.line = self.line + quantity
def identify(self):
return self.name, self.line
def __str__(self):
if self.units == self.DEFAULT_UNIT:
return "%s:%s" % (self.name, self.line)
else:
return "%s:%s[%s]" % (self.name, self.line, self.units)
class Hook:
"""The base class for implementing hooks."""
def __init__(self):
self.interpreter = None
def register(self, interpreter):
self.interpreter = interpreter
def deregister(self, interpreter):
if interpreter is not self.interpreter:
raise Error, "hook not associated with this interpreter"
self.interpreter = None
def push(self):
self.interpreter.push()
def pop(self):
self.interpreter.pop()
def null(self): pass
def atStartup(self): pass
def atReady(self): pass
def atFinalize(self): pass
def atShutdown(self): pass
def atParse(self, scanner, locals): pass
def atToken(self, token): pass
def atHandle(self, meta): pass
def atInteract(self): pass
def beforeInclude(self, name, file, locals): pass
def afterInclude(self): pass
def beforeExpand(self, string, locals): pass
def afterExpand(self, result): pass
def beforeFile(self, name, file, locals): pass
def afterFile(self): pass
def beforeBinary(self, name, file, chunkSize, locals): pass
def afterBinary(self): pass
def beforeString(self, name, string, locals): pass
def afterString(self): pass
def beforeQuote(self, string): pass
def afterQuote(self, result): pass
def beforeEscape(self, string, more): pass
def afterEscape(self, result): pass
def beforeControl(self, type, rest, locals): pass
def afterControl(self): pass
def beforeSignificate(self, key, value, locals): pass
def afterSignificate(self): pass
def beforeAtomic(self, name, value, locals): pass
def afterAtomic(self): pass
def beforeMulti(self, name, values, locals): pass
def afterMulti(self): pass
def beforeImport(self, name, locals): pass
def afterImport(self): pass
def beforeClause(self, catch, locals): pass
def afterClause(self, exception, variable): pass
def beforeSerialize(self, expression, locals): pass
def afterSerialize(self): pass
def beforeDefined(self, name, locals): pass
def afterDefined(self, result): pass
def beforeLiteral(self, text): pass
def afterLiteral(self): pass
def beforeEvaluate(self, expression, locals): pass
def afterEvaluate(self, result): pass
def beforeExecute(self, statements, locals): pass
def afterExecute(self): pass
def beforeSingle(self, source, locals): pass
def afterSingle(self): pass
class VerboseHook(Hook):
"""A verbose hook that reports all information received by the
hook interface. This class dynamically scans the Hook base class
to ensure that all hook methods are properly represented."""
EXEMPT_ATTRIBUTES = ['register', 'deregister', 'push', 'pop']
def __init__(self, output=sys.stderr):
Hook.__init__(self)
self.output = output
self.indent = 0
class FakeMethod:
"""This is a proxy method-like object."""
def __init__(self, hook, name):
self.hook = hook
self.name = name
def __call__(self, **keywords):
self.hook.output.write("%s%s: %s\n" % \
(' ' * self.hook.indent, \
self.name, repr(keywords)))
for attribute in dir(Hook):
if attribute[:1] != '_' and \
attribute not in self.EXEMPT_ATTRIBUTES:
self.__dict__[attribute] = FakeMethod(self, attribute)
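# Illustrative sketch (not part of the original module): a custom hook that
# counts expansions, registered through the interpreter's register() method
# (the Interpreter class is defined later in this module); the method name
# follows the Hook base class above.
#
#   class CountingHook(Hook):
#       def __init__(self):
#           Hook.__init__(self)
#           self.expansions = 0
#       def beforeExpand(self, string, locals):
#           self.expansions = self.expansions + 1
#
#   interp = Interpreter()
#   interp.register(CountingHook())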
class Token:
"""An element of expansion."""
def run(self, interpreter, locals):
raise NotImplementedError
def string(self):
raise NotImplementedError
def __str__(self): return self.string()
class NullToken(Token):
"""A chunk of data not containing markups."""
def __init__(self, data):
self.data = data
def run(self, interpreter, locals):
interpreter.write(self.data)
def string(self):
return self.data
class ExpansionToken(Token):
"""A token that involves an expansion."""
def __init__(self, prefix, first):
self.prefix = prefix
self.first = first
def scan(self, scanner):
pass
def run(self, interpreter, locals):
pass
class WhitespaceToken(ExpansionToken):
"""A whitespace markup."""
def string(self):
return '%s%s' % (self.prefix, self.first)
class LiteralToken(ExpansionToken):
"""A literal markup."""
def run(self, interpreter, locals):
interpreter.write(self.first)
def string(self):
return '%s%s' % (self.prefix, self.first)
class PrefixToken(ExpansionToken):
"""A prefix markup."""
def run(self, interpreter, locals):
interpreter.write(interpreter.prefix)
def string(self):
return self.prefix * 2
class CommentToken(ExpansionToken):
"""A comment markup."""
def scan(self, scanner):
loc = scanner.find('\n')
if loc >= 0:
self.comment = scanner.chop(loc, 1)
else:
raise TransientParseError, "comment expects newline"
def string(self):
return '%s#%s\n' % (self.prefix, self.comment)
class ContextNameToken(ExpansionToken):
"""A context name change markup."""
def scan(self, scanner):
loc = scanner.find('\n')
if loc >= 0:
self.name = string.strip(scanner.chop(loc, 1))
else:
raise TransientParseError, "context name expects newline"
def run(self, interpreter, locals):
context = interpreter.context()
context.name = self.name
class ContextLineToken(ExpansionToken):
"""A context line change markup."""
def scan(self, scanner):
loc = scanner.find('\n')
if loc >= 0:
try:
self.line = int(scanner.chop(loc, 1))
except ValueError:
raise ParseError, "context line requires integer"
else:
raise TransientParseError, "context line expects newline"
def run(self, interpreter, locals):
context = interpreter.context()
context.line = self.line
context.pause = True
class EscapeToken(ExpansionToken):
"""An escape markup."""
def scan(self, scanner):
try:
code = scanner.chop(1)
result = None
if code in '()[]{}\'\"\\': # literals
result = code
elif code == '0': # NUL
result = '\x00'
elif code == 'a': # BEL
result = '\x07'
elif code == 'b': # BS
result = '\x08'
elif code == 'd': # decimal code
decimalCode = scanner.chop(3)
result = chr(string.atoi(decimalCode, 10))
elif code == 'e': # ESC
result = '\x1b'
elif code == 'f': # FF
result = '\x0c'
elif code == 'h': # DEL
result = '\x7f'
elif code == 'n': # LF (newline)
result = '\x0a'
elif code == 'N': # Unicode character name
theSubsystem.assertUnicode()
import unicodedata
if scanner.chop(1) != '{':
raise ParseError, r"Unicode name escape should be \N{...}"
i = scanner.find('}')
name = scanner.chop(i, 1)
try:
result = unicodedata.lookup(name)
except KeyError:
raise SubsystemError, \
"unknown Unicode character name: %s" % name
elif code == 'o': # octal code
octalCode = scanner.chop(3)
result = chr(string.atoi(octalCode, 8))
elif code == 'q': # quaternary code
quaternaryCode = scanner.chop(4)
result = chr(string.atoi(quaternaryCode, 4))
elif code == 'r': # CR
result = '\x0d'
elif code in 's ': # SP
result = ' '
elif code == 't': # HT
result = '\x09'
elif code in 'u': # Unicode 16-bit hex literal
theSubsystem.assertUnicode()
hexCode = scanner.chop(4)
result = unichr(string.atoi(hexCode, 16))
elif code in 'U': # Unicode 32-bit hex literal
theSubsystem.assertUnicode()
hexCode = scanner.chop(8)
result = unichr(string.atoi(hexCode, 16))
elif code == 'v': # VT
result = '\x0b'
elif code == 'x': # hexadecimal code
hexCode = scanner.chop(2)
result = chr(string.atoi(hexCode, 16))
elif code == 'z': # EOT
result = '\x04'
elif code == '^': # control character
controlCode = string.upper(scanner.chop(1))
if controlCode >= '@' and controlCode <= '`':
result = chr(ord(controlCode) - ord('@'))
elif controlCode == '?':
result = '\x7f'
else:
raise ParseError, "invalid escape control code"
else:
raise ParseError, "unrecognized escape code"
assert result is not None
self.code = result
except ValueError:
raise ParseError, "invalid numeric escape code"
def run(self, interpreter, locals):
interpreter.write(self.code)
def string(self):
return '%s\\x%02x' % (self.prefix, ord(self.code))
class SignificatorToken(ExpansionToken):
"""A significator markup."""
def scan(self, scanner):
loc = scanner.find('\n')
if loc >= 0:
line = scanner.chop(loc, 1)
if not line:
raise ParseError, "significator must have nonblank key"
if line[0] in ' \t\v\n':
raise ParseError, "no whitespace between % and key"
# Work around a subtle CPython-Jython difference by stripping
# the string before splitting it: 'a '.split(None, 1) has two
# elements in Jython 2.1.
fields = string.split(string.strip(line), None, 1)
if len(fields) == 2 and fields[1] == '':
fields.pop()
self.key = fields[0]
if len(fields) < 2:
fields.append(None)
self.key, self.valueCode = fields
else:
raise TransientParseError, "significator expects newline"
def run(self, interpreter, locals):
value = self.valueCode
if value is not None:
value = interpreter.evaluate(string.strip(value), locals)
interpreter.significate(self.key, value)
def string(self):
if self.valueCode is None:
return '%s%%%s\n' % (self.prefix, self.key)
else:
return '%s%%%s %s\n' % (self.prefix, self.key, self.valueCode)
class ExpressionToken(ExpansionToken):
"""An expression markup."""
def scan(self, scanner):
z = scanner.complex('(', ')', 0)
try:
q = scanner.next('$', 0, z, True)
except ParseError:
q = z
try:
i = scanner.next('?', 0, q, True)
try:
j = scanner.next('!', i, q, True)
except ParseError:
try:
j = scanner.next(':', i, q, True) # DEPRECATED
except ParseError:
j = q
except ParseError:
i = j = q
code = scanner.chop(z, 1)
self.testCode = code[:i]
self.thenCode = code[i + 1:j]
self.elseCode = code[j + 1:q]
self.exceptCode = code[q + 1:z]
def run(self, interpreter, locals):
try:
result = interpreter.evaluate(self.testCode, locals)
if self.thenCode:
if result:
result = interpreter.evaluate(self.thenCode, locals)
else:
if self.elseCode:
result = interpreter.evaluate(self.elseCode, locals)
else:
result = None
except SyntaxError:
# Don't catch syntax errors; let them through.
raise
except:
if self.exceptCode:
result = interpreter.evaluate(self.exceptCode, locals)
else:
raise
if result is not None:
interpreter.write(str(result))
def string(self):
result = self.testCode
if self.thenCode:
result = result + '?' + self.thenCode
if self.elseCode:
result = result + '!' + self.elseCode
if self.exceptCode:
result = result + '$' + self.exceptCode
return '%s(%s)' % (self.prefix, result)
class StringLiteralToken(ExpansionToken):
"""A string token markup."""
def scan(self, scanner):
scanner.retreat()
assert scanner[0] == self.first
i = scanner.quote()
self.literal = scanner.chop(i)
def run(self, interpreter, locals):
interpreter.literal(self.literal)
def string(self):
return '%s%s' % (self.prefix, self.literal)
class SimpleExpressionToken(ExpansionToken):
"""A simple expression markup."""
def scan(self, scanner):
i = scanner.simple()
self.code = self.first + scanner.chop(i)
def run(self, interpreter, locals):
interpreter.serialize(self.code, locals)
def string(self):
return '%s%s' % (self.prefix, self.code)
class ReprToken(ExpansionToken):
"""A repr markup."""
def scan(self, scanner):
i = scanner.next('`', 0)
self.code = scanner.chop(i, 1)
def run(self, interpreter, locals):
interpreter.write(repr(interpreter.evaluate(self.code, locals)))
def string(self):
return '%s`%s`' % (self.prefix, self.code)
class InPlaceToken(ExpansionToken):
"""An in-place markup."""
def scan(self, scanner):
i = scanner.next(':', 0)
j = scanner.next(':', i + 1)
self.code = scanner.chop(i, j - i + 1)
def run(self, interpreter, locals):
interpreter.write("%s:%s:" % (interpreter.prefix, self.code))
try:
interpreter.serialize(self.code, locals)
finally:
interpreter.write(":")
def string(self):
return '%s:%s::' % (self.prefix, self.code)
class StatementToken(ExpansionToken):
"""A statement markup."""
def scan(self, scanner):
i = scanner.complex('{', '}', 0)
self.code = scanner.chop(i, 1)
def run(self, interpreter, locals):
interpreter.execute(self.code, locals)
def string(self):
return '%s{%s}' % (self.prefix, self.code)
class CustomToken(ExpansionToken):
"""A custom markup."""
def scan(self, scanner):
i = scanner.complex('<', '>', 0)
self.contents = scanner.chop(i, 1)
def run(self, interpreter, locals):
interpreter.invokeCallback(self.contents)
def string(self):
return '%s<%s>' % (self.prefix, self.contents)
class ControlToken(ExpansionToken):
"""A control token."""
PRIMARY_TYPES = ['if', 'for', 'while', 'try', 'def']
SECONDARY_TYPES = ['elif', 'else', 'except', 'finally']
TERTIARY_TYPES = ['continue', 'break']
GREEDY_TYPES = ['if', 'elif', 'for', 'while', 'def', 'end']
END_TYPES = ['end']
IN_RE = re.compile(r"\bin\b")
def scan(self, scanner):
scanner.acquire()
i = scanner.complex('[', ']', 0)
self.contents = scanner.chop(i, 1)
fields = string.split(string.strip(self.contents), ' ', 1)
if len(fields) > 1:
self.type, self.rest = fields
else:
self.type = fields[0]
self.rest = None
self.subtokens = []
if self.type in self.GREEDY_TYPES and self.rest is None:
raise ParseError, "control '%s' needs arguments" % self.type
if self.type in self.PRIMARY_TYPES:
self.subscan(scanner, self.type)
self.kind = 'primary'
elif self.type in self.SECONDARY_TYPES:
self.kind = 'secondary'
elif self.type in self.TERTIARY_TYPES:
self.kind = 'tertiary'
elif self.type in self.END_TYPES:
self.kind = 'end'
else:
raise ParseError, "unknown control markup: '%s'" % self.type
scanner.release()
def subscan(self, scanner, primary):
"""Do a subscan for contained tokens."""
while True:
token = scanner.one()
if token is None:
raise TransientParseError, \
"control '%s' needs more tokens" % primary
if isinstance(token, ControlToken) and \
token.type in self.END_TYPES:
if token.rest != primary:
raise ParseError, \
"control must end with 'end %s'" % primary
break
self.subtokens.append(token)
def build(self, allowed=None):
"""Process the list of subtokens and divide it into a list of
2-tuples, consisting of the dividing tokens and the list of
subtokens that follow them. If allowed is specified, it will
represent the list of the only secondary markup types which
are allowed."""
if allowed is None:
allowed = self.SECONDARY_TYPES
result = []
latest = []
result.append((self, latest))
for subtoken in self.subtokens:
if isinstance(subtoken, ControlToken) and \
subtoken.kind == 'secondary':
if subtoken.type not in allowed:
raise ParseError, \
"control unexpected secondary: '%s'" % subtoken.type
latest = []
result.append((subtoken, latest))
else:
latest.append(subtoken)
return result
def run(self, interpreter, locals):
interpreter.invoke('beforeControl', type=self.type, rest=self.rest, \
locals=locals)
if self.type == 'if':
info = self.build(['elif', 'else'])
elseTokens = None
if info[-1][0].type == 'else':
elseTokens = info.pop()[1]
for secondary, subtokens in info:
if secondary.type not in ('if', 'elif'):
raise ParseError, \
"control 'if' unexpected secondary: '%s'" % secondary.type
if interpreter.evaluate(secondary.rest, locals):
self.subrun(subtokens, interpreter, locals)
break
else:
if elseTokens:
self.subrun(elseTokens, interpreter, locals)
elif self.type == 'for':
sides = self.IN_RE.split(self.rest, 1)
if len(sides) != 2:
raise ParseError, "control expected 'for x in seq'"
iterator, sequenceCode = sides
info = self.build(['else'])
elseTokens = None
if info[-1][0].type == 'else':
elseTokens = info.pop()[1]
if len(info) != 1:
raise ParseError, "control 'for' expects at most one 'else'"
sequence = interpreter.evaluate(sequenceCode, locals)
for element in sequence:
try:
interpreter.assign(iterator, element, locals)
self.subrun(info[0][1], interpreter, locals)
except ContinueFlow:
continue
except BreakFlow:
break
else:
if elseTokens:
self.subrun(elseTokens, interpreter, locals)
elif self.type == 'while':
testCode = self.rest
info = self.build(['else'])
elseTokens = None
if info[-1][0].type == 'else':
elseTokens = info.pop()[1]
if len(info) != 1:
raise ParseError, "control 'while' expects at most one 'else'"
atLeastOnce = False
while True:
try:
if not interpreter.evaluate(testCode, locals):
break
atLeastOnce = True
self.subrun(info[0][1], interpreter, locals)
except ContinueFlow:
continue
except BreakFlow:
break
if not atLeastOnce and elseTokens:
self.subrun(elseTokens, interpreter, locals)
elif self.type == 'try':
info = self.build(['except', 'finally'])
if len(info) == 1:
raise ParseError, "control 'try' needs 'except' or 'finally'"
type = info[-1][0].type
if type == 'except':
for secondary, _tokens in info[1:]:
if secondary.type != 'except':
raise ParseError, \
"control 'try' cannot have 'except' and 'finally'"
else:
assert type == 'finally'
if len(info) != 2:
raise ParseError, \
"control 'try' can only have one 'finally'"
if type == 'except':
try:
self.subrun(info[0][1], interpreter, locals)
except FlowError:
raise
except Exception, e:
for secondary, tokens in info[1:]:
exception, variable = interpreter.clause(secondary.rest)
if variable is not None:
interpreter.assign(variable, e)
if isinstance(e, exception):
self.subrun(tokens, interpreter, locals)
break
else:
raise
else:
try:
self.subrun(info[0][1], interpreter, locals)
finally:
self.subrun(info[1][1], interpreter, locals)
elif self.type == 'continue':
raise ContinueFlow, "control 'continue' without 'for', 'while'"
elif self.type == 'break':
raise BreakFlow, "control 'break' without 'for', 'while'"
elif self.type == 'def':
signature = self.rest
definition = self.substring()
code = 'def %s:\n' \
' r"""%s"""\n' \
' return %s.expand(r"""%s""", locals())\n' % \
(signature, definition, interpreter.pseudo, definition)
interpreter.execute(code, locals)
elif self.type == 'end':
raise ParseError, "control 'end' requires primary markup"
else:
raise ParseError, \
"control '%s' cannot be at this level" % self.type
interpreter.invoke('afterControl')
def subrun(self, tokens, interpreter, locals):
"""Execute a sequence of tokens."""
for token in tokens:
token.run(interpreter, locals)
def substring(self):
return string.join(map(str, self.subtokens), '')
def string(self):
if self.kind == 'primary':
return '%s[%s]%s%s[end %s]' % \
(self.prefix, self.contents, self.substring(), \
self.prefix, self.type)
else:
return '%s[%s]' % (self.prefix, self.contents)
class Scanner:
"""A scanner holds a buffer for lookahead parsing and has the
ability to scan for special symbols and indicators in that
buffer."""
# This is the token mapping table that maps first characters to
# token classes.
TOKEN_MAP = [
(None, PrefixToken),
(' \t\v\r\n', WhitespaceToken),
(')]}', LiteralToken),
('\\', EscapeToken),
('#', CommentToken),
('?', ContextNameToken),
('!', ContextLineToken),
('%', SignificatorToken),
('(', ExpressionToken),
(IDENTIFIER_FIRST_CHARS, SimpleExpressionToken),
('\'\"', StringLiteralToken),
('`', ReprToken),
(':', InPlaceToken),
('[', ControlToken),
('{', StatementToken),
('<', CustomToken),
]
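# For reference (added comment, not part of the original module): this table
# drives Scanner.one() below. After the prefix character, the next character
# selects the token class, e.g. "@# ...\n" is handled by CommentToken,
# "@(expr)" by ExpressionToken, "@{stmt}" by StatementToken and "@[if ...]"
# by ControlToken (assuming the default '@' prefix).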
def __init__(self, prefix, data=''):
self.prefix = prefix
self.pointer = 0
self.buffer = data
self.lock = 0
def __nonzero__(self): return self.pointer < len(self.buffer)
def __len__(self): return len(self.buffer) - self.pointer
def __getitem__(self, index): return self.buffer[self.pointer + index]
def __getslice__(self, start, stop):
if stop > len(self):
stop = len(self)
return self.buffer[self.pointer + start:self.pointer + stop]
def advance(self, count=1):
"""Advance the pointer count characters."""
self.pointer = self.pointer + count
def retreat(self, count=1):
self.pointer = self.pointer - count
if self.pointer < 0:
raise ParseError, "can't retreat back over synced out chars"
def set(self, data):
"""Start the scanner digesting a new batch of data; start the pointer
over from scratch."""
self.pointer = 0
self.buffer = data
def feed(self, data):
"""Feed some more data to the scanner."""
self.buffer = self.buffer + data
def chop(self, count=None, slop=0):
"""Chop the first count + slop characters off the front, and return
the first count. If count is not specified, then return
everything."""
if count is None:
assert slop == 0
count = len(self)
if count > len(self):
raise TransientParseError, "not enough data to read"
result = self[:count]
self.advance(count + slop)
return result
def acquire(self):
"""Lock the scanner so it doesn't destroy data on sync."""
self.lock = self.lock + 1
def release(self):
"""Unlock the scanner."""
self.lock = self.lock - 1
def sync(self):
"""Sync up the buffer with the read head."""
if self.lock == 0 and self.pointer != 0:
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
def unsync(self):
"""Undo changes; reset the read head."""
if self.pointer != 0:
self.lock = 0
self.pointer = 0
def rest(self):
"""Get the remainder of the buffer."""
return self[:]
def read(self, i=0, count=1):
"""Read count chars starting from i; raise a transient error if
there aren't enough characters remaining."""
if len(self) < i + count:
raise TransientParseError, "need more data to read"
else:
return self[i:i + count]
def check(self, i, archetype=None):
"""Scan for the next single or triple quote, with the specified
archetype. Return the found quote or None."""
quote = None
if self[i] in '\'\"':
quote = self[i]
if len(self) - i < 3:
for j in range(i, len(self)):
if self[i] == quote:
return quote
else:
raise TransientParseError, "need to scan for rest of quote"
if self[i + 1] == self[i + 2] == quote:
quote = quote * 3
if quote is not None:
if archetype is None:
return quote
else:
if archetype == quote:
return quote
elif len(archetype) < len(quote) and archetype[0] == quote[0]:
return archetype
else:
return None
else:
return None
def find(self, sub, start=0, end=None):
"""Find the next occurrence of the character, or return -1."""
if end is not None:
return string.find(self.rest(), sub, start, end)
else:
return string.find(self.rest(), sub, start)
def last(self, char, start=0, end=None):
"""Find the first character that is _not_ the specified character."""
if end is None:
end = len(self)
i = start
while i < end:
if self[i] != char:
return i
i = i + 1
else:
raise TransientParseError, "expecting other than %s" % char
def next(self, target, start=0, end=None, mandatory=False):
"""Scan for the next occurrence of one of the characters in
the target string; optionally, make the scan mandatory."""
if mandatory:
assert end is not None
quote = None
if end is None:
end = len(self)
i = start
while i < end:
newQuote = self.check(i, quote)
if newQuote:
if newQuote == quote:
quote = None
else:
quote = newQuote
i = i + len(newQuote)
else:
c = self[i]
if quote:
if c == '\\':
i = i + 1
else:
if c in target:
return i
i = i + 1
else:
if mandatory:
raise ParseError, "expecting %s, not found" % target
else:
raise TransientParseError, "expecting ending character"
def quote(self, start=0, end=None, mandatory=False):
"""Scan for the end of the next quote."""
assert self[start] in '\'\"'
quote = self.check(start)
if end is None:
end = len(self)
i = start + len(quote)
while i < end:
newQuote = self.check(i, quote)
if newQuote:
i = i + len(newQuote)
if newQuote == quote:
return i
else:
c = self[i]
if c == '\\':
i = i + 1
i = i + 1
else:
if mandatory:
raise ParseError, "expecting end of string literal"
else:
raise TransientParseError, "expecting end of string literal"
def nested(self, enter, exit, start=0, end=None):
"""Scan from i for an ending sequence, respecting entries and exits
only."""
depth = 0
if end is None:
end = len(self)
i = start
while i < end:
c = self[i]
if c == enter:
depth = depth + 1
elif c == exit:
depth = depth - 1
if depth < 0:
return i
i = i + 1
else:
raise TransientParseError, "expecting end of complex expression"
def complex(self, enter, exit, start=0, end=None, skip=None):
"""Scan from i for an ending sequence, respecting quotes,
entries and exits."""
quote = None
depth = 0
if end is None:
end = len(self)
last = None
i = start
while i < end:
newQuote = self.check(i, quote)
if newQuote:
if newQuote == quote:
quote = None
else:
quote = newQuote
i = i + len(newQuote)
else:
c = self[i]
if quote:
if c == '\\':
i = i + 1
else:
if skip is None or last != skip:
if c == enter:
depth = depth + 1
elif c == exit:
depth = depth - 1
if depth < 0:
return i
last = c
i = i + 1
else:
raise TransientParseError, "expecting end of complex expression"
def word(self, start=0):
"""Scan from i for a simple word."""
length = len(self)
i = start
while i < length:
if not self[i] in IDENTIFIER_CHARS:
return i
i = i + 1
else:
raise TransientParseError, "expecting end of word"
def phrase(self, start=0):
"""Scan from i for a phrase (e.g., 'word', 'f(a, b, c)', 'a[i]', or
combinations like 'x[i](a)'."""
# Find the word.
i = self.word(start)
while i < len(self) and self[i] in '([{':
enter = self[i]
if enter == '{':
raise ParseError, "curly braces can't open simple expressions"
exit = ENDING_CHARS[enter]
i = self.complex(enter, exit, i + 1) + 1
return i
def simple(self, start=0):
"""Scan from i for a simple expression, which consists of one
more phrases separated by dots."""
i = self.phrase(start)
length = len(self)
while i < length and self[i] == '.':
i = self.phrase(i)
# Make sure we don't end with a trailing dot.
while i > 0 and self[i - 1] == '.':
i = i - 1
return i
def one(self):
"""Parse and return one token, or None if the scanner is empty."""
if not self:
return None
if not self.prefix:
loc = -1
else:
loc = self.find(self.prefix)
if loc < 0:
# If there's no prefix in the buffer, then set the location to
# the end so the whole thing gets processed.
loc = len(self)
if loc == 0:
# If there's a prefix at the beginning of the buffer, process
# an expansion.
prefix = self.chop(1)
assert prefix == self.prefix
first = self.chop(1)
if first == self.prefix:
first = None
for firsts, factory in self.TOKEN_MAP:
if firsts is None:
if first is None:
break
elif first in firsts:
break
else:
raise ParseError, "unknown markup: %s%s" % (self.prefix, first)
token = factory(self.prefix, first)
try:
token.scan(self)
except TransientParseError:
# If a transient parse error occurs, reset the buffer pointer
# so we can (conceivably) try again later.
self.unsync()
raise
else:
# Process everything up to loc as a null token.
data = self.chop(loc)
token = NullToken(data)
self.sync()
return token
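# Illustrative sketch (not part of the original module): feeding a scanner
# incrementally and pulling tokens, assuming the default '@' prefix.
#
#   scanner = Scanner('@')
#   scanner.feed("plain text @(1 + 1)\n")
#   while True:
#       token = scanner.one()   # NullToken, ExpressionToken, then a
#       if token is None:       # NullToken for the trailing newline
#           break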
class Interpreter:
"""An interpreter can process chunks of EmPy code."""
# Constants.
VERSION = __version__
SIGNIFICATOR_RE_SUFFIX = SIGNIFICATOR_RE_SUFFIX
SIGNIFICATOR_RE_STRING = None
# Types.
Interpreter = None # define this below to prevent a circular reference
Hook = Hook # DEPRECATED
Filter = Filter # DEPRECATED
NullFilter = NullFilter # DEPRECATED
FunctionFilter = FunctionFilter # DEPRECATED
StringFilter = StringFilter # DEPRECATED
BufferedFilter = BufferedFilter # DEPRECATED
SizeBufferedFilter = SizeBufferedFilter # DEPRECATED
LineBufferedFilter = LineBufferedFilter # DEPRECATED
MaximallyBufferedFilter = MaximallyBufferedFilter # DEPRECATED
# Tables.
ESCAPE_CODES = {0x00: '0', 0x07: 'a', 0x08: 'b', 0x1b: 'e', 0x0c: 'f', \
0x7f: 'h', 0x0a: 'n', 0x0d: 'r', 0x09: 't', 0x0b: 'v', \
0x04: 'z'}
ASSIGN_TOKEN_RE = re.compile(r"[_a-zA-Z][_a-zA-Z0-9]*|\(|\)|,")
DEFAULT_OPTIONS = {BANGPATH_OPT: True,
BUFFERED_OPT: False,
RAW_OPT: False,
EXIT_OPT: True,
FLATTEN_OPT: False,
OVERRIDE_OPT: True,
CALLBACK_OPT: False}
_wasProxyInstalled = False # was a proxy installed?
# Construction, initialization, destruction.
def __init__(self, output=None, argv=None, prefix=DEFAULT_PREFIX, \
pseudo=None, options=None, globals=None, hooks=None):
self.interpreter = self # DEPRECATED
# Set up the stream.
if output is None:
output = UncloseableFile(sys.__stdout__)
self.output = output
self.prefix = prefix
if pseudo is None:
pseudo = DEFAULT_PSEUDOMODULE_NAME
self.pseudo = pseudo
if argv is None:
argv = [DEFAULT_SCRIPT_NAME]
self.argv = argv
self.args = argv[1:]
if options is None:
options = {}
self.options = options
# Initialize any hooks.
self.hooksEnabled = None # special sentinel meaning "false until added"
self.hooks = []
if hooks is None:
hooks = []
for hook in hooks:
self.register(hook)
# Initialize callback.
self.callback = None
# Finalizers.
self.finals = []
# The interpreter stacks.
self.contexts = Stack()
self.streams = Stack()
# Now set up the globals.
self.globals = globals
self.fix()
self.history = Stack()
# Install a proxy stdout if one hasn't been already.
self.installProxy()
# Finally, reset the state of all the stacks.
self.reset()
# Okay, now flatten the namespaces if that option has been set.
if self.options.get(FLATTEN_OPT, False):
self.flatten()
# Set up old pseudomodule attributes.
if prefix is None:
self.SIGNIFICATOR_RE_STRING = None
else:
self.SIGNIFICATOR_RE_STRING = prefix + self.SIGNIFICATOR_RE_SUFFIX
self.Interpreter = self.__class__
# Done. Now declare that we've started up.
self.invoke('atStartup')
def __del__(self):
self.shutdown()
def __repr__(self):
return '<%s pseudomodule/interpreter at 0x%x>' % \
(self.pseudo, id(self))
def ready(self):
"""Declare the interpreter ready for normal operations."""
self.invoke('atReady')
def fix(self):
"""Reset the globals, stamping in the pseudomodule."""
if self.globals is None:
self.globals = {}
# Make sure that there is no collision between two interpreters'
# globals.
if self.globals.has_key(self.pseudo):
if self.globals[self.pseudo] is not self:
raise Error, "interpreter globals collision"
self.globals[self.pseudo] = self
def unfix(self):
"""Remove the pseudomodule (if present) from the globals."""
UNWANTED_KEYS = [self.pseudo, '__builtins__']
for unwantedKey in UNWANTED_KEYS:
if self.globals.has_key(unwantedKey):
del self.globals[unwantedKey]
def update(self, other):
"""Update the current globals dictionary with another dictionary."""
self.globals.update(other)
self.fix()
def clear(self):
"""Clear out the globals dictionary with a brand new one."""
self.globals = {}
self.fix()
def save(self, deep=True):
"""Save a copy of the current globals on the history stack."""
if deep:
copyMethod = copy.deepcopy
else:
copyMethod = copy.copy
self.unfix()
self.history.push(copyMethod(self.globals))
self.fix()
def restore(self, destructive=True):
"""Restore the topmost historic globals."""
if destructive:
fetchMethod = self.history.pop
else:
fetchMethod = self.history.top
self.unfix()
self.globals = fetchMethod()
self.fix()
def shutdown(self):
"""Declare this interpreting session over; close the stream file
object. This method is idempotent."""
if self.streams is not None:
try:
self.finalize()
self.invoke('atShutdown')
while self.streams:
stream = self.streams.pop()
stream.close()
finally:
self.streams = None
def ok(self):
"""Is the interpreter still active?"""
return self.streams is not None
# Writeable file-like methods.
def write(self, data):
self.stream().write(data)
def writelines(self, stuff):
self.stream().writelines(stuff)
def flush(self):
self.stream().flush()
def close(self):
self.shutdown()
# Stack-related activity.
def context(self):
return self.contexts.top()
def stream(self):
return self.streams.top()
def reset(self):
self.contexts.purge()
self.streams.purge()
self.streams.push(Stream(self.output))
if self.options.get(OVERRIDE_OPT, True):
sys.stdout.clear(self)
def push(self):
if self.options.get(OVERRIDE_OPT, True):
sys.stdout.push(self)
def pop(self):
if self.options.get(OVERRIDE_OPT, True):
sys.stdout.pop(self)
# Higher-level operations.
def include(self, fileOrFilename, locals=None):
"""Do an include pass on a file or filename."""
if type(fileOrFilename) is types.StringType:
# Either it's a string representing a filename ...
filename = fileOrFilename
name = filename
file = theSubsystem.open(filename, 'r')
else:
# ... or a file object.
file = fileOrFilename
name = "<%s>" % str(file.__class__)
self.invoke('beforeInclude', name=name, file=file, locals=locals)
self.file(file, name, locals)
self.invoke('afterInclude')
def expand(self, data, locals=None):
"""Do an explicit expansion on a subordinate stream."""
outFile = StringIO.StringIO()
stream = Stream(outFile)
self.invoke('beforeExpand', string=data, locals=locals)
self.streams.push(stream)
try:
self.string(data, '<expand>', locals)
stream.flush()
expansion = outFile.getvalue()
self.invoke('afterExpand', result=expansion)
return expansion
finally:
self.streams.pop()
def quote(self, data):
"""Quote the given string so that if it were expanded it would
evaluate to the original."""
self.invoke('beforeQuote', string=data)
scanner = Scanner(self.prefix, data)
result = []
i = 0
try:
j = scanner.next(self.prefix, i)
result.append(data[i:j])
result.append(self.prefix * 2)
i = j + 1
except TransientParseError:
pass
result.append(data[i:])
result = string.join(result, '')
self.invoke('afterQuote', result=result)
return result
def escape(self, data, more=''):
"""Escape a string so that nonprintable characters are replaced
with compatible EmPy expansions."""
self.invoke('beforeEscape', string=data, more=more)
result = []
for char in data:
if char < ' ' or char > '~':
charOrd = ord(char)
if Interpreter.ESCAPE_CODES.has_key(charOrd):
result.append(self.prefix + '\\' + \
Interpreter.ESCAPE_CODES[charOrd])
else:
result.append(self.prefix + '\\x%02x' % charOrd)
elif char in more:
result.append(self.prefix + '\\' + char)
else:
result.append(char)
result = string.join(result, '')
self.invoke('afterEscape', result=result)
return result
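# Illustrative sketch (not part of the original module): quote() doubles
# prefix characters so that expanding the result reproduces the input, while
# escape() rewrites nonprintable characters as EmPy escape markups (assuming
# `interp` is an Interpreter instance and the default '@' prefix).
#
#   interp.quote("user@host")      # -> 'user@@host'
#   interp.escape("bell\x07")      # -> 'bell@\a' (prefix, backslash, code 'a')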
# Processing.
def wrap(self, callable, args):
"""Wrap around an application of a callable and handle errors.
Return whether no error occurred."""
try:
apply(callable, args)
self.reset()
return True
except KeyboardInterrupt, e:
# Handle keyboard interrupts specially: we should always exit
# from these.
self.fail(e, True)
except Exception, e:
# A standard exception (other than a keyboard interrupt).
self.fail(e)
except:
# If we get here, then either it's an exception not derived from
# Exception or it's a string exception, so get the error type
# from the sys module.
e = sys.exc_type
self.fail(e)
# An error occurred if we leak through to here, so do cleanup.
self.reset()
return False
def interact(self):
"""Perform interaction."""
self.invoke('atInteract')
done = False
while not done:
result = self.wrap(self.file, (sys.stdin, '<interact>'))
if self.options.get(EXIT_OPT, True):
done = True
else:
if result:
done = True
else:
self.reset()
def fail(self, error, fatal=False):
"""Handle an actual error that occurred."""
if self.options.get(BUFFERED_OPT, False):
try:
self.output.abort()
except AttributeError:
# If the output file object doesn't have an abort method,
# something got mismatched, but it's too late to do
# anything about it now anyway, so just ignore it.
pass
meta = self.meta(error)
self.handle(meta)
if self.options.get(RAW_OPT, False):
raise
if fatal or self.options.get(EXIT_OPT, True):
sys.exit(FAILURE_CODE)
def file(self, file, name='<file>', locals=None):
"""Parse the entire contents of a file-like object, line by line."""
context = Context(name)
self.contexts.push(context)
self.invoke('beforeFile', name=name, file=file, locals=locals)
scanner = Scanner(self.prefix)
first = True
done = False
while not done:
self.context().bump()
line = file.readline()
if first:
if self.options.get(BANGPATH_OPT, True) and self.prefix:
# Replace a bangpath at the beginning of the first line
# with an EmPy comment.
if string.find(line, BANGPATH) == 0:
line = self.prefix + '#' + line[2:]
first = False
if line:
scanner.feed(line)
else:
done = True
self.safe(scanner, done, locals)
self.invoke('afterFile')
self.contexts.pop()
def binary(self, file, name='<binary>', chunkSize=0, locals=None):
"""Parse the entire contents of a file-like object, in chunks."""
if chunkSize <= 0:
chunkSize = DEFAULT_CHUNK_SIZE
context = Context(name, units='bytes')
self.contexts.push(context)
self.invoke('beforeBinary', name=name, file=file, \
chunkSize=chunkSize, locals=locals)
scanner = Scanner(self.prefix)
done = False
while not done:
chunk = file.read(chunkSize)
if chunk:
scanner.feed(chunk)
else:
done = True
self.safe(scanner, done, locals)
self.context().bump(len(chunk))
self.invoke('afterBinary')
self.contexts.pop()
def string(self, data, name='<string>', locals=None):
"""Parse a string."""
context = Context(name)
self.contexts.push(context)
self.invoke('beforeString', name=name, string=data, locals=locals)
context.bump()
scanner = Scanner(self.prefix, data)
self.safe(scanner, True, locals)
self.invoke('afterString')
self.contexts.pop()
def safe(self, scanner, final=False, locals=None):
"""Do a protected parse. Catch transient parse errors; if
final is true, then make a final pass with a terminator,
otherwise ignore the transient parse error (more data is
pending)."""
try:
self.parse(scanner, locals)
except TransientParseError:
if final:
# If the buffer doesn't end with a newline, try tacking on
# a dummy terminator.
buffer = scanner.rest()
if buffer and buffer[-1] != '\n':
scanner.feed(self.prefix + '\n')
# A TransientParseError thrown from here is a real parse
# error.
self.parse(scanner, locals)
def parse(self, scanner, locals=None):
"""Parse and run as much from this scanner as possible."""
self.invoke('atParse', scanner=scanner, locals=locals)
while True:
token = scanner.one()
if token is None:
break
self.invoke('atToken', token=token)
token.run(self, locals)
# Medium-level evaluation and execution.
def tokenize(self, name):
"""Take an lvalue string and return a name or a (possibly recursive)
list of names."""
result = []
stack = [result]
for garbage in self.ASSIGN_TOKEN_RE.split(name):
garbage = string.strip(garbage)
if garbage:
raise ParseError, "unexpected assignment token: '%s'" % garbage
tokens = self.ASSIGN_TOKEN_RE.findall(name)
# While processing, put a None token at the start of any list in which
# commas actually appear.
for token in tokens:
if token == '(':
stack.append([])
elif token == ')':
top = stack.pop()
if len(top) == 1:
top = top[0] # no None token means that it's not a 1-tuple
elif top[0] is None:
del top[0] # remove the None token for real tuples
stack[-1].append(top)
elif token == ',':
if len(stack[-1]) == 1:
stack[-1].insert(0, None)
else:
stack[-1].append(token)
# If it's a 1-tuple at the top level, turn it into a real subsequence.
if result and result[0] is None:
result = [result[1:]]
if len(result) == 1:
return result[0]
else:
return result
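# Illustrative examples (not part of the original module) of how the lvalue
# tokenizer above feeds assign() below; outputs are based on a manual trace
# of the code and are not authoritative.
#
#   interp.tokenize("x")           # -> 'x'
#   interp.tokenize("(a, b), c")   # -> [['a', 'b'], 'c']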
def significate(self, key, value=None, locals=None):
"""Declare a significator."""
self.invoke('beforeSignificate', key=key, value=value, locals=locals)
name = '__%s__' % key
self.atomic(name, value, locals)
self.invoke('afterSignificate')
def atomic(self, name, value, locals=None):
"""Do an atomic assignment."""
self.invoke('beforeAtomic', name=name, value=value, locals=locals)
if locals is None:
self.globals[name] = value
else:
locals[name] = value
self.invoke('afterAtomic')
def multi(self, names, values, locals=None):
"""Do a (potentially recursive) assignment."""
self.invoke('beforeMulti', names=names, values=values, locals=locals)
# No zip in 1.5, so we have to do it manually.
i = 0
try:
values = tuple(values)
except TypeError:
raise TypeError, "unpack non-sequence"
if len(names) != len(values):
raise ValueError, "unpack tuple of wrong size"
for i in range(len(names)):
name = names[i]
if type(name) is types.StringType:
self.atomic(name, values[i], locals)
else:
self.multi(name, values[i], locals)
self.invoke('afterMulti')
def assign(self, name, value, locals=None):
"""Do a potentially complex (including tuple unpacking) assignment."""
left = self.tokenize(name)
# The return value of tokenize can either be a string or a list of
# (lists of) strings.
if type(left) is types.StringType:
self.atomic(left, value, locals)
else:
self.multi(left, value, locals)
def import_(self, name, locals=None):
"""Do an import."""
self.invoke('beforeImport', name=name, locals=locals)
self.execute('import %s' % name, locals)
self.invoke('afterImport')
def clause(self, catch, locals=None):
"""Given the string representation of an except clause, turn it into
a 2-tuple consisting of the class name, and either a variable name
or None."""
self.invoke('beforeClause', catch=catch, locals=locals)
if catch is None:
exceptionCode, variable = None, None
elif string.find(catch, ',') >= 0:
exceptionCode, variable = string.split(string.strip(catch), ',', 1)
variable = string.strip(variable)
else:
exceptionCode, variable = string.strip(catch), None
if not exceptionCode:
exception = Exception
else:
exception = self.evaluate(exceptionCode, locals)
self.invoke('afterClause', exception=exception, variable=variable)
return exception, variable
def serialize(self, expression, locals=None):
"""Do an expansion, involving evaluating an expression, then
converting it to a string and writing that string to the
output if the evaluation is not None."""
self.invoke('beforeSerialize', expression=expression, locals=locals)
result = self.evaluate(expression, locals)
if result is not None:
self.write(str(result))
self.invoke('afterSerialize')
def defined(self, name, locals=None):
"""Return a Boolean indicating whether or not the name is
defined either in the locals or the globals."""
self.invoke('beforeDefined', name=name, locals=locals)
if locals is not None:
if locals.has_key(name):
result = True
else:
result = False
elif self.globals.has_key(name):
result = True
else:
result = False
self.invoke('afterDefined', result=result)
return result
def literal(self, text):
"""Process a string literal."""
self.invoke('beforeLiteral', text=text)
self.serialize(text)
self.invoke('afterLiteral')
# Low-level evaluation and execution.
def evaluate(self, expression, locals=None):
"""Evaluate an expression."""
if expression in ('1', 'True'): return True

## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This module implements the low-level API for dealing with fulltext files.
- All the files associated to a I{record} (identified by a I{recid}) can be
managed via an instance of the C{BibRecDocs} class.
- A C{BibRecDocs} is a wrapper of the list of I{documents} attached to the
record.
- Each document is represented by an instance of the C{BibDoc} class.
- A document is identified by a C{docid} and name (C{docname}). The docname
must be unique within the record. A document is the set of all the
formats and revisions of a piece of information.
- A document has a type called C{doctype} and can have a restriction.
- Each physical file, i.e. the concretization of a document into a
particular I{version} and I{format} is represented by an instance of the
C{BibDocFile} class.
- The format is in fact the extension of the physical file.
- A comment, a description and other information can be associated with a
BibDocFile.
- A C{bibdoc} is a synonym for a document, while a C{bibdocfile} is a
synonym for a physical file.
@group Main classes: BibRecDocs,BibDoc,BibDocFile
@group Other classes: BibDocMoreInfo,Md5Folder,InvenioWebSubmitFileError
@group Main functions: decompose_file,stream_file,bibdocfile_*,download_url
@group Configuration Variables: CFG_*
"""
__revision__ = "$Id$"
import os
import re
import shutil
import filecmp
import time
import random
import socket
import urllib2
import urllib
import tempfile
import cPickle
import base64
import binascii
import cgi
import sys
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
try:
import magic
if not hasattr(magic, "open"):
raise ImportError
CFG_HAS_MAGIC = True
except ImportError:
CFG_HAS_MAGIC = False
## The flag below controls whether HTTP range requests are supported
## when serving static files via Python. This is disabled by default as
## it currently breaks support for opening PDF files on Windows platforms
## using the Acrobat Reader browser plugin.
CFG_ENABLE_HTTP_RANGE_REQUESTS = False
from datetime import datetime
from mimetypes import MimeTypes
from thread import get_ident
from invenio import webinterface_handler_config as apache
## Let's set a reasonable timeout for URL request (e.g. FFT)
socket.setdefaulttimeout(40)
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.shellutils import escape_shell_arg
from invenio.dbquery import run_sql, DatabaseError, blob_to_string
from invenio.errorlib import register_exception
from invenio.bibrecord import record_get_field_instances, \
field_get_subfield_values, field_get_subfield_instances, \
encode_for_xml
from invenio.urlutils import create_url
from invenio.textutils import nice_size
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info
from invenio.access_control_admin import acc_is_user_in_role, acc_get_role_id
from invenio.access_control_firerole import compile_role_definition, acc_firerole_check_user
from invenio.access_control_config import SUPERADMINROLE, CFG_WEBACCESS_WARNING_MSGS
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, \
CFG_WEBDIR, CFG_WEBSUBMIT_FILEDIR,\
CFG_WEBSUBMIT_ADDITIONAL_KNOWN_FILE_EXTENSIONS, \
CFG_WEBSUBMIT_FILESYSTEM_BIBDOC_GROUP_LIMIT, CFG_SITE_SECURE_URL, \
CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS, \
CFG_TMPDIR, CFG_TMPSHAREDDIR, CFG_PATH_MD5SUM, \
CFG_WEBSUBMIT_STORAGEDIR, \
CFG_BIBDOCFILE_USE_XSENDFILE, \
CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY, \
CFG_SITE_RECORD, \
CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS
from invenio.websubmit_config import CFG_WEBSUBMIT_ICON_SUBFORMAT_RE, \
CFG_WEBSUBMIT_DEFAULT_ICON_SUBFORMAT
import invenio.template
websubmit_templates = invenio.template.load('websubmit')
websearch_templates = invenio.template.load('websearch')
#: block size when performing I/O.
CFG_BIBDOCFILE_BLOCK_SIZE = 1024 * 8
#: threshold used to decide when to use the Python MD5 or the CLI md5sum algorithm.
CFG_BIBDOCFILE_MD5_THRESHOLD = 256 * 1024
#: chunks loaded by the Python MD5 algorithm.
CFG_BIBDOCFILE_MD5_BUFFER = 1024 * 1024
#: whether to normalize e.g. ".JPEG" and ".jpg" into .jpeg.
CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION = False
#: flags that can be associated to files.
CFG_BIBDOCFILE_AVAILABLE_FLAGS = (
'PDF/A',
'STAMPED',
'PDFOPT',
'HIDDEN',
'CONVERTED',
'PERFORM_HIDE_PREVIOUS',
'OCRED'
)
#: constant used in FFT corrections to indicate that the existing value should be kept.
KEEP_OLD_VALUE = 'KEEP-OLD-VALUE'
_CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS = [(re.compile(_regex), _headers)
for _regex, _headers in CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS]
_mimes = MimeTypes(strict=False)
_mimes.suffix_map.update({'.tbz2' : '.tar.bz2'})
_mimes.encodings_map.update({'.bz2' : 'bzip2'})
_magic_cookies = {}
def _get_magic_cookies():
"""
@return: a tuple of magic object.
@rtype: (MAGIC_NONE, MAGIC_COMPRESS, MAGIC_MIME, MAGIC_COMPRESS + MAGIC_MIME)
@note: ... not real magic. Just see: man file(1)
"""
thread_id = get_ident()
if thread_id not in _magic_cookies:
_magic_cookies[thread_id] = {
magic.MAGIC_NONE : magic.open(magic.MAGIC_NONE),
magic.MAGIC_COMPRESS : magic.open(magic.MAGIC_COMPRESS),
magic.MAGIC_MIME : magic.open(magic.MAGIC_MIME),
magic.MAGIC_COMPRESS + magic.MAGIC_MIME : magic.open(magic.MAGIC_COMPRESS + magic.MAGIC_MIME)
}
for key in _magic_cookies[thread_id].keys():
_magic_cookies[thread_id][key].load()
return _magic_cookies[thread_id]
def _generate_extensions():
"""
Generate the regular expression to match all the known extensions.
@return: the regular expression.
@rtype: regular expression object
"""
_tmp_extensions = _mimes.encodings_map.keys() + \
_mimes.suffix_map.keys() + \
_mimes.types_map[1].keys() + \
CFG_WEBSUBMIT_ADDITIONAL_KNOWN_FILE_EXTENSIONS
extensions = []
for ext in _tmp_extensions:
if ext.startswith('.'):
extensions.append(ext)
else:
extensions.append('.' + ext)
extensions.sort()
extensions.reverse()
extensions = set([ext.lower() for ext in extensions])
extensions = '\\' + '$|\\'.join(extensions) + '$'
extensions = extensions.replace('+', '\\+')
return re.compile(extensions, re.I)
#: Regular expression to recognized extensions.
_extensions = _generate_extensions()
class InvenioWebSubmitFileError(Exception):
"""
Exception raised in case of errors related to fulltext files.
"""
pass
class InvenioBibdocfileUnauthorizedURL(Exception):
"""
Exception raised when one tries to download an unauthorized external URL.
"""
pass
def file_strip_ext(afile, skip_version=False, only_known_extensions=False, allow_subformat=True):
"""
Strip in the best way the extension from a filename.
>>> file_strip_ext("foo.tar.gz")
'foo'
>>> file_strip_ext("foo.buz.gz")
'foo.buz'
>>> file_strip_ext("foo.buz")
'foo'
>>> file_strip_ext("foo.buz", only_known_extensions=True)
'foo.buz'
>>> file_strip_ext("foo.buz;1", skip_version=False,
... only_known_extensions=True)
'foo.buz;1'
>>> file_strip_ext("foo.gif;icon")
'foo'
>>> file_strip_ext("foo.gif:icon", allow_subformat=False)
'foo.gif:icon'
@param afile: the path/name of a file.
@type afile: string
@param skip_version: whether to skip a trailing ";version".
@type skip_version: bool
@param only_known_extensions: whether to strip out only known extensions or
to consider as extension anything that follows a dot.
@type only_known_extensions: bool
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: the name/path without the extension (and version).
@rtype: string
"""
if skip_version or allow_subformat:
afile = afile.split(';')[0]
nextfile = _extensions.sub('', afile)
if nextfile == afile and not only_known_extensions:
nextfile = os.path.splitext(afile)[0]
while nextfile != afile:
afile = nextfile
nextfile = _extensions.sub('', afile)
return nextfile
def normalize_format(format, allow_subformat=True):
"""
Normalize the format, e.g. by adding a dot in front.
@param format: the format/extension to be normalized.
@type format: string
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: the normalized format.
@rtype: string
"""
if allow_subformat and ';' in format:
subformat = format[format.rfind(';'):]
format = format[:format.rfind(';')]
else:
subformat = ''
if format and format[0] != '.':
format = '.' + format
if CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION:
if format not in ('.Z', '.H', '.C', '.CC'):
format = format.lower()
format = {
'.jpg' : '.jpeg',
'.htm' : '.html',
'.tif' : '.tiff'
}.get(format, format)
return format + subformat
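## Illustrative examples (not part of the original module) of the
## normalization above, assuming the default configuration
## (CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION = False):
##
##   normalize_format('pdf')        # -> '.pdf'
##   normalize_format('.pdf;icon')  # -> '.pdf;icon'
##   normalize_format('.JPG')       # -> '.JPG' ('.jpeg' only with strong
##                                  #    normalization enabled)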
def guess_format_from_url(url):
"""
Given a URL, try to guess its extension.
Different methods will be used, including an HTTP HEAD query,
downloading the resource and inspecting its MIME type.
@param url: the URL for which the extension should be guessed.
@type url: string
@return: the recognized extension or empty string if it's impossible to
recognize it.
@rtype: string
"""
## Let's try to guess the extension by considering the URL as a filename
ext = decompose_file(url, skip_version=True, only_known_extensions=True)[2]
if ext.startswith('.'):
return ext
if is_url_a_local_file(url) and CFG_HAS_MAGIC:
## if the URL corresponds to a local file, let's try to use
## the Python magic library to guess it
try:
magic_cookie = _get_magic_cookies()[magic.MAGIC_MIME]
mimetype = magic_cookie.file(url)
ext = _mimes.guess_extension(mimetype)
if ext:
return normalize_format(ext)
except Exception:
pass
else:
## Since the URL is remote, let's try to perform a HEAD request
## and see the corresponding headers
try:
response = open_url(url, head_request=True)
except (InvenioBibdocfileUnauthorizedURL, urllib2.URLError):
return ""
format = get_format_from_http_response(response)
if format:
return format
if CFG_HAS_MAGIC:
## Last solution: let's download the remote resource
## and use the Python magic library to guess the extension
try:
filename = download_url(url, format='')
magic_cookie = _get_magic_cookies()[magic.MAGIC_MIME]
mimetype = magic_cookie.file(filename)
os.remove(filename)
ext = _mimes.guess_extension(mimetype)
if ext:
return normalize_format(ext)
except Exception:
pass
return ""
_docname_re = re.compile(r'[^-\w.]*')
def normalize_docname(docname):
"""
Normalize the docname.
    At the moment the normalization just returns the same string unchanged.
@param docname: the docname to be normalized.
@type docname: string
@return: the normalized docname.
@rtype: string
"""
#return _docname_re.sub('', docname)
return docname
def normalize_version(version):
"""
Normalize the version.
The version can be either an integer or the keyword 'all'. Any other
value will be transformed into the empty string.
@param version: the version (either a number or 'all').
@type version: integer or string
@return: the normalized version.
@rtype: string
"""
try:
int(version)
except ValueError:
if version.lower().strip() == 'all':
return 'all'
else:
return ''
return str(version)
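
# Illustrative examples (not part of the original module):
#     normalize_version(3)       ->  '3'
#     normalize_version('all')   ->  'all'
#     normalize_version('last')  ->  ''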
def compose_file(dirname, docname, extension, subformat=None, version=None):
"""
    Reconstruct a full path from the given separate components.
"""
if version:
version = ";%i" % int(version)
else:
version = ""
if subformat:
if not subformat.startswith(";"):
subformat = ";%s" % subformat
else:
subformat = ""
if extension and not extension.startswith("."):
extension = ".%s" % extension
return os.path.join(dirname, docname + extension + subformat + version)
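
# Illustrative example (not part of the original module); compose_file() is
# roughly the inverse of decompose_file()/decompose_file_with_version():
#     compose_file('/tmp', 'foo', 'pdf', subformat='pdfa', version=2)
#         ->  '/tmp/foo.pdf;pdfa;2'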
def decompose_file(afile, skip_version=False, only_known_extensions=False,
allow_subformat=True):
"""
Decompose a file/path into its components dirname, basename and extension.
>>> decompose_file('/tmp/foo.tar.gz')
('/tmp', 'foo', '.tar.gz')
>>> decompose_file('/tmp/foo.tar.gz;1', skip_version=True)
('/tmp', 'foo', '.tar.gz')
>>> decompose_file('http://www.google.com/index.html')
('http://www.google.com', 'index', '.html')
@param afile: the path/name of a file.
@type afile: string
@param skip_version: whether to skip a trailing ";version".
@type skip_version: bool
@param only_known_extensions: whether to strip out only known extensions or
to consider as extension anything that follows a dot.
@type only_known_extensions: bool
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: a tuple with the directory name, the docname and extension.
@rtype: (dirname, docname, extension)
@note: if a URL is provided, the scheme will be part of the dirname.
@see: L{file_strip_ext} for the algorithm used to retrieve the extension.
"""
if skip_version:
version = afile.split(';')[-1]
try:
int(version)
afile = afile[:-len(version)-1]
except ValueError:
pass
basename = os.path.basename(afile)
dirname = afile[:-len(basename)-1]
base = file_strip_ext(
basename,
only_known_extensions=only_known_extensions,
allow_subformat=allow_subformat)
extension = basename[len(base) + 1:]
if extension:
extension = '.' + extension
return (dirname, base, extension)
def decompose_file_with_version(afile):
"""
Decompose a file into dirname, basename, extension and version.
>>> decompose_file_with_version('/tmp/foo.tar.gz;1')
('/tmp', 'foo', '.tar.gz', 1)
@param afile: the path/name of a file.
@type afile: string
@return: a tuple with the directory name, the docname, extension and
version.
@rtype: (dirname, docname, extension, version)
    @raise ValueError: if no version is present or it is not an integer.
@note: if a URL is provided, the scheme will be part of the dirname.
"""
version_str = afile.split(';')[-1]
version = int(version_str)
afile = afile[:-len(version_str)-1]
basename = os.path.basename(afile)
dirname = afile[:-len(basename)-1]
base = file_strip_ext(basename)
extension = basename[len(base) + 1:]
if extension:
extension = '.' + extension
return (dirname, base, extension, version)
def get_subformat_from_format(format):
"""
    @return: the subformat if any.
    @rtype: string
    >>> get_subformat_from_format('foo;bar')
    'bar'
    >>> get_subformat_from_format('foo')
    ''
"""
try:
return format[format.rindex(';') + 1:]
except ValueError:
return ''
def get_superformat_from_format(format):
"""
    @return: the superformat if any.
@rtype: string
>>> get_superformat_from_format('foo;bar')
'foo'
>>> get_superformat_from_format('foo')
'foo'
"""
try:
return format[:format.rindex(';')]
except ValueError:
return format
def propose_next_docname(docname):
"""
Given a I{docname}, suggest a new I{docname} (useful when trying to generate
a unique I{docname}).
>>> propose_next_docname('foo')
'foo_1'
>>> propose_next_docname('foo_1')
'foo_2'
>>> propose_next_docname('foo_10')
'foo_11'
@param docname: the base docname.
@type docname: string
@return: the next possible docname based on the given one.
@rtype: string
"""
if '_' in docname:
split_docname = docname.split('_')
try:
split_docname[-1] = str(int(split_docname[-1]) + 1)
docname = '_'.join(split_docname)
except ValueError:
docname += '_1'
else:
docname += '_1'
return docname
class BibRecDocs:
"""
This class represents all the files attached to one record.
@param recid: the record identifier.
@type recid: integer
@param deleted_too: whether to consider deleted documents as normal
documents (useful when trying to recover deleted information).
@type deleted_too: bool
    @param human_readable: whether numbers should be printed in a human
        readable format (e.g. 2048 bytes -> 2Kb).
    @type human_readable: bool
@ivar id: the record identifier as passed to the constructor.
@type id: integer
@ivar human_readable: the human_readable flag as passed to the constructor.
@type human_readable: bool
@ivar deleted_too: the deleted_too flag as passed to the constructor.
@type deleted_too: bool
@ivar bibdocs: the list of documents attached to the record.
@type bibdocs: list of BibDoc
"""
def __init__(self, recid, deleted_too=False, human_readable=False):
self.id = recid
self.human_readable = human_readable
self.deleted_too = deleted_too
self.bibdocs = []
self.build_bibdoc_list()
def __repr__(self):
"""
@return: the canonical string representation of the C{BibRecDocs}.
@rtype: string
"""
return 'BibRecDocs(%s%s%s)' % (self.id,
self.deleted_too and ', True' or '',
self.human_readable and ', True' or ''
)
def __str__(self):
"""
@return: an easy to be I{grepped} string representation of the
whole C{BibRecDocs} content.
@rtype: string
"""
out = '%i::::total bibdocs attached=%i\n' % (self.id, len(self.bibdocs))
out += '%i::::total size latest version=%s\n' % (self.id, nice_size(self.get_total_size_latest_version()))
out += '%i::::total size all files=%s\n' % (self.id, nice_size(self.get_total_size()))
for bibdoc in self.bibdocs:
out += str(bibdoc)
return out
def empty_p(self):
"""
@return: True when the record has no attached documents.
@rtype: bool
"""
return len(self.bibdocs) == 0
def deleted_p(self):
"""
@return: True if the corresponding record has been deleted.
@rtype: bool
"""
from invenio.search_engine import record_exists
return record_exists(self.id) == -1
def get_xml_8564(self):
"""
Return a snippet of I{MARCXML} representing the I{8564} fields
corresponding to the current state.
@return: the MARCXML representation.
@rtype: string
"""
from invenio.search_engine import get_record
out = ''
record = get_record(self.id)
fields = record_get_field_instances(record, '856', '4', ' ')
for field in fields:
urls = field_get_subfield_values(field, 'u')
if urls and not bibdocfile_url_p(urls[0]):
out += '\t<datafield tag="856" ind1="4" ind2=" ">\n'
for subfield, value in field_get_subfield_instances(field):
out += '\t\t<subfield code="%s">%s</subfield>\n' % (subfield, encode_for_xml(value))
out += '\t</datafield>\n'
for afile in self.list_latest_files(list_hidden=False):
out += '\t<datafield tag="856" ind1="4" ind2=" ">\n'
url = afile.get_url()
description = afile.get_description()
comment = afile.get_comment()
if url:
out += '\t\t<subfield code="u">%s</subfield>\n' % encode_for_xml(url)
if description:
out += '\t\t<subfield code="y">%s</subfield>\n' % encode_for_xml(description)
if comment:
out += '\t\t<subfield code="z">%s</subfield>\n' % encode_for_xml(comment)
out += '\t</datafield>\n'
return out
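
    # Shape of the produced snippet (illustrative sketch only; the URL,
    # description and comment below are hypothetical):
    #     <datafield tag="856" ind1="4" ind2=" ">
    #         <subfield code="u">http://.../record/123/files/foo.pdf</subfield>
    #         <subfield code="y">some description</subfield>
    #         <subfield code="z">some comment</subfield>
    #     </datafield>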
def get_total_size_latest_version(self):
"""
Returns the total size used on disk by all the files belonging
to this record and corresponding to the latest version.
@return: the total size.
@rtype: integer
"""
size = 0
for bibdoc in self.bibdocs:
size += bibdoc.get_total_size_latest_version()
return size
def get_total_size(self):
"""
Return the total size used on disk of all the files belonging
to this record of any version (not only the last as in
L{get_total_size_latest_version}).
@return: the total size.
@rtype: integer
"""
size = 0
for bibdoc in self.bibdocs:
size += bibdoc.get_total_size()
return size
def build_bibdoc_list(self):
"""
        This method must be called every time a I{bibdoc} is added, removed or
modified.
"""
self.bibdocs = []
if self.deleted_too:
res = run_sql("""SELECT id_bibdoc, type FROM bibrec_bibdoc JOIN
bibdoc ON id=id_bibdoc WHERE id_bibrec=%s
ORDER BY docname ASC""", (self.id,))
else:
res = run_sql("""SELECT id_bibdoc, type FROM bibrec_bibdoc JOIN
bibdoc ON id=id_bibdoc WHERE id_bibrec=%s AND
status<>'DELETED' ORDER BY docname ASC""", (self.id,))
for row in res:
cur_doc = BibDoc(docid=row[0], recid=self.id, doctype=row[1], human_readable=self.human_readable)
self.bibdocs.append(cur_doc)
def list_bibdocs(self, doctype=''):
"""
        Returns the list of all bibdoc objects belonging to a recid.
If C{doctype} is set, it returns just the bibdocs of that doctype.
@param doctype: the optional doctype.
@type doctype: string
@return: the list of bibdocs.
@rtype: list of BibDoc
"""
if not doctype:
return self.bibdocs
else:
return [bibdoc for bibdoc in self.bibdocs if doctype == bibdoc.doctype]
def get_bibdoc_names(self, doctype=''):
"""
        Returns all the names of the documents attached to this record.
        If C{doctype} is set, restricts the result to documents of that doctype.
@param doctype: the optional doctype.
@type doctype: string
@return: the list of document names.
@rtype: list of string
"""
return [bibdoc.docname for bibdoc in self.list_bibdocs(doctype)]
def propose_unique_docname(self, docname):
"""
Given C{docname}, return a new docname that is not already attached to
the record.
@param docname: the reference docname.
@type docname: string
@return: a docname not already attached.
@rtype: string
"""
docname = normalize_docname(docname)
goodname = docname
i = 1
while goodname in self.get_bibdoc_names():
i += 1
goodname = "%s_%s" % (docname, i)
return goodname
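
    # Behaviour sketch (illustrative only): with documents 'foo' and 'foo_2'
    # already attached, propose_unique_docname('foo') returns 'foo_3' (the
    # suffix counter starts from 2), while an unused name is returned unchanged.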
def merge_bibdocs(self, docname1, docname2):
"""
        This method merges C{docname2} into C{docname1}.
1. Given all the formats of the latest version of the files
attached to C{docname2}, these files are added as new formats
into C{docname1}.
2. C{docname2} is marked as deleted.
@raise InvenioWebSubmitFileError: if at least one format in C{docname2}
already exists in C{docname1}. (In this case the two bibdocs are
preserved)
@note: comments and descriptions are also copied.
        @note: if C{docname2} has a I{restriction} (i.e. if the I{status} is
set) and C{docname1} doesn't, the restriction is imported.
"""
bibdoc1 = self.get_bibdoc(docname1)
bibdoc2 = self.get_bibdoc(docname2)
## Check for possibility
for bibdocfile in bibdoc2.list_latest_files():
format = bibdocfile.get_format()
if bibdoc1.format_already_exists_p(format):
raise InvenioWebSubmitFileError('Format %s already exists in bibdoc %s of record %s. It\'s impossible to merge bibdoc %s into it.' % (format, docname1, self.id, docname2))
## Importing restriction if needed.
restriction1 = bibdoc1.get_status()
restriction2 = bibdoc2.get_status()
if restriction2 and not restriction1:
bibdoc1.set_status(restriction2)
## Importing formats
for bibdocfile in bibdoc2.list_latest_files():
format = bibdocfile.get_format()
comment = bibdocfile.get_comment()
description = bibdocfile.get_description()
bibdoc1.add_file_new_format(bibdocfile.get_full_path(), description=description, comment=comment, format=format)
## Finally deleting old bibdoc2
bibdoc2.delete()
self.build_bibdoc_list()
def get_docid(self, docname):
"""
@param docname: the document name.
@type docname: string
@return: the identifier corresponding to the given C{docname}.
@rtype: integer
@raise InvenioWebSubmitFileError: if the C{docname} does not
            correspond to a document attached to this record.
"""
for bibdoc in self.bibdocs:
if bibdoc.docname == docname:
return bibdoc.id
raise InvenioWebSubmitFileError, "Recid '%s' is not connected with a " \
"docname '%s'" % (self.id, docname)
def get_docname(self, docid):
"""
@param docid: the document identifier.
@type docid: integer
@return: the name of the document corresponding to the given document
identifier.
@rtype: string
@raise InvenioWebSubmitFileError: if the C{docid} does not
            correspond to a document attached to this record.
"""
for bibdoc in self.bibdocs:
if bibdoc.id == docid:
return bibdoc.docname
raise InvenioWebSubmitFileError, "Recid '%s' is not connected with a " \
"docid '%s'" % (self.id, docid)
def has_docname_p(self, docname):
"""
@param docname: the document name,
@type docname: string
@return: True if a document with the given name is attached to this
record.
@rtype: bool
"""
for bibdoc in self.bibdocs:
if bibdoc.docname == docname:
return True
return False
def get_bibdoc(self, docname):
"""
@return: the bibdoc with a particular docname associated with
this recid"""
for bibdoc in self.bibdocs:
if bibdoc.docname == docname:
return bibdoc
raise InvenioWebSubmitFileError, "Recid '%s' is not connected with " \
" docname '%s'" % (self.id, docname)
def delete_bibdoc(self, docname):
"""
Deletes the document with the specified I{docname}.
@param docname: the document name.
@type docname: string
"""
for bibdoc in self.bibdocs:
if bibdoc.docname == docname:
bibdoc.delete()
self.build_bibdoc_list()
def add_bibdoc(self, doctype="Main", docname='file', never_fail=False):
"""
Add a new empty document object (a I{bibdoc}) to the list of
documents of this record.
@param doctype: the document type.
@type doctype: string
@param docname: the document name.
@type docname: string
@param never_fail: if True, this procedure will not fail, even if
a document with the given name is already attached to this
record. In this case a new name will be generated (see
L{propose_unique_docname}).
@type never_fail: bool
@return: the newly created document object.
@rtype: BibDoc
@raise InvenioWebSubmitFileError: in case of any error.
"""
try:
docname = normalize_docname(docname)
if never_fail:
docname = self.propose_unique_docname(docname)
if docname in self.get_bibdoc_names():
raise InvenioWebSubmitFileError, "%s has already a bibdoc with docname %s" % (self.id, docname)
else:
bibdoc = BibDoc(recid=self.id, doctype=doctype, docname=docname, human_readable=self.human_readable)
self.build_bibdoc_list()
return bibdoc
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError(str(e))
def add_new_file(self, fullpath, doctype="Main", docname=None, never_fail=False, description=None, comment=None, format=None, flags=None):
"""
Directly add a new file to this record.
Adds a new file with the following policy:
- if the C{docname} is not set it is retrieved from the name of the
file.
- If a bibdoc with the given docname doesn't already exist, it is
created and the file is added to it.
        - If it exists but doesn't contain the format that is being
          added, the new format is added.
- If the format already exists then if C{never_fail} is True a new
bibdoc is created with a similar name but with a progressive
number as a suffix and the file is added to it (see
L{propose_unique_docname}).
        @param fullpath: the filesystem path of the document to be added.
@type fullpath: string
@param doctype: the type of the document.
@type doctype: string
@param docname: the document name.
@type docname: string
@param never_fail: if True, this procedure will not fail, even if
a document with the given name is already attached to this
record. In this case a new name will be generated (see
L{propose_unique_docname}).
@type never_fail: bool
@param description: an optional description of the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioWebSubmitFileError: in case of error.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if format is None:
format = decompose_file(fullpath)[2]
docname = normalize_docname(docname)
try:
bibdoc = self.get_bibdoc(docname)
except InvenioWebSubmitFileError:
# bibdoc doesn't already exists!
bibdoc = self.add_bibdoc(doctype, docname, False)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, format=format, flags=flags)
self.build_bibdoc_list()
else:
try:
bibdoc.add_file_new_format(fullpath, description=description, comment=comment, format=format, flags=flags)
self.build_bibdoc_list()
except InvenioWebSubmitFileError, e:
# Format already exist!
if never_fail:
bibdoc = self.add_bibdoc(doctype, docname, True)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, format=format, flags=flags)
self.build_bibdoc_list()
else:
raise
return bibdoc
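
    # Usage sketch (illustrative only; the record id and path are hypothetical):
    #     bibrecdocs = BibRecDocs(123)
    #     bibdoc = bibrecdocs.add_new_file('/tmp/thesis.pdf', doctype='Main',
    #                                      never_fail=True)
    # attaches the file as document 'thesis' with format '.pdf'; if that format
    # is already attached, never_fail makes it create e.g. 'thesis_2' instead of
    # raising InvenioWebSubmitFileError.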
def add_new_version(self, fullpath, docname=None, description=None, comment=None, format=None, flags=None):
"""
Adds a new file to an already existent document object as a new
version.
@param fullpath: the filesystem path of the file to be added.
@type fullpath: string
@param docname: the document name. If not specified it will be
extracted from C{fullpath} (see L{decompose_file}).
@type docname: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioWebSubmitFileError: in case of error.
@note: previous files associated with the same document will be
considered obsolete.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if format is None:
format = decompose_file(fullpath)[2]
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(format).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
bibdoc = self.get_bibdoc(docname=docname)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, format=format, flags=flags)
self.build_bibdoc_list()
return bibdoc
def add_new_format(self, fullpath, docname=None, description=None, comment=None, format=None, flags=None):
"""
Adds a new file to an already existent document object as a new
format.
@param fullpath: the filesystem path of the file to be added.
@type fullpath: string
@param docname: the document name. If not specified it will be
extracted from C{fullpath} (see L{decompose_file}).
@type docname: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioWebSubmitFileError: in case the same format already
exists.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if format is None:
format = decompose_file(fullpath)[2]
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(format).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
bibdoc = self.get_bibdoc(docname=docname)
bibdoc.add_file_new_format(fullpath, description=description, comment=comment, format=format, flags=flags)
self.build_bibdoc_list()
return bibdoc
def list_latest_files(self, doctype='', list_hidden=True):
"""
Returns a list of the latest files.
@param doctype: if set, only document of the given type will be listed.
@type doctype: string
@param list_hidden: if True, will list also files with the C{HIDDEN}
flag being set.
@type list_hidden: bool
@return: the list of latest files.
@rtype: list of BibDocFile
"""
docfiles = []
for bibdoc in self.list_bibdocs(doctype):
docfiles += bibdoc.list_latest_files(list_hidden=list_hidden)
return docfiles
def display(self, docname="", version="", doctype="", ln=CFG_SITE_LANG, verbose=0, display_hidden=True):
"""
        Returns an HTML representation of the attached documents.
@param docname: if set, include only the requested document.
@type docname: string
@param version: if not set, only the last version will be displayed. If
'all', all versions will be displayed.
@type version: string (integer or 'all')
        @param doctype: if set, include only documents of the requested type.
@type doctype: string
@param ln: the language code.
@type ln: string
@param verbose: if greater than 0, includes debug information.
@type verbose: integer
@param display_hidden: whether to include hidden files as well.
@type display_hidden: bool
@return: the formatted representation.
@rtype: HTML string
"""
t = ""
if docname:
try:
bibdocs = [self.get_bibdoc(docname)]
except InvenioWebSubmitFileError:
bibdocs = self.list_bibdocs(doctype)
else:
bibdocs = self.list_bibdocs(doctype)
if bibdocs:
types = list_types_from_array(bibdocs)
fulltypes = []
for mytype in types:
if mytype in ('Plot', 'PlotMisc'):
# FIXME: quick hack to ignore plot-like doctypes
# on Files tab
continue
fulltype = {
'name' : mytype,
'content' : [],
}
for bibdoc in bibdocs:
if mytype == bibdoc.get_type():
fulltype['content'].append(bibdoc.display(version,
ln=ln, display_hidden=display_hidden))
fulltypes.append(fulltype)
if verbose >= 9:
verbose_files = str(self)
else:
verbose_files = ''
t = websubmit_templates.tmpl_bibrecdoc_filelist(
ln=ln,
types = fulltypes,
verbose_files=verbose_files
)
return t
def fix(self, docname):
"""
        Algorithm that transforms a broken/old bibdoc into a coherent one.
Think of it as being the fsck of BibDocs.
- All the files in the bibdoc directory will be renamed according
to the document name. Proper .recid, .type, .md5 files will be
created/updated.
        - In case of more than one file with the same format and version, a new
          bibdoc will be created in order to host those files.
        @param docname: the document name that needs to be fixed.
@type docname: string
@return: the list of newly created bibdocs if any.
@rtype: list of BibDoc
@raise InvenioWebSubmitFileError: in case of issues that can not be
fixed automatically.
"""
bibdoc = self.get_bibdoc(docname)
versions = {}
res = []
new_bibdocs = [] # List of files with the same version/format of
# existing file which need new bibdoc.
counter = 0
zero_version_bug = False
if os.path.exists(bibdoc.basedir):
for filename in os.listdir(bibdoc.basedir):
if filename[0] != '.' and ';' in filename:
name, version = filename.split(';')
try:
version = int(version)
except ValueError:
# Strange name
register_exception()
raise InvenioWebSubmitFileError, "A file called %s exists under %s. This is not a valid name. After the ';' there must be an integer representing the file version. Please, manually fix this file either by renaming or by deleting it." % (filename, bibdoc.basedir)
if version == 0:
zero_version_bug = True
format = name[len(file_strip_ext(name)):]
format = normalize_format(format)
if not versions.has_key(version):
versions[version] = {}
new_name = 'FIXING-%s-%s' % (str(counter), name)
try:
shutil.move('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, new_name))
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "Error in renaming '%s' to '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, new_name), e)
if versions[version].has_key(format):
new_bibdocs.append((new_name, version))
else:
versions[version][format] = new_name
counter += 1
elif filename[0] != '.':
# Strange name
register_exception()
raise InvenioWebSubmitFileError, "A file called %s exists under %s. This is not a valid name. There should be a ';' followed by an integer representing the file version. Please, manually fix this file either by renaming or by deleting it." % (filename, bibdoc.basedir)
else:
# we create the corresponding storage directory
old_umask = os.umask(022)
os.makedirs(bibdoc.basedir)
# and save the father record id if it exists
try:
if self.id != "":
recid_fd = open("%s/.recid" % bibdoc.basedir, "w")
recid_fd.write(str(self.id))
recid_fd.close()
if bibdoc.doctype != "":
type_fd = open("%s/.type" % bibdoc.basedir, "w")
type_fd.write(str(bibdoc.doctype))
type_fd.close()
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, e
os.umask(old_umask)
if not versions:
bibdoc.delete()
else:
for version, formats in versions.iteritems():
if zero_version_bug:
version += 1
for format, filename in formats.iteritems():
destination = '%s%s;%i' % (docname, format, version)
try:
shutil.move('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, destination))
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "Error in renaming '%s' to '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, destination), e)
try:
recid_fd = open("%s/.recid" % bibdoc.basedir, "w")
recid_fd.write(str(self.id))
recid_fd.close()
type_fd = open("%s/.type" % bibdoc.basedir, "w")
type_fd.write(str(bibdoc.doctype))
type_fd.close()
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "Error in creating .recid and .type file for '%s' folder: '%s'" % (bibdoc.basedir, e)
self.build_bibdoc_list()
res = []
for (filename, version) in new_bibdocs:
if zero_version_bug:
version += 1
new_bibdoc = self.add_bibdoc(doctype=bibdoc.doctype, docname=docname, never_fail=True)
new_bibdoc.add_file_new_format('%s/%s' % (bibdoc.basedir, filename), version)
res.append(new_bibdoc)
try:
os.remove('%s/%s' % (bibdoc.basedir, filename))
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "Error in removing '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), e)
Md5Folder(bibdoc.basedir).update(only_new=False)
bibdoc._build_file_list()
self.build_bibdoc_list()
for bibdoc in self.bibdocs:
if not run_sql('SELECT more_info FROM bibdoc WHERE id=%s', (bibdoc.id,)):
## Import from MARC only if the bibdoc has never had
## its more_info initialized.
try:
bibdoc.import_descriptions_and_comments_from_marc()
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "Error in importing description and comment from %s for record %s: %s" % (repr(bibdoc), self.id, e)
return res
def check_format(self, docname):
"""
Check for any format related issue.
        In case L{CFG_WEBSUBMIT_ADDITIONAL_KNOWN_FILE_EXTENSIONS} is
        altered or the Python version changes, it might happen that a docname
        contains files which are no longer of the form docname + .format ; version,
        simply because the .format is now recognized (and it was not before, so
        it was included in the docname).
        This algorithm verifies whether a fix is necessary (see L{fix_format}).
@param docname: the document name whose formats should be verified.
@type docname: string
@return: True if format is correct. False if a fix is needed.
@rtype: bool
@raise InvenioWebSubmitFileError: in case of any error.
"""
bibdoc = self.get_bibdoc(docname)
correct_docname = decompose_file(docname + '.pdf')[1]
if docname != correct_docname:
return False
for filename in os.listdir(bibdoc.basedir):
if not filename.startswith('.'):
try:
dummy, dummy, format, version = decompose_file_with_version(filename)
except Exception:
raise InvenioWebSubmitFileError('Incorrect filename "%s" for docname %s for recid %i' % (filename, docname, self.id))
if '%s%s;%i' % (correct_docname, format, version) != filename:
return False
return True
def check_duplicate_docnames(self):
"""
        Check whether the record is connected with at least two documents
        with the same name.
@return: True if everything is fine.
@rtype: bool
"""
docnames = set()
for docname in self.get_bibdoc_names():
if docname in docnames:
return False
else:
docnames.add(docname)
return True
def uniformize_bibdoc(self, docname):
"""
        This algorithm corrects wrong file names belonging to a bibdoc.
@param docname: the document name whose formats should be verified.
@type docname: string
"""
bibdoc = self.get_bibdoc(docname)
for filename in os.listdir(bibdoc.basedir):
if not filename.startswith('.'):
try:
dummy, dummy, format, version = decompose_file_with_version(filename)
except ValueError:
register_exception(alert_admin=True, prefix= "Strange file '%s' is stored in %s" % (filename, bibdoc.basedir))
else:
os.rename(os.path.join(bibdoc.basedir, filename), os.path.join(bibdoc.basedir, '%s%s;%i' % (docname, format, version)))
Md5Folder(bibdoc.basedir).update()
bibdoc.touch()
bibdoc._build_file_list('rename')
def fix_format(self, docname, skip_check=False):
"""
Fixes format related inconsistencies.
@param docname: the document name whose formats should be verified.
@type docname: string
@param skip_check: if True assume L{check_format} has already been
called and the need for fix has already been found.
If False, will implicitly call L{check_format} and skip fixing
if no error is found.
@type skip_check: bool
        @return: False if merging two bibdocs is needed but not possible;
            True otherwise.
@rtype: bool
"""
if not skip_check:
if self.check_format(docname):
return True
bibdoc = self.get_bibdoc(docname)
correct_docname = decompose_file(docname + '.pdf')[1]
need_merge = False
if correct_docname != docname:
need_merge = self.has_docname_p(correct_docname)
if need_merge:
proposed_docname = self.propose_unique_docname(correct_docname)
run_sql('UPDATE bibdoc SET docname=%s WHERE id=%s', (proposed_docname, bibdoc.id))
self.build_bibdoc_list()
self.uniformize_bibdoc(proposed_docname)
try:
self.merge_bibdocs(docname, proposed_docname)
except InvenioWebSubmitFileError:
return False
else:
run_sql('UPDATE bibdoc SET docname=%s WHERE id=%s', (correct_docname, bibdoc.id))
self.build_bibdoc_list()
self.uniformize_bibdoc(correct_docname)
else:
self.uniformize_bibdoc(docname)
return True
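
    # Usage sketch (illustrative only; 'bibrecdocs' is a BibRecDocs instance and
    # 'foo' a docname of that record, both hypothetical): the two methods are
    # typically used together during a maintenance run:
    #     if not bibrecdocs.check_format('foo'):
    #         bibrecdocs.fix_format('foo', skip_check=True)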
def fix_duplicate_docnames(self, skip_check=False):
"""
        Algorithm to fix duplicate docnames.
If a record is connected with at least two bibdoc having the same
docname, the algorithm will try to merge them.
@param skip_check: if True assume L{check_duplicate_docnames} has
already been called and the need for fix has already been found.
If False, will implicitly call L{check_duplicate_docnames} and skip
fixing if no error is found.
@type skip_check: bool
"""
if not skip_check:
if self.check_duplicate_docnames():
return
docnames = set()
for bibdoc in self.list_bibdocs():
docname = bibdoc.docname
if docname in docnames:
new_docname = self.propose_unique_docname(bibdoc.docname)
bibdoc.change_name(new_docname)
self.merge_bibdocs(docname, new_docname)
docnames.add(docname)
def check_file_exists(self, path):
"""
Check if a file with the same content of the file pointed in C{path}
is already attached to this record.
@param path: the file to be checked against.
@type path: string
@return: True if a file with the requested content is already attached
to the record.
@rtype: bool
"""
# Let's consider all the latest files
for bibdoc in self.list_bibdocs():
if bibdoc.check_file_exists(path):
return True
return False
class BibDoc:
"""
    This class represents one document (i.e. a set of files with different
    formats and with versioning information) that constitutes a piece of
    information.
    To instantiate a new document, the recid and the docname are mandatory.
    To instantiate an already existing document, either the recid and docname
or the docid alone are sufficient to retrieve it.
@param docid: the document identifier.
@type docid: integer
@param recid: the record identifier of the record to which this document
belongs to. If the C{docid} is specified the C{recid} is automatically
        retrieved from the database.
@type recid: integer
@param docname: the document name.
@type docname: string
    @param doctype: the document type (used when instantiating a new document).
@type doctype: string
@param human_readable: whether sizes should be represented in a human
readable format.
@type human_readable: bool
@raise InvenioWebSubmitFileError: in case of error.
"""
    def __init__(self, docid=None, recid=None, docname=None, doctype='Main', human_readable=False):
"""Constructor of a bibdoc. At least the docid or the recid/docname
pair is needed."""
# docid is known, the document already exists
if docname:
docname = normalize_docname(docname)
self.docfiles = []
self.md5s = None
self.related_files = []
self.human_readable = human_readable
if docid:
if not recid:
res = run_sql("SELECT id_bibrec,type FROM bibrec_bibdoc WHERE id_bibdoc=%s LIMIT 1", (docid,), 1)
if res:
recid = res[0][0]
doctype = res[0][1]
else:
res = run_sql("SELECT id_bibdoc1,type FROM bibdoc_bibdoc WHERE id_bibdoc2=%s LIMIT 1", (docid,), 1)
if res:
main_docid = res[0][0]
doctype = res[0][1]
res = run_sql("SELECT id_bibrec,type FROM bibrec_bibdoc WHERE id_bibdoc=%s LIMIT 1", (main_docid,), 1)
if res:
recid = res[0][0]
else:
raise InvenioWebSubmitFileError, "The docid %s associated with docid %s is not associated with any record" % (main_docid, docid)
else:
raise InvenioWebSubmitFileError, "The docid %s is not associated to any recid or other docid" % docid
else:
res = run_sql("SELECT type FROM bibrec_bibdoc WHERE id_bibrec=%s AND id_bibdoc=%s LIMIT 1", (recid, docid,), 1)
if res:
doctype = res[0][0]
else:
#this bibdoc isn't associated with the corresponding bibrec.
raise InvenioWebSubmitFileError, "Docid %s is not associated with the recid %s" % (docid, recid)
# gather the other information
res = run_sql("SELECT id,status,docname,creation_date,modification_date,text_extraction_date,more_info FROM bibdoc WHERE id=%s LIMIT 1", (docid,), 1)
if res:
self.cd = res[0][3]
self.md = res[0][4]
self.td = res[0][5]
self.recid = recid
self.docname = res[0][2]
self.id = docid
self.status = res[0][1]
self.more_info = BibDocMoreInfo(docid, blob_to_string(res[0][6]))
self.basedir = _make_base_dir(self.id)
self.doctype = doctype
else:
# this bibdoc doesn't exist
raise InvenioWebSubmitFileError, "The docid %s does not exist." % docid
# else it is a new document
else:
if not docname:
raise InvenioWebSubmitFileError, "You should specify the docname when creating a new bibdoc"
else:
self.recid = recid
self.doctype = doctype
self.docname = docname
self.status = ''
if recid:
res = run_sql("SELECT b.id FROM bibrec_bibdoc bb JOIN bibdoc b on bb.id_bibdoc=b.id WHERE bb.id_bibrec=%s AND b.docname=%s LIMIT 1", (recid, docname), 1)
if res:
raise InvenioWebSubmitFileError, "A bibdoc called %s already exists for recid %s" % (docname, recid)
self.id = run_sql("INSERT INTO bibdoc (status,docname,creation_date,modification_date) "
"values(%s,%s,NOW(),NOW())", (self.status, docname))
if self.id:
# we link the document to the record if a recid was
# specified
self.more_info = BibDocMoreInfo(self.id)
res = run_sql("SELECT creation_date, modification_date, text_extraction_date FROM bibdoc WHERE id=%s", (self.id,))
self.cd = res[0][0]
self.md = res[0][1]
self.td = res[0][2]
else:
raise InvenioWebSubmitFileError, "New docid cannot be created"
try:
self.basedir = _make_base_dir(self.id)
# we create the corresponding storage directory
if not os.path.exists(self.basedir):
old_umask = os.umask(022)
os.makedirs(self.basedir)
# and save the father record id if it exists
try:
if self.recid:
recid_fd = open("%s/.recid" % self.basedir, "w")
recid_fd.write(str(self.recid))
recid_fd.close()
if self.doctype:
type_fd = open("%s/.type" % self.basedir, "w")
type_fd.write(str(self.doctype))
type_fd.close()
except Exception, e:
register_exception(alert_admin=True)
raise InvenioWebSubmitFileError, e
os.umask(old_umask)
if self.recid:
run_sql("INSERT INTO bibrec_bibdoc (id_bibrec, id_bibdoc, type) VALUES (%s,%s,%s)",
(recid, self.id, self.doctype,))
except Exception, e:
run_sql('DELETE FROM bibdoc WHERE id=%s', (self.id, ))
run_sql('DELETE FROM bibrec_bibdoc WHERE id_bibdoc=%s', (self.id, ))
register_exception(alert_admin=True)
raise InvenioWebSubmitFileError, e
# build list of attached files
self._build_file_list('init')
# link with related_files
self._build_related_file_list()
def __repr__(self):
"""
@return: the canonical string representation of the C{BibDoc}.
@rtype: string
"""
return 'BibDoc(%s, %s, %s, %s, %s)' % (repr(self.id), repr(self.recid), repr(self.docname), repr(self.doctype), repr(self.human_readable))
def __str__(self):
"""
@return: an easy to be I{grepped} string representation of the
whole C{BibDoc} content.
@rtype: string
"""
out = '%s:%i:::docname=%s\n' % (self.recid or '', self.id, self.docname)
out += '%s:%i:::doctype=%s\n' % (self.recid or '', self.id, self.doctype)
out += '%s:%i:::status=%s\n' % (self.recid or '', self.id, self.status)
out += '%s:%i:::basedir=%s\n' % (self.recid or '', self.id, self.basedir)
out += '%s:%i:::creation date=%s\n' % (self.recid or '', self.id, self.cd)
out += '%s:%i:::modification date=%s\n' % (self.recid or '', self.id, self.md)
out += '%s:%i:::text extraction date=%s\n' % (self.recid or '', self.id, self.td)
out += '%s:%i:::total file attached=%s\n' % (self.recid or '', self.id, len(self.docfiles))
if self.human_readable:
out += '%s:%i:::total size latest version=%s\n' % (self.recid or '', self.id, nice_size(self.get_total_size_latest_version()))
out += '%s:%i:::total size all files=%s\n' % (self.recid or '', self.id, nice_size(self.get_total_size()))
else:
out += '%s:%i:::total size latest version=%s\n' % (self.recid or '', self.id, self.get_total_size_latest_version())
out += '%s:%i:::total size all files=%s\n' % (self.recid or '', self.id, self.get_total_size())
for docfile in self.docfiles:
out += str(docfile)
return out
def format_already_exists_p(self, format):
"""
@param format: a format to be checked.
@type format: string
@return: True if a file of the given format already exists among the
latest files.
@rtype: bool
"""
format = normalize_format(format)
for afile in self.list_latest_files():
if format == afile.get_format():
return True
return False
def get_status(self):
"""
@return: the status information.
@rtype: string
"""
return self.status
def get_text(self, version=None):
"""
@param version: the requested version. If not set, the latest version
will be used.
@type version: integer
@return: the textual content corresponding to the specified version
of the document.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
if self.has_text(version):
return open(os.path.join(self.basedir, '.text;%i' % version)).read()
else:
return ""
def get_text_path(self, version=None):
"""
@param version: the requested version. If not set, the latest version
will be used.
@type version: int
@return: the full path to the textual content corresponding to the specified version
of the document.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
if self.has_text(version):
return os.path.join(self.basedir, '.text;%i' % version)
else:
return ""
def extract_text(self, version=None, perform_ocr=False, ln='en'):
"""
Try what is necessary to extract the textual information of a document.
@param version: the version of the document for which text is required.
If not specified the text will be retrieved from the last version.
@type version: integer
@param perform_ocr: whether to perform OCR.
@type perform_ocr: bool
@param ln: a two letter language code to give as a hint to the OCR
procedure.
@type ln: string
@raise InvenioWebSubmitFileError: in case of error.
@note: the text is extracted and cached for later use. Use L{get_text}
to retrieve it.
"""
from invenio.websubmit_file_converter import get_best_format_to_extract_text_from, convert_file, InvenioWebSubmitFileConverterError
if version is None:
version = self.get_latest_version()
docfiles = self.list_version_files(version)
## We try to extract text only from original or OCRed documents.
filenames = [docfile.get_full_path() for docfile in docfiles if 'CONVERTED' not in docfile.flags or 'OCRED' in docfile.flags]
try:
filename = get_best_format_to_extract_text_from(filenames)
except InvenioWebSubmitFileConverterError:
## We fall back on considering all the documents
filenames = [docfile.get_full_path() for docfile in docfiles]
try:
filename = get_best_format_to_extract_text_from(filenames)
except InvenioWebSubmitFileConverterError:
open(os.path.join(self.basedir, '.text;%i' % version), 'w').write('')
return
try:
convert_file(filename, os.path.join(self.basedir, '.text;%i' % version), '.txt', perform_ocr=perform_ocr, ln=ln)
if version == self.get_latest_version():
run_sql("UPDATE bibdoc SET text_extraction_date=NOW() WHERE id=%s", (self.id, ))
except InvenioWebSubmitFileConverterError, e:
register_exception(alert_admin=True, prefix="Error in extracting text from bibdoc %i, version %i" % (self.id, version))
raise InvenioWebSubmitFileError, str(e)
def touch(self):
"""
Update the modification time of the bibdoc (as in the UNIX command
C{touch}).
"""
run_sql('UPDATE bibdoc SET modification_date=NOW() WHERE id=%s', (self.id, ))
#if self.recid:
#run_sql('UPDATE bibrec SET modification_date=NOW() WHERE id=%s', (self.recid, ))
def set_status(self, new_status):
"""
        Set a new status. A document with status information is a restricted
        document that can be accessed only by users having an authorization
        for the I{viewrestrdoc} WebAccess action with keyword status set to
        C{new_status}.
@param new_status: the new status. If empty the document will be
unrestricted.
@type new_status: string
@raise InvenioWebSubmitFileError: in case the reserved word
'DELETED' is used.
"""
if new_status != KEEP_OLD_VALUE:
if new_status == 'DELETED':
raise InvenioWebSubmitFileError('DELETED is a reserved word and can not be used for setting the status')
run_sql('UPDATE bibdoc SET status=%s WHERE id=%s', (new_status, self.id))
self.status = new_status
self.touch()
self._build_file_list()
self._build_related_file_list()
def add_file_new_version(self, filename, description=None, comment=None, format=None, flags=None):
"""
        Add a new version of a file. If no physical file is already attached
        to the document, the given file will have version 1. Otherwise the
new file will have the current version number plus one.
@param filename: the local path of the file.
@type filename: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be retrieved from the filename (see L{decompose_file}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@raise InvenioWebSubmitFileError: in case of error.
"""
try:
latestVersion = self.get_latest_version()
if latestVersion == 0:
myversion = 1
else:
myversion = latestVersion + 1
if os.path.exists(filename):
if not os.path.getsize(filename) > 0:
raise InvenioWebSubmitFileError, "%s seems to be empty" % filename
if format is None:
format = decompose_file(filename)[2]
else:
format = normalize_format(format)
destination = "%s/%s%s;%i" % (self.basedir, self.docname, format, myversion)
try:
shutil.copyfile(filename, destination)
os.chmod(destination, 0644)
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "Encountered an exception while copying '%s' to '%s': '%s'" % (filename, destination, e)
self.more_info.set_description(description, format, myversion)
self.more_info.set_comment(comment, format, myversion)
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(format).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
for flag in flags:
if flag == 'PERFORM_HIDE_PREVIOUS':
for afile in self.list_all_files():
format = afile.get_format()
version = afile.get_version()
                            if version < myversion:
                                self.more_info.set_flag('HIDDEN', format, version)
else:
self.more_info.set_flag(flag, format, myversion)
else:
raise InvenioWebSubmitFileError, "'%s' does not exists!" % filename
finally:
self.touch()
Md5Folder(self.basedir).update()
self._build_file_list()
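
    # Usage sketch (illustrative only; the path, description and flag usage are
    # hypothetical):
    #     bibdoc.add_file_new_version('/tmp/paper-v2.pdf',
    #                                 description='revised version',
    #                                 flags=['PERFORM_HIDE_PREVIOUS'])
    # stores the file as <basedir>/<docname>.pdf;<latest version + 1> and asks
    # for the files of the previous versions to be flagged as HIDDEN.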
def add_file_new_format(self, filename, version=None, description=None, comment=None, format=None, flags=None):
"""
Add a file as a new format.
@param filename: the local path of the file.
@type filename: string
@param version: an optional specific version to which the new format
should be added. If None, the last version will be used.
@type version: integer
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be retrieved from the filename (see L{decompose_file}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@raise InvenioWebSubmitFileError: if the given format already exists.
"""
try:
if version is None:
version = self.get_latest_version()
if version == 0:
version = 1
if os.path.exists(filename):
if not os.path.getsize(filename) > 0:
raise InvenioWebSubmitFileError, "%s seems to be empty" % filename
if format is None:
format = decompose_file(filename)[2]
else:
format = normalize_format(format)
destination = "%s/%s%s;%i" % (self.basedir, self.docname, format, version)
if os.path.exists(destination):
raise InvenioWebSubmitFileError, "A file for docname '%s' for the recid '%s' already exists for the format '%s'" % (self.docname, self.recid, format)
try:
shutil.copyfile(filename, destination)
os.chmod(destination, 0644)
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "Encountered an exception while copying '%s' to '%s': '%s'" % (filename, destination, e)
self.more_info.set_comment(comment, format, version)
self.more_info.set_description(description, format, version)
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(format).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
for flag in flags:
if flag != 'PERFORM_HIDE_PREVIOUS':
self.more_info.set_flag(flag, format, version)
else:
raise InvenioWebSubmitFileError, "'%s' does not exists!" % filename
finally:
Md5Folder(self.basedir).update()
self.touch()
self._build_file_list()
def purge(self):
"""
        Physically removes all the previous versions of the given bibdoc.
        Everything but the formats of the latest version will be erased.
"""
version = self.get_latest_version()
if version > 1:
for afile in self.docfiles:
if afile.get_version() < version:
self.more_info.unset_comment(afile.get_format(), afile.get_version())
self.more_info.unset_description(afile.get_format(), afile.get_version())
for flag in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
self.more_info.unset_flag(flag, afile.get_format(), afile.get_version())
try:
os.remove(afile.get_full_path())
except Exception, e:
register_exception()
Md5Folder(self.basedir).update()
self.touch()
self._build_file_list()
def expunge(self):
"""
Physically remove all the traces of a given document.
@note: an expunged BibDoc object shouldn't be used anymore or the
            result might be unpredictable.
"""
del self.md5s
del self.more_info
os.system('rm -rf %s' % escape_shell_arg(self.basedir))
run_sql('DELETE FROM bibrec_bibdoc WHERE id_bibdoc=%s', (self.id, ))
run_sql('DELETE FROM bibdoc_bibdoc WHERE id_bibdoc1=%s OR id_bibdoc2=%s', (self.id, self.id))
run_sql('DELETE FROM bibdoc WHERE id=%s', (self.id, ))
run_sql('INSERT DELAYED INTO hstDOCUMENT(action, id_bibdoc, docname, doctimestamp) VALUES("EXPUNGE", %s, %s, NOW())', (self.id, self.docname))
del self.docfiles
del self.id
del self.cd
del self.md
del self.td
del self.basedir
del self.recid
del self.doctype
del self.docname
def revert(self, version):
"""
Revert the document to a given version. All the formats corresponding
to that version are copied forward to a new version.
@param version: the version to revert to.
@type version: integer
@raise InvenioWebSubmitFileError: in case of errors
"""
try:
version = int(version)
new_version = self.get_latest_version() + 1
for docfile in self.list_version_files(version):
destination = "%s/%s%s;%i" % (self.basedir, self.docname, docfile.get_format(), new_version)
if os.path.exists(destination):
raise InvenioWebSubmitFileError, "A file for docname '%s' for the recid '%s' already exists for the format '%s'" % (self.docname, self.recid, docfile.get_format())
try:
shutil.copyfile(docfile.get_full_path(), destination)
os.chmod(destination, 0644)
self.more_info.set_comment(self.more_info.get_comment(docfile.get_format(), version), docfile.get_format(), new_version)
self.more_info.set_description(self.more_info.get_description(docfile.get_format(), version), docfile.get_format(), new_version)
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "Encountered an exception while copying '%s' to '%s': '%s'" % (docfile.get_full_path(), destination, e)
finally:
Md5Folder(self.basedir).update()
self.touch()
self._build_file_list()
def import_descriptions_and_comments_from_marc(self, record=None):
"""
Import descriptions and comments from the corresponding MARC metadata.
@param record: the record (if None it will be calculated).
@type record: bibrecord recstruct
@note: If record is passed it is directly used, otherwise it is retrieved
from the MARCXML stored in the database.
"""
## Let's get the record
from invenio.search_engine import get_record
if record is None:
record = get_record(self.id)
fields = record_get_field_instances(record, '856', '4', ' ')
global_comment = None
global_description = None
local_comment = {}
local_description = {}
for field in fields:
url = field_get_subfield_values(field, 'u')
if url:
## Given a url
url = url[0]
if url == '%s/%s/%s/files/' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid):
## If it is a traditional /CFG_SITE_RECORD/1/files/ one
## We have global description/comment for all the formats
description = field_get_subfield_values(field, 'y')
if description:
global_description = description[0]
comment = field_get_subfield_values(field, 'z')
if comment:
global_comment = comment[0]
elif bibdocfile_url_p(url):
## Otherwise we have description/comment per format
dummy, docname, format = decompose_bibdocfile_url(url)
if docname == self.docname:
description = field_get_subfield_values(field, 'y')
if description:
local_description[format] = description[0]
comment = field_get_subfield_values(field, 'z')
if comment:
local_comment[format] = comment[0]
## Let's update the tables
version = self.get_latest_version()
for docfile in self.list_latest_files():
format = docfile.get_format()
if format in local_comment:
self.set_comment(local_comment[format], format, version)
else:
self.set_comment(global_comment, format, version)
if format in local_description:
self.set_description(local_description[format], format, version)
else:
self.set_description(global_description, format, version)
self._build_file_list('init')
def get_icon(self, subformat_re=CFG_WEBSUBMIT_ICON_SUBFORMAT_RE, display_hidden=True):
"""
@param subformat_re: by default the convention is that
L{CFG_WEBSUBMIT_ICON_SUBFORMAT_RE} is used as a subformat indicator to
mean that a particular format is to be used as an icon.
            Specify a different subformat if you need to use a different
convention.
@type subformat_re: compiled regular expression
@return: the bibdocfile corresponding to the icon of this document, or
            None if no icon exists for this document.
@rtype: BibDocFile
        @warning: before I{subformats} were introduced this method used to
            return a BibDoc, while now it returns a BibDocFile. Check
if your client code is compatible with this.
"""
for docfile in self.list_latest_files(list_hidden=display_hidden):
if subformat_re.match(docfile.get_subformat()):
return docfile
return None
def add_icon(self, filename, format=None, subformat=CFG_WEBSUBMIT_DEFAULT_ICON_SUBFORMAT):
"""
Attaches icon to this document.
@param filename: the local filesystem path to the icon.
@type filename: string
@param format: an optional format for the icon. If not specified it
            will be derived from the filesystem path.
@type format: string
@param subformat: by default the convention is that
CFG_WEBSUBMIT_DEFAULT_ICON_SUBFORMAT is used as a subformat indicator to
mean that a particular format is to be used as an icon.
            Specify a different subformat if you need to use a different
convention.
@type subformat: string
@raise InvenioWebSubmitFileError: in case of errors.
"""
#first check if an icon already exists
if not format:
format = decompose_file(filename)[2]
if subformat:
format += ";%s" % subformat
self.add_file_new_format(filename, format=format)
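
    # Usage sketch (illustrative only; the path is hypothetical):
    #     bibdoc.add_icon('/tmp/thumbnail.gif')
    # stores the file as a new format such as '.gif;icon', which get_icon()
    # will later pick up via CFG_WEBSUBMIT_ICON_SUBFORMAT_RE.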
def delete_icon(self, subformat_re=CFG_WEBSUBMIT_ICON_SUBFORMAT_RE):
"""
        Removes the icon attached to the document if it exists.
        @param subformat_re: by default the convention is that
            L{CFG_WEBSUBMIT_ICON_SUBFORMAT_RE} is used as a subformat indicator to
            mean that a particular format is to be used as an icon.
            Specify a different subformat if you need to use a different
            convention.
        @type subformat_re: compiled regular expression
"""
for docfile in self.list_latest_files():
if subformat_re.match(docfile.get_subformat()):
self.delete_file(docfile.get_format(), docfile.get_version())
def display(self, version="", ln=CFG_SITE_LANG, display_hidden=True):
"""
        Returns an HTML representation of this document.
@param version: if not set, only the last version will be displayed. If
'all', all versions will be displayed.
@type version: string (integer or 'all')
@param ln: the language code.
@type ln: string
@param display_hidden: whether to include hidden files as well.
@type display_hidden: bool
@return: the formatted representation.
@rtype: HTML string
"""
t = ""
if version == "all":
docfiles = self.list_all_files(list_hidden=display_hidden)
elif version != "":
version = int(version)
docfiles = self.list_version_files(version, list_hidden=display_hidden)
else:
docfiles = self.list_latest_files(list_hidden=display_hidden)
icon = self.get_icon(display_hidden=display_hidden)
if icon:
imageurl = icon.get_url()
else:
imageurl = "%s/img/smallfiles.gif" % CFG_SITE_URL
versions = []
for version in list_versions_from_array(docfiles):
currversion = {
'version' : version,
'previous' : 0,
'content' : []
}
if version == self.get_latest_version() and version != 1:
currversion['previous'] = 1
for docfile in docfiles:
if docfile.get_version() == version:
currversion['content'].append(docfile.display(ln = ln))
versions.append(currversion)
if versions:
return websubmit_templates.tmpl_bibdoc_filelist(
ln = ln,
versions = versions,
imageurl = imageurl,
docname = self.docname,
recid = self.recid
)
else:
return ""
def change_name(self, newname):
"""
        Renames this document.
@param newname: the new name.
@type newname: string
@raise InvenioWebSubmitFileError: if the new name corresponds to
a document already attached to the record owning this document.
"""
try:
newname = normalize_docname(newname)
res = run_sql("SELECT b.id FROM bibrec_bibdoc bb JOIN bibdoc b on bb.id_bibdoc=b.id WHERE bb.id_bibrec=%s AND b.docname=%s", (self.recid, newname))
if res:
raise InvenioWebSubmitFileError, "A bibdoc called %s already exists for recid %s" % (newname, self.recid)
try:
for f in os.listdir(self.basedir):
if not f.startswith('.'):
try:
(dummy, base, extension, version) = decompose_file_with_version(f)
except ValueError:
register_exception(alert_admin=True, prefix="Strange file '%s' is stored in %s" % (f, self.basedir))
else:
shutil.move(os.path.join(self.basedir, f), os.path.join(self.basedir, '%s%s;%i' % (newname, extension, version)))
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError("Error in renaming the bibdoc %s to %s for recid %s: %s" % (self.docname, newname, self.recid, e))
run_sql("update bibdoc set docname=%s where id=%s", (newname, self.id,))
self.docname = newname
finally:
Md5Folder(self.basedir).update()
self.touch()
self._build_file_list('rename')
self._build_related_file_list()
def set_comment(self, comment, format, version=None):
"""
Updates the comment of a specific format/version of the document.
@param comment: the new comment.
@type comment: string
@param format: the specific format for which the comment should be
updated.
@type format: string
@param version: the specific version for which the comment should be
updated. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
format = normalize_format(format)
self.more_info.set_comment(comment, format, version)
self.touch()
self._build_file_list('init')
def set_description(self, description, format, version=None):
"""
Updates the description of a specific format/version of the document.
@param description: the new description.
@type description: string
@param format: the specific format for which the description should be
updated.
@type format: string
@param version: the specific version for which the description should be
updated. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
format = normalize_format(format)
self.more_info.set_description(description, format, version)
self.touch()
self._build_file_list('init')
def set_flag(self, flagname, format, version=None):
"""
Sets a flag for a specific format/version of the document.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
set.
@type format: string
@param version: the specific version for which the flag should be
set. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
format = normalize_format(format)
self.more_info.set_flag(flagname, format, version)
self.touch()
self._build_file_list('init')
def has_flag(self, flagname, format, version=None):
"""
Checks if a particular flag for a format/version is set.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
set.
@type format: string
@param version: the specific version for which the flag should be
set. If not specified the last version will be used.
@type version: integer
@return: True if the flag is set.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
format = normalize_format(format)
return self.more_info.has_flag(flagname, format, version)
def unset_flag(self, flagname, format, version=None):
"""
Unsets a flag for a specific format/version of the document.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
unset.
@type format: string
@param version: the specific version for which the flag should be
unset. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
format = normalize_format(format)
self.more_info.unset_flag(flagname, format, version)
self.touch()
self._build_file_list('init')
def get_comment(self, format, version=None):
"""
Retrieve the comment of a specific format/version of the document.
@param format: the specific format for which the comment should be
retrieved.
@type format: string
@param version: the specific version for which the comment should be
retrieved. If not specified the last version will be used.
@type version: integer
@return: the comment.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
format = normalize_format(format)
return self.more_info.get_comment(format, version)
def get_description(self, format, version=None):
"""
Retrieve the description of a specific format/version of the document.
@param format: the specific format for which the description should be
retrieved.
@type format: string
@param version: the specific version for which the description should
be retrieved. If not specified the last version will be used.
@type version: integer
@return: the description.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
format = normalize_format(format)
return self.more_info.get_description(format, version)
def hidden_p(self, format, version=None):
"""
Returns True if the file specified by the given format/version is
hidden.
@param format: the specific format for which the description should be
retrieved.
@type format: string
@param version: the specific version for which the description should
be retrieved. If not specified the last version will be used.
@type version: integer
@return: True if hidden.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
return self.more_info.has_flag('HIDDEN', format, version)
def get_docname(self):
"""
@return: the name of this document.
@rtype: string
"""
return self.docname
def get_base_dir(self):
"""
@return: the base directory on the local filesystem for this document
(e.g. C{/soft/cdsweb/var/data/files/g0/123})
@rtype: string
"""
return self.basedir
def get_type(self):
"""
@return: the type of this document.
@rtype: string"""
return self.doctype
def get_recid(self):
"""
@return: the record id of the record to which this document is
attached.
@rtype: integer
"""
return self.recid
def get_id(self):
"""
@return: the id of this document.
@rtype: integer
"""
return self.id
def pdf_a_p(self):
"""
@return: True if this document contains a PDF in PDF/A format.
@rtype: bool"""
return self.has_flag('PDF/A', 'pdf')
def has_text(self, require_up_to_date=False, version=None):
"""
Return True if the text of this document has already been extracted.
@param require_up_to_date: if True check the text was actually
extracted after the most recent format of the given version.
@type require_up_to_date: bool
@param version: a version for which the text should have been
extracted. If not specified the latest version is considered.
@type version: integer
@return: True if the text has already been extracted.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
if os.path.exists(os.path.join(self.basedir, '.text;%i' % version)):
if not require_up_to_date:
return True
else:
docfiles = self.list_version_files(version)
text_md = datetime.fromtimestamp(os.path.getmtime(os.path.join(self.basedir, '.text;%i' % version)))
for docfile in docfiles:
if text_md <= docfile.md:
return False
return True
return False
def get_file(self, format, version=""):
"""
Returns a L{BibDocFile} instance of this document corresponding to the
specific format and version.
@param format: the specific format.
@type format: string
@param version: the specific version for which the description should
be retrieved. If not specified the last version will be used.
@type version: integer
@return: the L{BibDocFile} instance.
@rtype: BibDocFile
"""
if version == "":
docfiles = self.list_latest_files()
else:
version = int(version)
docfiles = self.list_version_files(version)
format = normalize_format(format)
for docfile in docfiles:
if (docfile.get_format()==format or not format):
return docfile
## Let's skip the subformat specification and consider just the
## superformat
superformat = get_superformat_from_format(format)
for docfile in docfiles:
if get_superformat_from_format(docfile.get_format()) == superformat:
return docfile
raise InvenioWebSubmitFileError, "No file called '%s' of format '%s', version '%s'" % (self.docname, format, version)
def list_versions(self):
"""
@return: the list of existing version numbers for this document.
@rtype: list of integer
"""
versions = []
for docfile in self.docfiles:
if not docfile.get_version() in versions:
versions.append(docfile.get_version())
versions.sort()
return versions
def delete(self):
"""
Delete this document.
@see: L{undelete} for how to undelete the document.
@raise InvenioWebSubmitFileError: in case of errors.
"""
try:
today = datetime.today()
self.change_name('DELETED-%s%s-%s' % (today.strftime('%Y%m%d%H%M%S'), today.microsecond, self.docname))
run_sql("UPDATE bibdoc SET status='DELETED' WHERE id=%s", (self.id,))
self.status = 'DELETED'
except Exception, e:
register_exception()
raise InvenioWebSubmitFileError, "It's impossible to delete bibdoc %s: %s" % (self.id, e)
def deleted_p(self):
"""
@return: True if this document has been deleted.
@rtype: bool
"""
return self.status == 'DELETED'
def empty_p(self):
"""
@return: True if this document is empty, i.e. it has no bibdocfile
connected.
@rtype: bool
"""
return len(self.docfiles) == 0
def undelete(self, previous_status=''):
"""
Undelete a deleted file (only if it was actually deleted via L{delete}).
The previous C{status}, i.e. the restriction key can be provided.
Otherwise the undeleted document will be public.
        @param previous_status: the previous status that should be restored.
@type previous_status: string
@raise InvenioWebSubmitFileError: in case of any error.
"""
bibrecdocs = BibRecDocs(self.recid)
try:
run_sql("UPDATE bibdoc SET status=%s WHERE id=%s AND status='DELETED'", (previous_status, self.id))
except Exception, e:
raise InvenioWebSubmitFileError, "It's impossible to undelete bibdoc %s: %s" % (self.id, e)
if self.docname.startswith('DELETED-'):
try:
# Let's remove DELETED-20080214144322- in front of the docname
original_name = '-'.join(self.docname.split('-')[2:])
original_name = bibrecdocs.propose_unique_docname(original_name)
self.change_name(original_name)
except Exception, e:
raise InvenioWebSubmitFileError, "It's impossible to restore the previous docname %s. %s kept as docname because: %s" % (original_name, self.docname, e)
else:
raise InvenioWebSubmitFileError, "Strange just undeleted docname isn't called DELETED-somedate-docname but %s" % self.docname
def delete_file(self, format, version):
"""
Delete a specific format/version of this document on the filesystem.
@param format: the particular format to be deleted.
@type format: string
@param version: the particular version to be deleted.
@type version: integer
@note: this operation is not reversible!"""
try:
afile = self.get_file(format, version)
except InvenioWebSubmitFileError:
return
try:
os.remove(afile.get_full_path())
except OSError:
pass
self.touch()
self._build_file_list()
def get_history(self):
"""
@return: a human readable and parsable string that represent the
history of this document.
@rtype: string
"""
ret = []
hst = run_sql("""SELECT action, docname, docformat, docversion,
docsize, docchecksum, doctimestamp
FROM hstDOCUMENT
WHERE id_bibdoc=%s ORDER BY doctimestamp ASC""", (self.id, ))
for row in hst:
ret.append("%s %s '%s', format: '%s', version: %i, size: %s, checksum: '%s'" % (row[6].strftime('%Y-%m-%d %H:%M:%S'), row[0], row[1], row[2], row[3], nice_size(row[4]), row[5]))
return ret
def _build_file_list(self, context=''):
"""
        Lists all files attached to the bibdoc. This function should be
        called every time the bibdoc is modified.
        As a side effect it logs everything that has happened to the
        bibdocfiles in the log facility, according to the context:
        "init": means that the function has been called for the first time
        by a constructor, hence no logging is performed;
        "": the default, means to log every deleted file as deleted and
        every added file as added;
        "rename": means that every apparently deleted file is logged as
        RENAMEDFROM and every new file as RENAMEDTO.
"""
def log_action(action, docid, docname, format, version, size, checksum, timestamp=''):
"""Log an action into the bibdoclog table."""
try:
if timestamp:
run_sql('INSERT DELAYED INTO hstDOCUMENT(action, id_bibdoc, docname, docformat, docversion, docsize, docchecksum, doctimestamp) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)', (action, docid, docname, format, version, size, checksum, timestamp))
else:
run_sql('INSERT DELAYED INTO hstDOCUMENT(action, id_bibdoc, docname, docformat, docversion, docsize, docchecksum, doctimestamp) VALUES(%s, %s, %s, %s, %s, %s, %s, NOW())', (action, docid, docname, format, version, size, checksum))
except DatabaseError:
register_exception()
def make_removed_added_bibdocfiles(previous_file_list):
"""Internal function for build the log of changed files."""
# Let's rebuild the previous situation
old_files = {}
for bibdocfile in previous_file_list:
old_files[(bibdocfile.name, bibdocfile.format, bibdocfile.version)] = (bibdocfile.size, bibdocfile.checksum, bibdocfile.md)
# Let's rebuild the new situation
new_files = {}
for bibdocfile in self.docfiles:
new_files[(bibdocfile.name, bibdocfile.format, bibdocfile.version)] = (bibdocfile.size, bibdocfile.checksum, bibdocfile.md)
            # Let's remove from the added files all the files that are present
            # in the old list, and add to the deleted files the old files that
            # are no longer present.
added_files = dict(new_files)
deleted_files = {}
for key, value in old_files.iteritems():
if added_files.has_key(key):
del added_files[key]
else:
deleted_files[key] = value
return (added_files, deleted_files)
if context != 'init':
previous_file_list = list(self.docfiles)
res = run_sql("SELECT status,docname,creation_date,"
"modification_date,more_info FROM bibdoc WHERE id=%s", (self.id,))
self.cd = res[0][2]
self.md = res[0][3]
self.docname = res[0][1]
self.status = res[0][0]
self.more_info = BibDocMoreInfo(self.id, blob_to_string(res[0][4]))
self.docfiles = []
if os.path.exists(self.basedir):
self.md5s = Md5Folder(self.basedir)
files = os.listdir(self.basedir)
files.sort()
for afile in files:
if not afile.startswith('.'):
try:
filepath = os.path.join(self.basedir, afile)
dirname, basename, format, fileversion = decompose_file_with_version(filepath)
checksum = self.md5s.get_checksum(afile)
# we can append file:
self.docfiles.append(BibDocFile(filepath, self.doctype,
fileversion, basename, format,
self.recid, self.id, self.status, checksum,
self.more_info, human_readable=self.human_readable))
except Exception, e:
register_exception()
if context == 'init':
return
else:
added_files, deleted_files = make_removed_added_bibdocfiles(previous_file_list)
deletedstr = "DELETED"
addedstr = "ADDED"
if context == 'rename':
deletedstr = "RENAMEDFROM"
addedstr = "RENAMEDTO"
for (docname, format, version), (size, checksum, md) in added_files.iteritems():
if context == 'rename':
md = '' # No modification time
log_action(addedstr, self.id, docname, format, version, size, checksum, md)
for (docname, format, version), (size, checksum, md) in deleted_files.iteritems():
if context == 'rename':
md = '' # No modification time
log_action(deletedstr, self.id, docname, format, version, size, checksum, md)
def _build_related_file_list(self):
"""Lists all files attached to the bibdoc. This function should be
called everytime the bibdoc is modified within e.g. its icon.
@deprecated: use subformats instead.
"""
self.related_files = {}
res = run_sql("SELECT ln.id_bibdoc2,ln.type,bibdoc.status FROM "
"bibdoc_bibdoc AS ln,bibdoc WHERE id=ln.id_bibdoc2 AND "
"ln.id_bibdoc1=%s", (self.id,))
for row in res:
docid = row[0]
doctype = row[1]
if row[2] != 'DELETED':
if not self.related_files.has_key(doctype):
self.related_files[doctype] = []
cur_doc = BibDoc(docid=docid, human_readable=self.human_readable)
self.related_files[doctype].append(cur_doc)
def get_total_size_latest_version(self):
"""Return the total size used on disk of all the files belonging
to this bibdoc and corresponding to the latest version."""
ret = 0
for bibdocfile in self.list_latest_files():
ret += bibdocfile.get_size()
return ret
def get_total_size(self):
"""Return the total size used on disk of all the files belonging
to this bibdoc."""
ret = 0
for bibdocfile in self.list_all_files():
ret += bibdocfile.get_size()
return ret
def list_all_files(self, list_hidden=True):
"""Returns all the docfiles linked with the given bibdoc."""
if list_hidden:
return self.docfiles
else:
return [afile for afile in self.docfiles if not afile.hidden_p()]
def list_latest_files(self, list_hidden=True):
"""Returns all the docfiles within the last version."""
return self.list_version_files(self.get_latest_version(), list_hidden=list_hidden)
def list_version_files(self, version, list_hidden=True):
"""Return all the docfiles of a particular version."""
version = int(version)
return [docfile for docfile in self.docfiles if docfile.get_version() == version and (list_hidden or not docfile.hidden_p())]
def check_file_exists(self, path):
"""
Check if a file with the same content of the file pointed in C{path}
is already attached to this record.
@param path: the file to be checked against.
@type path: string
@return: True if a file with the requested content is already attached
to the record.
@rtype: bool
"""
# Let's consider all the latest files
for afile in self.list_latest_files():
if afile.is_identical_to(path):
return True
return False
def get_latest_version(self):
""" Returns the latest existing version number for the given bibdoc.
        If no file is associated with this bibdoc, returns 0.
"""
version = 0
for bibdocfile in self.docfiles:
if bibdocfile.get_version() > version:
version = bibdocfile.get_version()
return version
def get_file_number(self):
"""Return the total number of files."""
return len(self.docfiles)
def register_download(self, ip_address, version, format, userid=0):
"""Register the information about a download of a particular file."""
format = normalize_format(format)
if format[:1] == '.':
format = format[1:]
format = format.upper()
return run_sql("INSERT INTO rnkDOWNLOADS "
"(id_bibrec,id_bibdoc,file_version,file_format,"
"id_user,client_host,download_time) VALUES "
"(%s,%s,%s,%s,%s,INET_ATON(%s),NOW())",
(self.recid, self.id, version, format,
userid, ip_address,))
def generic_path2bidocfile(fullpath):
"""
    Returns a BibDocFile object that wraps the given fullpath.
@note: the object will contain the minimum information that can be
guessed from the fullpath (e.g. docname, format, subformat, version,
md5, creation_date, modification_date). It won't contain for example
a comment, a description, a doctype, a restriction.
"""
fullpath = os.path.abspath(fullpath)
try:
path, name, format, version = decompose_file_with_version(fullpath)
except ValueError:
## There is no version
version = 0
path, name, format = decompose_file(fullpath)
md5folder = Md5Folder(path)
checksum = md5folder.get_checksum(os.path.basename(fullpath))
return BibDocFile(fullpath=fullpath,
doctype=None,
version=version,
name=name,
format=format,
recid=0,
docid=0,
status=None,
checksum=checksum,
more_info=None)
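# A minimal sketch of what generic_path2bidocfile() infers (the path below is
# hypothetical): for '/tmp/example/test.pdf;2', decompose_file_with_version()
# yields the directory '/tmp/example', the name 'test', the format '.pdf' and
# the version 2, while a path without a ';version' suffix falls back to
# decompose_file() with version 0; the checksum is then looked up in the
# Md5Folder of the containing directory.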
class BibDocFile:
"""This class represents a physical file in the Invenio filesystem.
It should never be instantiated directly"""
def __init__(self, fullpath, doctype, version, name, format, recid, docid, status, checksum, more_info=None, human_readable=False):
self.fullpath = os.path.abspath(fullpath)
self.doctype = doctype
self.docid = docid
self.recid = recid
self.version = version
self.status = status
self.checksum = checksum
self.human_readable = human_readable
if more_info:
self.description = more_info.get_description(format, version)
self.comment = more_info.get_comment(format, version)
self.flags = more_info.get_flags(format, version)
else:
self.description = None
self.comment = None
self.flags = []
self.format = normalize_format(format)
self.superformat = get_superformat_from_format(self.format)
self.subformat = get_subformat_from_format(self.format)
if format == "":
self.mime = "application/octet-stream"
self.encoding = ""
self.fullname = name
else:
self.fullname = "%s%s" % (name, self.superformat)
(self.mime, self.encoding) = _mimes.guess_type(self.fullname)
if self.mime is None:
self.mime = "application/octet-stream"
self.more_info = more_info
self.hidden = 'HIDDEN' in self.flags
self.size = os.path.getsize(fullpath)
self.md = datetime.fromtimestamp(os.path.getmtime(fullpath))
try:
self.cd = datetime.fromtimestamp(os.path.getctime(fullpath))
except OSError:
self.cd = self.md
self.name = name
self.dir = os.path.dirname(fullpath)
if self.subformat:
self.url = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, self.name, self.superformat), {'subformat' : self.subformat})
self.fullurl = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, self.name, self.superformat), {'subformat' : self.subformat, 'version' : self.version})
else:
self.url = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, self.name, self.superformat), {})
self.fullurl = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, self.name, self.superformat), {'version' : self.version})
self.etag = '"%i%s%i"' % (self.docid, self.format, self.version)
self.magic = None
def __repr__(self):
return ('BibDocFile(%s, %s, %i, %s, %s, %i, %i, %s, %s, %s, %s)' % (repr(self.fullpath), repr(self.doctype), self.version, repr(self.name), repr(self.format), self.recid, self.docid, repr(self.status), repr(self.checksum), repr(self.more_info), repr(self.human_readable)))
def __str__(self):
out = '%s:%s:%s:%s:fullpath=%s\n' % (self.recid, self.docid, self.version, self.format, self.fullpath)
out += '%s:%s:%s:%s:fullname=%s\n' % (self.recid, self.docid, self.version, self.format, self.fullname)
out += '%s:%s:%s:%s:name=%s\n' % (self.recid, self.docid, self.version, self.format, self.name)
out += '%s:%s:%s:%s:subformat=%s\n' % (self.recid, self.docid, self.version, self.format, get_subformat_from_format(self.format))
out += '%s:%s:%s:%s:status=%s\n' % (self.recid, self.docid, self.version, self.format, self.status)
out += '%s:%s:%s:%s:checksum=%s\n' % (self.recid, self.docid, self.version, self.format, self.checksum)
if self.human_readable:
out += '%s:%s:%s:%s:size=%s\n' % (self.recid, self.docid, self.version, self.format, nice_size(self.size))
else:
out += '%s:%s:%s:%s:size=%s\n' % (self.recid, self.docid, self.version, self.format, self.size)
out += '%s:%s:%s:%s:creation time=%s\n' % (self.recid, self.docid, self.version, self.format, self.cd)
out += '%s:%s:%s:%s:modification time=%s\n' % (self.recid, self.docid, self.version, self.format, self.md)
out += '%s:%s:%s:%s:magic=%s\n' % (self.recid, self.docid, self.version, self.format, self.get_magic())
out += '%s:%s:%s:%s:mime=%s\n' % (self.recid, self.docid, self.version, self.format, self.mime)
out += '%s:%s:%s:%s:encoding=%s\n' % (self.recid, self.docid, self.version, self.format, self.encoding)
out += '%s:%s:%s:%s:url=%s\n' % (self.recid, self.docid, self.version, self.format, self.url)
out += '%s:%s:%s:%s:fullurl=%s\n' % (self.recid, self.docid, self.version, self.format, self.fullurl)
out += '%s:%s:%s:%s:description=%s\n' % (self.recid, self.docid, self.version, self.format, self.description)
out += '%s:%s:%s:%s:comment=%s\n' % (self.recid, self.docid, self.version, self.format, self.comment)
out += '%s:%s:%s:%s:hidden=%s\n' % (self.recid, self.docid, self.version, self.format, self.hidden)
out += '%s:%s:%s:%s:flags=%s\n' % (self.recid, self.docid, self.version, self.format, self.flags)
out += '%s:%s:%s:%s:etag=%s\n' % (self.recid, self.docid, self.version, self.format, self.etag)
return out
def display(self, ln = CFG_SITE_LANG):
"""Returns a formatted representation of this docfile."""
return websubmit_templates.tmpl_bibdocfile_filelist(
ln = ln,
recid = self.recid,
version = self.version,
md = self.md,
name = self.name,
superformat = self.superformat,
subformat = self.subformat,
nice_size = nice_size(self.size),
description = self.description or ''
)
def is_identical_to(self, path):
"""
        @param path: the path of another file on disk.
        @return: True if the file at C{path} contains bitwise the same content.
"""
if os.path.getsize(path) != self.size:
return False
if calculate_md5(path) != self.checksum:
return False
return filecmp.cmp(self.get_full_path(), path)
def is_restricted(self, user_info):
"""Returns restriction state. (see acc_authorize_action return values)"""
if self.status not in ('', 'DELETED'):
return check_bibdoc_authorization(user_info, status=self.status)
elif self.status == 'DELETED':
            return (1, 'File has been deleted')
else:
return (0, '')
def is_icon(self, subformat_re=CFG_WEBSUBMIT_ICON_SUBFORMAT_RE):
"""
@param subformat_re: by default the convention is that
L{CFG_WEBSUBMIT_ICON_SUBFORMAT_RE} is used as a subformat indicator to
mean that a particular format is to be used as an icon.
        Specify a different subformat if you need to use a different
        convention.
        @type subformat_re: compiled regular expression
@return: True if this file is an icon.
@rtype: bool
"""
return bool(subformat_re.match(self.subformat))
def hidden_p(self):
return self.hidden
def get_url(self):
return self.url
def get_type(self):
return self.doctype
def get_path(self):
return self.fullpath
def get_bibdocid(self):
return self.docid
def get_name(self):
return self.name
def get_full_name(self):
return self.fullname
def get_full_path(self):
return self.fullpath
def get_format(self):
return self.format
def get_subformat(self):
return self.subformat
def get_superformat(self):
return self.superformat
def get_size(self):
return self.size
def get_version(self):
return self.version
def get_checksum(self):
return self.checksum
def get_description(self):
return self.description
def get_comment(self):
return self.comment
def get_content(self):
"""Returns the binary content of the file."""
content_fd = open(self.fullpath, 'rb')
content = content_fd.read()
content_fd.close()
return content
def get_recid(self):
"""Returns the recid connected with the bibdoc of this file."""
return self.recid
def get_status(self):
"""Returns the status of the file, i.e. either '', 'DELETED' or a
restriction keyword."""
return self.status
def get_magic(self):
"""Return all the possible guesses from the magic library about
the content of the file."""
if self.magic is None and CFG_HAS_MAGIC:
magic_cookies = _get_magic_cookies()
magic_result = []
for key in magic_cookies.keys():
magic_result.append(magic_cookies[key].file(self.fullpath))
self.magic = tuple(magic_result)
return self.magic
def check(self):
"""Return True if the checksum corresponds to the file."""
return calculate_md5(self.fullpath) == self.checksum
def stream(self, req):
"""Stream the file. Note that no restriction check is being
done here, since restrictions have been checked previously
inside websubmit_webinterface.py."""
if os.path.exists(self.fullpath):
if random.random() < CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY and calculate_md5(self.fullpath) != self.checksum:
raise InvenioWebSubmitFileError, "File %s, version %i, for record %s is corrupted!" % (self.fullname, self.version, self.recid)
stream_file(req, self.fullpath, "%s%s" % (self.name, self.superformat), self.mime, self.encoding, self.etag, self.checksum, self.fullurl)
raise apache.SERVER_RETURN, apache.DONE
else:
req.status = apache.HTTP_NOT_FOUND
raise InvenioWebSubmitFileError, "%s does not exists!" % self.fullpath
_RE_STATUS_PARSER = re.compile(r'^(?P<type>email|group|egroup|role|firerole|status):\s*(?P<value>.*)$', re.S + re.I)
def check_bibdoc_authorization(user_info, status):
"""
Check if the user is authorized to access a document protected with the given status.
L{status} is a string of the form::
auth_type: auth_value
where C{auth_type} can have values in::
email, group, role, firerole, status
    and C{auth_value} has a value interpreted against C{auth_type}:
- C{email}: the user can access the document if his/her email matches C{auth_value}
- C{group}: the user can access the document if one of the groups (local or
external) of which he/she is member matches C{auth_value}
- C{role}: the user can access the document if he/she belongs to the WebAccess
role specified in C{auth_value}
- C{firerole}: the user can access the document if he/she is implicitly matched
by the role described by the firewall like role definition in C{auth_value}
    - C{status}: the user can access the document if he/she is authorized
      for the action C{viewrestrdoc} with the C{status} parameter having
      value C{auth_value}
@note: If no C{auth_type} is specified or if C{auth_type} is not one of the
above, C{auth_value} will be set to the value contained in the
parameter C{status}, and C{auth_type} will be considered to be C{status}.
@param user_info: the user_info dictionary
    @type user_info: dict
@param status: the status of the document.
@type status: string
@return: a tuple, of the form C{(auth_code, auth_message)} where auth_code is 0
if the authorization is granted and greater than 0 otherwise.
@rtype: (int, string)
@raise ValueError: in case of unexpected parsing error.
"""
def parse_status(status):
g = _RE_STATUS_PARSER.match(status)
if g:
return (g.group('type').lower(), g.group('value'))
else:
return ('status', status)
if acc_is_user_in_role(user_info, acc_get_role_id(SUPERADMINROLE)):
return (0, CFG_WEBACCESS_WARNING_MSGS[0])
auth_type, auth_value = parse_status(status)
if auth_type == 'status':
return acc_authorize_action(user_info, 'viewrestrdoc', status=auth_value)
elif auth_type == 'email':
if not auth_value.lower().strip() == user_info['email'].lower().strip():
            return (1, 'You must be the user %s in order to access this document' % repr(auth_value))
elif auth_type == 'group':
if not auth_value in user_info['group']:
return (1, 'You must be member of the group %s in order to access this document' % repr(auth_value))
elif auth_type == 'role':
if not acc_is_user_in_role(user_info, acc_get_role_id(auth_value)):
            return (1, 'You must be a member of the role %s in order to access this document' % repr(auth_value))
elif auth_type == 'firerole':
if not acc_firerole_check_user(user_info, compile_role_definition(auth_value)):
return (1, 'You must be authorized in order to access this document')
else:
raise ValueError, 'Unexpected authorization type %s for %s' % (repr(auth_type), repr(auth_value))
    return (0, CFG_WEBACCESS_WARNING_MSGS[0])
|
# Copyright (C) 2018-2019 Chris Lalancette <clalancette@gmail.com>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Classes to support UDF."""
from __future__ import absolute_import
import random
import struct
import sys
import time
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO # pylint: disable=ungrouped-imports
from pycdlib import pycdlibexception
from pycdlib import utils
# For mypy annotations
if False: # pylint: disable=using-constant-test
from typing import List, Optional, Type, Union # NOQA pylint: disable=unused-import
# NOTE: this import has to be here to avoid circular deps
from pycdlib import inode # NOQA pylint: disable=unused-import
# This is the CRC CCITT table generated with a polynomial of 0x11021 and
# 16-bits. The following code will re-generate the table:
#
# def _bytecrc(crc, poly, n):
# mask = 1<<(n-1)
# for i in range(8):
# if crc & mask:
# crc = (crc << 1) ^ poly
# else:
# crc = crc << 1
# mask = (1<<n) - 1
# crc = crc & mask
# return crc
#
# def _mkTable(poly, n):
# mask = (1<<n) - 1
# poly = poly & mask
# table = [_bytecrc(i<<(n-8),poly,n) for i in range(256)]
# return table
crc_ccitt_table = (0, 4129, 8258, 12387, 16516, 20645, 24774, 28903, 33032,
37161, 41290, 45419, 49548, 53677, 57806, 61935, 4657, 528,
12915, 8786, 21173, 17044, 29431, 25302, 37689, 33560, 45947,
41818, 54205, 50076, 62463, 58334, 9314, 13379, 1056, 5121,
25830, 29895, 17572, 21637, 42346, 46411, 34088, 38153,
58862, 62927, 50604, 54669, 13907, 9842, 5649, 1584, 30423,
26358, 22165, 18100, 46939, 42874, 38681, 34616, 63455, 59390,
55197, 51132, 18628, 22757, 26758, 30887, 2112, 6241, 10242,
14371, 51660, 55789, 59790, 63919, 35144, 39273, 43274, 47403,
23285, 19156, 31415, 27286, 6769, 2640, 14899, 10770, 56317,
52188, 64447, 60318, 39801, 35672, 47931, 43802, 27814, 31879,
19684, 23749, 11298, 15363, 3168, 7233, 60846, 64911, 52716,
56781, 44330, 48395, 36200, 40265, 32407, 28342, 24277, 20212,
15891, 11826, 7761, 3696, 65439, 61374, 57309, 53244, 48923,
44858, 40793, 36728, 37256, 33193, 45514, 41451, 53516, 49453,
61774, 57711, 4224, 161, 12482, 8419, 20484, 16421, 28742,
24679, 33721, 37784, 41979, 46042, 49981, 54044, 58239, 62302,
689, 4752, 8947, 13010, 16949, 21012, 25207, 29270, 46570,
42443, 38312, 34185, 62830, 58703, 54572, 50445, 13538, 9411,
5280, 1153, 29798, 25671, 21540, 17413, 42971, 47098, 34713,
38840, 59231, 63358, 50973, 55100, 9939, 14066, 1681, 5808,
26199, 30326, 17941, 22068, 55628, 51565, 63758, 59695,
39368, 35305, 47498, 43435, 22596, 18533, 30726, 26663, 6336,
2273, 14466, 10403, 52093, 56156, 60223, 64286, 35833, 39896,
43963, 48026, 19061, 23124, 27191, 31254, 2801, 6864, 10931,
14994, 64814, 60687, 56684, 52557, 48554, 44427, 40424, 36297,
31782, 27655, 23652, 19525, 15522, 11395, 7392, 3265, 61215,
65342, 53085, 57212, 44955, 49082, 36825, 40952, 28183, 32310,
20053, 24180, 11923, 16050, 3793, 7920)
have_py_3 = True
if sys.version_info.major == 2:
have_py_3 = False
def crc_ccitt(data):
# type: (bytes) -> int
"""
Calculate the CRC over a range of bytes using the CCITT polynomial.
Parameters:
data - The array of bytes to calculate the CRC over.
Returns:
The CCITT CRC of the data.
"""
crc = 0
if have_py_3:
for x in data:
crc = crc_ccitt_table[x ^ ((crc >> 8) & 0xFF)] ^ ((crc << 8) & 0xFF00)
else:
for x in data:
crc = crc_ccitt_table[ord(x) ^ ((crc >> 8) & 0xFF)] ^ ((crc << 8) & 0xFF00) # type: ignore
return crc
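# A minimal cross-check sketch for the table-driven CRC above. The helper
# below is hypothetical (it is not used anywhere else in this module); it
# computes the same CRC bit by bit with the CCITT polynomial 0x1021 and a
# zero initial value, so e.g.
# crc_ccitt(b'123456789') == _crc_ccitt_bitwise_sketch(b'123456789').
def _crc_ccitt_bitwise_sketch(data):
    # type: (bytes) -> int
    """Bit-by-bit CRC-CCITT used only to illustrate/sanity-check crc_ccitt()."""
    crc = 0
    for x in bytearray(data):  # bytearray yields ints on both Python 2 and 3
        crc ^= x << 8
        for _ in range(8):
            if crc & 0x8000:
                crc = ((crc << 1) ^ 0x1021) & 0xffff
            else:
                crc = (crc << 1) & 0xffff
    return crc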
def _ostaunicode(src):
# type: (str) -> bytes
"""Internal function to create an OSTA byte string from a source string."""
if have_py_3:
bytename = src
else:
bytename = src.decode('utf-8') # type: ignore
try:
enc = bytename.encode('latin-1')
encbyte = b'\x08'
except (UnicodeEncodeError, UnicodeDecodeError):
enc = bytename.encode('utf-16_be')
encbyte = b'\x10'
return encbyte + enc
def _ostaunicode_zero_pad(src, fulllen):
# type: (str, int) -> bytes
"""
Internal function to create a zero-padded Identifier byte string from a
source string.
Parameters:
src - The src string to start from.
fulllen - The padded out length of the result.
Returns:
A full identifier byte string containing the source string.
"""
byte_src = _ostaunicode(src)
return byte_src + b'\x00' * (fulllen - 1 - len(byte_src)) + (struct.pack('=B', len(byte_src)))
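# Worked example for the padding above (illustrative only): for
# _ostaunicode_zero_pad('abc', 32) the OSTA encoding is b'\x08abc' (the
# compression ID 8 followed by the Latin-1 bytes), which is padded with
# 27 zero bytes and terminated by a length byte of 4, giving the fixed
# 32-byte d-string layout used by the descriptors below.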
class BEAVolumeStructure(object):
"""
A class representing a UDF Beginning Extended Area Volume Structure
(ECMA-167, Part 2, 9.2).
"""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc')
FMT = '=B5sB2041s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self._initialized = False
def parse(self, data, extent):
# type: (bytes, int) -> None
"""
Parse the passed in data into a UDF BEA Volume Structure.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('BEA Volume Structure already initialized')
(structure_type, standard_ident, structure_version,
reserved_unused) = struct.unpack_from(self.FMT, data, 0)
if structure_type != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure type')
if standard_ident != b'BEA01':
raise pycdlibexception.PyCdlibInvalidISO('Invalid standard identifier')
if structure_version != 1:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure version')
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF BEA Volume Structure.
Parameters:
None.
Returns:
        A string representing this UDF BEA Volume Structure.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('BEA Volume Structure not initialized')
return struct.pack(self.FMT, 0, b'BEA01', 1, b'\x00' * 2041)
def new(self):
# type: () -> None
"""
Create a new UDF BEA Volume Structure.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('BEA Volume Structure already initialized')
self._initialized = True
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF BEA Volume Structure.
Parameters:
None.
Returns:
Integer extent location of this UDF BEA Volume Structure.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF BEA Volume Structure not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def set_extent_location(self, extent):
# type: (int) -> None
"""
Set the new location for this UDF BEA Volume Structure.
Parameters:
extent - The new extent location to set for this UDF BEA Volume Structure.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not initialized')
self.new_extent_loc = extent
class NSRVolumeStructure(object):
"""A class representing a UDF NSR Volume Structure (ECMA-167, Part 3, 9.1)."""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc',
'standard_ident')
FMT = '=B5sB2041s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self._initialized = False
def parse(self, data, extent):
# type: (bytes, int) -> None
"""
Parse the passed in data into a UDF NSR Volume Structure.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure already initialized')
(structure_type, self.standard_ident, structure_version,
reserved_unused) = struct.unpack_from(self.FMT, data, 0)
if structure_type != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure type')
if self.standard_ident not in (b'NSR02', b'NSR03'):
raise pycdlibexception.PyCdlibInvalidISO('Invalid standard identifier')
if structure_version != 1:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure version')
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF NSR Volume Structure.
Parameters:
None.
Returns:
        A string representing this UDF NSR Volume Structure.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure not initialized')
return struct.pack(self.FMT, 0, self.standard_ident, 1, b'\x00' * 2041)
def new(self, version):
# type: (int) -> None
"""
Create a new UDF NSR Volume Structure.
Parameters:
version - The version of the NSR Volume Structure to create; only 2
and 3 are supported.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure already initialized')
if version == 2:
self.standard_ident = b'NSR02'
elif version == 3:
self.standard_ident = b'NSR03'
else:
raise pycdlibexception.PyCdlibInternalError('Invalid NSR version requested')
self._initialized = True
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF NSR Volume Structure.
Parameters:
None.
Returns:
Integer extent location of this UDF NSR Volume Structure.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def set_extent_location(self, extent):
# type: (int) -> None
"""
Set the new location for this UDF NSR Volume Structure.
Parameters:
extent - The new extent location to set for this UDF NSR Volume Structure.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not initialized')
self.new_extent_loc = extent
class TEAVolumeStructure(object):
"""
A class representing a UDF Terminating Extended Area Volume Structure
(ECMA-167, Part 2, 9.3).
"""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc')
FMT = '=B5sB2041s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self._initialized = False
def parse(self, data, extent):
# type: (bytes, int) -> None
"""
Parse the passed in data into a UDF TEA Volume Structure.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('TEA Volume Structure already initialized')
(structure_type, standard_ident, structure_version,
reserved_unused) = struct.unpack_from(self.FMT, data, 0)
if structure_type != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure type')
if standard_ident != b'TEA01':
raise pycdlibexception.PyCdlibInvalidISO('Invalid standard identifier')
if structure_version != 1:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure version')
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF TEA Volume Structure.
Parameters:
None.
Returns:
        A string representing this UDF TEA Volume Structure.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF TEA Volume Structure not initialized')
return struct.pack(self.FMT, 0, b'TEA01', 1, b'\x00' * 2041)
def new(self):
# type: () -> None
"""
Create a new UDF TEA Volume Structure.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF TEA Volume Structure already initialized')
self._initialized = True
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF TEA Volume Structure.
Parameters:
None.
Returns:
Integer extent location of this UDF TEA Volume Structure.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF TEA Volume Structure not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def set_extent_location(self, extent):
# type: (int) -> None
"""
Set the new location for this UDF TEA Volume Structure.
Parameters:
extent - The new extent location to set for this UDF TEA Volume Structure.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not initialized')
self.new_extent_loc = extent
class UDFBootDescriptor(object):
"""A class representing a UDF Boot Descriptor (ECMA-167, Part 2, 9.4)."""
__slots__ = ('_initialized', 'architecture_type', 'boot_identifier',
'boot_extent_loc', 'boot_extent_len', 'load_address',
'start_address', 'desc_creation_time', 'flags', 'boot_use',
'orig_extent_loc', 'new_extent_loc')
FMT = '<B5sBB32s32sLLQQ12sH32s1906s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self._initialized = False
def parse(self, data, extent):
# type: (bytes, int) -> None
"""
Parse the passed in data into a UDF Boot Descriptor.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Boot Descriptor already initialized')
(structure_type, standard_ident, structure_version, reserved1,
architecture_type, boot_ident, self.boot_extent_loc,
self.boot_extent_len, self.load_address, self.start_address,
desc_creation_time, self.flags, reserved2,
self.boot_use) = struct.unpack_from(self.FMT, data, 0)
if structure_type != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure type')
if standard_ident != b'BOOT2':
raise pycdlibexception.PyCdlibInvalidISO('Invalid standard identifier')
if structure_version != 1:
raise pycdlibexception.PyCdlibInvalidISO('Invalid structure version')
if reserved1 != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid reserved1')
if self.flags > 1:
raise pycdlibexception.PyCdlibInvalidISO('Invalid flags (must be 0 or 1)')
if reserved2 != b'\x00' * 32:
raise pycdlibexception.PyCdlibInvalidISO('Invalid reserved2')
self.architecture_type = UDFEntityID()
self.architecture_type.parse(architecture_type)
self.boot_identifier = UDFEntityID()
self.boot_identifier.parse(boot_ident)
self.desc_creation_time = UDFTimestamp()
self.desc_creation_time.parse(desc_creation_time)
self.orig_extent_loc = extent
self._initialized = True
def new(self):
# type: () -> None
"""
Create a new Boot Descriptor.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Boot Descriptor already initialized')
self.flags = 0 # FIXME: allow the user to set this
self.architecture_type = UDFEntityID()
self.architecture_type.new(0) # FIXME: allow the user to set this
self.boot_identifier = UDFEntityID()
self.boot_identifier.new(0) # FIXME: allow the user to set this
self.boot_extent_loc = 0 # FIXME: allow the user to set this
self.boot_extent_len = 0 # FIXME: allow the user to set this
self.load_address = 0 # FIXME: allow the user to set this
self.start_address = 0 # FIXME: allow the user to set this
self.desc_creation_time = UDFTimestamp()
self.desc_creation_time.new()
self.flags = 0 # FIXME: allow the user to set this
self.boot_use = b'\x00' * 1906 # FIXME: allow the user to set this
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Boot Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Boot Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Boot Descriptor not initialized')
return struct.pack(self.FMT, 0, b'BOOT2', 1, 0,
self.architecture_type.record(),
self.boot_identifier.record(),
self.boot_extent_loc, self.boot_extent_len,
self.load_address, self.start_address,
self.desc_creation_time.record(), self.flags,
b'\x00' * 32, self.boot_use)
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Boot Descriptor.
Parameters:
None.
Returns:
Integer extent location of this UDF Boot Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Boot Descriptor not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def set_extent_location(self, extent):
# type: (int) -> None
"""
Set the new location for this UDF Boot Descriptor.
Parameters:
extent - The new extent location to set for this UDF Boot Descriptor.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This UDF Boot Descriptor is not initialized')
self.new_extent_loc = extent
def _compute_csum(data):
# type: (bytes) -> int
"""
Compute a simple checksum over the given data.
Parameters:
data - The data to compute the checksum over.
Returns:
The checksum.
"""
csum = 0
if have_py_3:
for byte in data:
csum += byte
csum -= data[4]
else:
for byte in data:
csum += ord(byte) # type: ignore
csum -= ord(data[4]) # type: ignore
return csum % 256
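# Illustrative note on the checksum above: it is the sum of the 16 bytes of a
# descriptor tag modulo 256, excluding the byte at offset 4 (the tag checksum
# field itself). That is why UDFTag.record() below first packs the tag with a
# zero checksum and then patches the computed value into offset 4.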
class UDFTag(object):
"""A class representing a UDF Descriptor Tag (ECMA-167, Part 3, 7.2)."""
__slots__ = ('_initialized', 'tag_ident', 'desc_version',
'tag_serial_number', 'tag_location', 'desc_crc_length')
FMT = '<HHBBHHHL'
def __init__(self):
# type: () -> None
self.desc_crc_length = -1
self._initialized = False
def parse(self, data, extent):
# type: (bytes, int) -> None
"""
Parse the passed in data into a UDF Descriptor tag.
Parameters:
data - The data to parse.
extent - The extent to compare against for the tag location.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Tag already initialized')
(self.tag_ident, self.desc_version, tag_checksum, reserved,
self.tag_serial_number, desc_crc, self.desc_crc_length,
self.tag_location) = struct.unpack_from(self.FMT, data, 0)
if reserved != 0:
raise pycdlibexception.PyCdlibInvalidISO('Reserved data not 0!')
if _compute_csum(data[:16]) != tag_checksum:
raise pycdlibexception.PyCdlibInvalidISO('Tag checksum does not match!')
if self.tag_location != extent:
            # In theory, we should abort (throw an exception) if we see a tag
            # location that doesn't match the actual location. However, we
# have seen UDF ISOs in the wild (most notably PS2 GT4 ISOs) that
# have an invalid tag location for the second anchor and File Set
# Terminator. So that we can support those ISOs, just silently
# fix it up. We lose a little bit of detection of whether this is
# "truly" a UDFTag, but it is really not a big risk.
self.tag_location = extent
if self.desc_version not in (2, 3):
raise pycdlibexception.PyCdlibInvalidISO('Tag version not 2 or 3')
if (len(data) - 16) < self.desc_crc_length:
raise pycdlibexception.PyCdlibInternalError('Not enough bytes to compute CRC')
if desc_crc != crc_ccitt(data[16:16 + self.desc_crc_length]):
raise pycdlibexception.PyCdlibInvalidISO('Tag CRC does not match!')
self._initialized = True
def record(self, crc_bytes):
# type: (bytes) -> bytes
"""
Generate the string representing this UDF Descriptor Tag.
Parameters:
crc_bytes - The string to compute the CRC over.
Returns:
A string representing this UDF Descriptor Tag.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Descriptor Tag not initialized')
crc_byte_len = len(crc_bytes)
if self.desc_crc_length >= 0:
crc_byte_len = self.desc_crc_length
        # We need to compute the checksum, but we'll do that by first creating
        # the output buffer with the csum field set to 0, computing the csum
        # over it, and then patching the computed value into byte 4 of the
        # record.
rec = struct.pack(self.FMT, self.tag_ident, self.desc_version,
0, 0, self.tag_serial_number,
crc_ccitt(crc_bytes[:crc_byte_len]),
crc_byte_len, self.tag_location)
csum = _compute_csum(rec)
ba = bytearray(rec)
ba[4] = csum
return bytes(ba)
def new(self, tag_ident, tag_serial=0):
# type: (int, int) -> None
"""
Create a new UDF Descriptor Tag.
Parameters:
tag_ident - The tag identifier number for this tag.
tag_serial - The tag serial number for this tag.
Returns:
Nothing
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Tag already initialized')
self.tag_ident = tag_ident
self.desc_version = 2
self.tag_serial_number = tag_serial
self.tag_location = 0 # This will be set later.
self._initialized = True
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFTag):
return NotImplemented
return self.tag_ident == other.tag_ident and \
self.desc_version == other.desc_version and \
self.tag_serial_number == other.tag_serial_number and \
self.tag_location == other.tag_location and \
self.desc_crc_length == other.desc_crc_length
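# A minimal usage sketch for UDFTag (tag identifier 2, the Anchor Volume
# Descriptor Pointer, is used here purely as an example; 'payload' is a
# hypothetical bytes object): a descriptor builds its payload first and then
# prepends the 16-byte tag, whose CRC covers that payload.
#
#     tag = UDFTag()
#     tag.new(2)              # tag_ident=2, serial number defaults to 0
#     tag.tag_location = 256  # extent where the descriptor will be written
#     raw = tag.record(payload) + payload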
class UDFAnchorVolumeStructure(object):
"""A class representing a UDF Anchor Volume Structure (ECMA-167, Part 3, 10.2)."""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc',
'main_vd', 'reserve_vd', 'desc_tag')
FMT = '=16s8s8s480s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self._initialized = False
def parse(self, data, extent, desc_tag):
# type: (bytes, int, UDFTag) -> None
"""
Parse the passed in data into a UDF Anchor Volume Structure.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Anchor Volume Structure already initialized')
(tag_unused, main_vd,
reserve_vd, reserved_unused) = struct.unpack_from(self.FMT, data, 0)
self.main_vd = UDFExtentAD()
self.main_vd.parse(main_vd)
self.reserve_vd = UDFExtentAD()
self.reserve_vd.parse(reserve_vd)
self.desc_tag = desc_tag
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Anchor Volume Structure.
Parameters:
None.
Returns:
A string representing this UDF Anchor Volume Structure.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Anchor Volume Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16, self.main_vd.record(),
self.reserve_vd.record(), b'\x00' * 480)[16:]
return self.desc_tag.record(rec) + rec
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Anchor Volume Structure.
Parameters:
None.
Returns:
Integer extent location of this UDF Anchor Volume Structure.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Anchor Volume Structure not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def new(self):
# type: () -> None
"""
Create a new UDF Anchor Volume Structure.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Anchor Volume Structure already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(2) # FIXME: let the user set serial_number
self.main_vd = UDFExtentAD()
self.main_vd.new(32768, 0) # The location will get set later.
self.reserve_vd = UDFExtentAD()
self.reserve_vd.new(32768, 0) # The location will get set later.
self._initialized = True
def set_extent_location(self, new_location, main_vd_extent, reserve_vd_extent):
# type: (int, int, int) -> None
"""
Set a new location for this Anchor Volume Structure.
Parameters:
new_location - The new extent that this Anchor Volume Structure should be located at.
main_vd_extent - The extent containing the main Volume Descriptors.
reserve_vd_extent - The extent containing the reserve Volume Descriptors.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Anchor Volume Structure not initialized')
self.new_extent_loc = new_location
self.desc_tag.tag_location = new_location
self.main_vd.extent_location = main_vd_extent
self.reserve_vd.extent_location = reserve_vd_extent
def __ne__(self, other):
# type: (object) -> bool
return not self == other
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFAnchorVolumeStructure):
return NotImplemented
return self.main_vd.extent_location == other.main_vd.extent_location and self.reserve_vd.extent_location == other.reserve_vd.extent_location
class UDFVolumeDescriptorPointer(object):
"""A class representing a UDF Volume Descriptor Pointer (ECMA-167, Part 3, 10.3)."""
__slots__ = ('initialized', 'orig_extent_loc', 'new_extent_loc',
'vol_seqnum', 'next_vol_desc_seq_extent', 'desc_tag')
FMT = '<16sL8s484s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self.initialized = False
def parse(self, data, extent, desc_tag):
# type: (bytes, int, UDFTag) -> None
"""
Parse the passed in data into a UDF Volume Descriptor Pointer.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Volume Descriptor Pointer already initialized')
(tag_unused, self.vol_seqnum, next_vol_extent,
reserved_unused) = struct.unpack_from(self.FMT, data, 0)
self.next_vol_desc_seq_extent = UDFExtentAD()
self.next_vol_desc_seq_extent.parse(next_vol_extent)
self.desc_tag = desc_tag
self.orig_extent_loc = extent
self.initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Volume Descriptor Pointer.
Parameters:
None.
Returns:
A string representing this UDF Volume Descriptor Pointer.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Volume Descriptor Pointer not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16, self.vol_seqnum,
self.next_vol_desc_seq_extent.record(), b'\x00' * 484)[16:]
return self.desc_tag.record(rec) + rec
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Volume Descriptor Pointer.
Parameters:
None.
Returns:
Integer extent location of this UDF Volume Descriptor Pointer.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Volume Descriptor Pointer not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def new(self):
# type: () -> None
"""
Create a new UDF Volume Descriptor Pointer.
Parameters:
None.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Volume Descriptor Pointer already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(3) # FIXME: let the user set serial_number
self.vol_seqnum = 0 # FIXME: let the user set this
self.next_vol_desc_seq_extent = UDFExtentAD()
self.next_vol_desc_seq_extent.new(0, 0) # FIXME: let the user set this
self.new_extent_loc = 0 # This will be set later
self.initialized = True
def set_extent_location(self, new_location):
# type: (int) -> None
"""
Set the new location for this UDF Volume Descriptor Pointer.
Parameters:
new_location - The new extent this UDF Volume Descriptor Pointer should be located at.
Returns:
Nothing.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Volume Descriptor Pointer not initialized')
self.new_extent_loc = new_location
self.desc_tag.tag_location = new_location
class UDFTimestamp(object):
"""A class representing a UDF timestamp (ECMA-167, Part 1, 7.3)."""
__slots__ = ('_initialized', 'year', 'month', 'day', 'hour', 'minute',
'second', 'centiseconds', 'hundreds_microseconds',
'microseconds', 'timetype', 'tz')
FMT = '<BBHBBBBBBBB'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Timestamp.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Timestamp already initialized')
(tz, timetype, self.year, self.month, self.day, self.hour, self.minute,
self.second, self.centiseconds, self.hundreds_microseconds,
self.microseconds) = struct.unpack_from(self.FMT, data, 0)
self.timetype = timetype >> 4
def twos_comp(val, bits):
# type: (int, int) -> int
"""Compute the 2's complement of int value val"""
if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
val = val - (1 << bits) # compute negative value
return val # return positive value as is
self.tz = twos_comp(((timetype & 0xf) << 8) | tz, 12)
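        # Per ECMA-167 1/7.3, the timezone is an offset in minutes from UTC;
        # -2047 means that no timezone was specified.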
if self.tz < -1440 or self.tz > 1440:
if self.tz != -2047:
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF timezone')
if self.year < 1 or self.year > 9999:
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF year')
if self.month < 1 or self.month > 12:
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF month')
if self.day < 1 or self.day > 31:
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF day')
if self.hour < 0 or self.hour > 23:
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF hour')
if self.minute < 0 or self.minute > 59:
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF minute')
if self.second < 0 or self.second > 59:
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF second')
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Timestamp.
Parameters:
None.
Returns:
A string representing this UDF Timestamp.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Timestamp not initialized')
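        # Split the 12-bit two's complement timezone back up: the low 8 bits
        # go into the first byte, and the high 4 bits share a byte with the
        # 4-bit time type.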
tmp = ((1 << 16) - 1) & self.tz
newtz = tmp & 0xff
newtimetype = ((tmp >> 8) & 0x0f) | (self.timetype << 4)
return struct.pack(self.FMT, newtz, newtimetype, self.year, self.month,
self.day, self.hour, self.minute, self.second,
self.centiseconds, self.hundreds_microseconds,
self.microseconds)
def new(self):
# type: () -> None
"""
Create a new UDF Timestamp.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Timestamp already initialized')
tm = time.time()
local = time.localtime(tm)
self.tz = utils.gmtoffset_from_tm(tm, local)
# FIXME: for the timetype, 0 is UTC, 1 is local, 2 is 'agreement'.
# let the user set this.
self.timetype = 1
self.year = local.tm_year
self.month = local.tm_mon
        self.day = local.tm_mday

self.hour = local.tm_hour
self.minute = local.tm_min
self.second = local.tm_sec
self.centiseconds = 0
self.hundreds_microseconds = 0
self.microseconds = 0
self._initialized = True
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFTimestamp):
return NotImplemented
return self.year == other.year and self.month == other.month and \
self.day == other.day and self.hour == other.hour and \
self.minute == other.minute and self.second == other.second and \
self.centiseconds == other.centiseconds and \
self.hundreds_microseconds == other.hundreds_microseconds and \
self.microseconds == other.microseconds and \
self.timetype == other.timetype and self.tz == other.tz
class UDFEntityID(object):
"""A class representing a UDF Entity ID (ECMA-167, Part 1, 7.4)."""
__slots__ = ('_initialized', 'flags', 'identifier', 'suffix')
FMT = '=B23s8s'
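    # 1 byte of flags, a 23-byte identifier, and an 8-byte suffix (32 bytes total).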
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Entity ID.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Entity ID already initialized')
(self.flags, self.identifier, self.suffix) = struct.unpack_from(self.FMT, data, 0)
if self.flags > 3:
raise pycdlibexception.PyCdlibInvalidISO('UDF Entity ID flags must be between 0 and 3')
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Entity ID.
Parameters:
None.
Returns:
A string representing this UDF Entity ID.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Entity ID not initialized')
return struct.pack(self.FMT, self.flags, self.identifier, self.suffix)
def new(self, flags, identifier=b'', suffix=b''):
# type: (int, bytes, bytes) -> None
"""
Create a new UDF Entity ID.
Parameters:
flags - The flags to set for this Entity ID.
identifier - The identifier to set for this Entity ID.
suffix - The suffix to set for this Entity ID.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Entity ID already initialized')
if flags > 3:
raise pycdlibexception.PyCdlibInvalidInput('UDF Entity ID flags must be between 0 and 3')
        if len(identifier) > 23:
            raise pycdlibexception.PyCdlibInvalidInput('UDF Entity ID identifier must be 23 characters or less')
        if len(suffix) > 8:
            raise pycdlibexception.PyCdlibInvalidInput('UDF Entity ID suffix must be 8 characters or less')
self.flags = flags
self.identifier = identifier + b'\x00' * (23 - len(identifier))
self.suffix = suffix + b'\x00' * (8 - len(suffix))
self._initialized = True
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFEntityID):
return NotImplemented
return self.flags == other.flags and self.identifier == other.identifier and self.suffix == other.suffix
class UDFCharspec(object):
"""A class representing a UDF charspec (ECMA-167, Part 1, 7.2.1)."""
__slots__ = ('_initialized', 'set_type', 'set_information')
FMT = '=B63s'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Charspec.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Charspec already initialized')
(self.set_type,
self.set_information) = struct.unpack_from(self.FMT, data, 0)
if self.set_type > 8:
raise pycdlibexception.PyCdlibInvalidISO('Invalid charset parsed; only 0-8 supported')
self._initialized = True
def new(self, set_type, set_information):
# type: (int, bytes) -> None
"""
        Create a new UDF Charspec.
Parameters:
set_type - The integer set type. Must be between 0 and 8.
set_information - Additional set information. Must be less than 64
bytes.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Charspec already initialized')
if set_type > 8:
raise pycdlibexception.PyCdlibInvalidInput('Invalid charset specified; only 0-8 supported')
if len(set_information) > 63:
raise pycdlibexception.PyCdlibInvalidInput('Invalid charset information; exceeds maximum size of 63')
self.set_type = set_type
self.set_information = set_information + b'\x00' * (63 - len(set_information))
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Charspec.
Parameters:
None.
Returns:
A string representing this UDF Charspec.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Charspec not initialized')
return struct.pack(self.FMT, self.set_type, self.set_information)
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFCharspec):
return NotImplemented
return self.set_type == other.set_type and self.set_information == other.set_information
class UDFExtentAD(object):
"""A class representing a UDF Extent Descriptor (ECMA-167, Part 3, 7.1)."""
__slots__ = ('_initialized', 'extent_length', 'extent_location')
FMT = '<LL'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Extent AD.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Extent descriptor already initialized')
(self.extent_length,
self.extent_location) = struct.unpack_from(self.FMT, data, 0)
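        # Extent lengths are limited to 30 bits by ECMA-167, so anything at or
        # above 0x3fffffff is rejected as invalid.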
if self.extent_length >= 0x3fffffff:
raise pycdlibexception.PyCdlibInvalidISO('UDF Extent descriptor length must be less than 0x3fffffff')
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Extent AD.
Parameters:
None.
Returns:
A string representing this UDF Extent AD.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Extent AD not initialized')
return struct.pack(self.FMT, self.extent_length, self.extent_location)
def new(self, length, blocknum):
# type: (int, int) -> None
"""
        Create a new UDF Extent AD.
Parameters:
length - The length of the data in the allocation.
blocknum - The logical block number the allocation starts at.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Extent AD already initialized')
if length >= 0x3fffffff:
raise pycdlibexception.PyCdlibInternalError('UDF Extent AD length must be less than 0x3fffffff')
self.extent_length = length
self.extent_location = blocknum
self._initialized = True
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFExtentAD):
return NotImplemented
return self.extent_length == other.extent_length and self.extent_location == other.extent_location
class UDFPrimaryVolumeDescriptor(object):
"""A class representing a UDF Primary Volume Descriptor (ECMA-167, Part 3, 10.1)."""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc',
'vol_desc_seqnum', 'desc_num', 'vol_ident', 'vol_set_ident',
'desc_char_set', 'explanatory_char_set', 'vol_abstract',
'vol_copyright', 'implementation_use',
'predecessor_vol_desc_location', 'desc_tag', 'recording_date',
'app_ident', 'impl_ident', 'max_interchange_level',
'interchange_level', 'flags')
FMT = '<16sLL32sHHHHLL128s64s64s8s8s32s12s32s64sLH22s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self._initialized = False
def parse(self, data, extent, desc_tag):
# type: (bytes, int, UDFTag) -> None
"""
Parse the passed in data into a UDF Primary Volume Descriptor.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Primary Volume Descriptor already initialized')
(tag_unused, self.vol_desc_seqnum, self.desc_num, self.vol_ident,
vol_seqnum, max_vol_seqnum, self.interchange_level,
self.max_interchange_level, char_set_list,
max_char_set_list, self.vol_set_ident, desc_char_set,
explanatory_char_set, vol_abstract, vol_copyright, app_ident,
recording_date, impl_ident, self.implementation_use,
self.predecessor_vol_desc_location, self.flags,
reserved) = struct.unpack_from(self.FMT, data, 0)
self.desc_tag = desc_tag
if vol_seqnum != 1:
raise pycdlibexception.PyCdlibInvalidISO('Only DVD Read-Only disks are supported')
if max_vol_seqnum != 1:
raise pycdlibexception.PyCdlibInvalidISO('Only DVD Read-Only disks are supported')
if self.interchange_level not in (2, 3):
raise pycdlibexception.PyCdlibInvalidISO('Unsupported interchange level (only 2 and 3 supported)')
if char_set_list != 1:
raise pycdlibexception.PyCdlibInvalidISO('Only DVD Read-Only disks are supported')
if max_char_set_list != 1:
raise pycdlibexception.PyCdlibInvalidISO('Only DVD Read-Only disks are supported')
if self.flags not in (0, 1):
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF flags')
if reserved != b'\x00' * 22:
raise pycdlibexception.PyCdlibInvalidISO('UDF Primary Volume Descriptor reserved data not 0')
self.desc_char_set = UDFCharspec()
self.desc_char_set.parse(desc_char_set)
self.explanatory_char_set = UDFCharspec()
self.explanatory_char_set.parse(explanatory_char_set)
self.vol_abstract = UDFExtentAD()
self.vol_abstract.parse(vol_abstract)
self.vol_copyright = UDFExtentAD()
self.vol_copyright.parse(vol_copyright)
self.recording_date = UDFTimestamp()
self.recording_date.parse(recording_date)
self.app_ident = UDFEntityID()
self.app_ident.parse(app_ident)
self.impl_ident = UDFEntityID()
self.impl_ident.parse(impl_ident)
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Primary Volume Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Primary Volume Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Primary Volume Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.desc_num,
self.vol_ident, 1, 1, self.interchange_level,
self.max_interchange_level, 1, 1, self.vol_set_ident,
self.desc_char_set.record(),
self.explanatory_char_set.record(),
self.vol_abstract.record(),
self.vol_copyright.record(),
self.app_ident.record(), self.recording_date.record(),
self.impl_ident.record(), self.implementation_use,
self.predecessor_vol_desc_location, self.flags,
b'\x00' * 22)[16:]
return self.desc_tag.record(rec) + rec
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Primary Volume Descriptor.
Parameters:
None.
Returns:
Integer extent location of this UDF Primary Volume Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Primary Volume Descriptor not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def new(self):
# type: () -> None
"""
Create a new UDF Primary Volume Descriptor.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Primary Volume Descriptor already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(1) # FIXME: let the user set serial_number
self.vol_desc_seqnum = 0 # FIXME: let the user set this
self.desc_num = 0 # FIXME: let the user set this
self.vol_ident = _ostaunicode_zero_pad('CDROM', 32)
# According to UDF 2.60, 2.2.2.5, the VolumeSetIdentifier should have
# at least the first 16 characters be a unique value. Further, the
# first 8 bytes of that should be a time value in ASCII hexadecimal
# representation. To make it truly unique, we use that time plus a
# random value, all ASCII encoded.
unique = format(int(time.time()), '08x') + format(random.getrandbits(26), '08x')
self.vol_set_ident = _ostaunicode_zero_pad(unique, 128)
self.desc_char_set = UDFCharspec()
self.desc_char_set.new(0, b'OSTA Compressed Unicode') # FIXME: let the user set this
self.explanatory_char_set = UDFCharspec()
self.explanatory_char_set.new(0, b'OSTA Compressed Unicode') # FIXME: let the user set this
self.vol_abstract = UDFExtentAD()
self.vol_abstract.new(0, 0) # FIXME: let the user set this
self.vol_copyright = UDFExtentAD()
self.vol_copyright.new(0, 0) # FIXME: let the user set this
self.app_ident = UDFEntityID()
self.app_ident.new(0)
self.recording_date = UDFTimestamp()
self.recording_date.new()
self.impl_ident = UDFEntityID()
self.impl_ident.new(0, b'*pycdlib')
self.implementation_use = b'\x00' * 64 # FIXME: let the user set this
self.predecessor_vol_desc_location = 0 # FIXME: let the user set this
self.interchange_level = 2
self.max_interchange_level = 2
self.flags = 0
self._initialized = True
def set_extent_location(self, new_location):
# type: (int) -> None
"""
Set the new location for this UDF Primary Volume Descriptor.
Parameters:
new_location - The extent that this Primary Volume Descriptor should be located at.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Primary Volume Descriptor not initialized')
self.new_extent_loc = new_location
self.desc_tag.tag_location = new_location
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFPrimaryVolumeDescriptor):
return NotImplemented
return self.vol_desc_seqnum == other.vol_desc_seqnum and \
self.desc_num == other.desc_num and \
self.vol_ident == other.vol_ident and \
self.vol_set_ident == other.vol_set_ident and \
self.desc_char_set == other.desc_char_set and \
self.explanatory_char_set == other.explanatory_char_set and \
self.vol_abstract == other.vol_abstract and \
self.vol_copyright == other.vol_copyright and \
self.implementation_use == other.implementation_use and \
self.predecessor_vol_desc_location == other.predecessor_vol_desc_location and \
self.desc_tag == other.desc_tag and \
self.recording_date == other.recording_date and \
self.app_ident == other.app_ident and \
self.impl_ident == other.impl_ident and \
self.max_interchange_level == other.max_interchange_level and \
self.interchange_level == other.interchange_level and \
self.flags == other.flags
class UDFImplementationUseVolumeDescriptorImplementationUse(object):
"""
A class representing the Implementation Use field of the Implementation Use
Volume Descriptor.
"""
__slots__ = ('_initialized', 'char_set', 'log_vol_ident', 'lv_info1',
'lv_info2', 'lv_info3', 'impl_ident', 'impl_use')
FMT = '=64s128s36s36s36s32s128s'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Implementation Use Volume
Descriptor Implementation Use field.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor Implementation Use field already initialized')
(char_set, self.log_vol_ident, self.lv_info1, self.lv_info2,
self.lv_info3, impl_ident,
self.impl_use) = struct.unpack_from(self.FMT, data, 0)
self.char_set = UDFCharspec()
self.char_set.parse(char_set)
self.impl_ident = UDFEntityID()
self.impl_ident.parse(impl_ident)
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Implementation Use Volume
Descriptor Implementation Use field.
Parameters:
None.
Returns:
A string representing this UDF Implementation Use Volume Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor Implementation Use field not initialized')
return struct.pack(self.FMT, self.char_set.record(), self.log_vol_ident,
self.lv_info1, self.lv_info2, self.lv_info3,
self.impl_ident.record(), self.impl_use)
def new(self):
# type: () -> None
"""
Create a new UDF Implementation Use Volume Descriptor Implementation Use
field.
Parameters:
         None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor Implementation Use field already initialized')
self.char_set = UDFCharspec()
self.char_set.new(0, b'OSTA Compressed Unicode') # FIXME: let the user set this
self.log_vol_ident = _ostaunicode_zero_pad('CDROM', 128)
self.lv_info1 = b'\x00' * 36
self.lv_info2 = b'\x00' * 36
self.lv_info3 = b'\x00' * 36
self.impl_ident = UDFEntityID()
self.impl_ident.new(0, b'*pycdlib', b'')
self.impl_use = b'\x00' * 128
self._initialized = True
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFImplementationUseVolumeDescriptorImplementationUse):
return NotImplemented
return self.char_set == other.char_set and \
self.log_vol_ident == other.log_vol_ident and \
self.lv_info1 == other.lv_info1 and \
self.lv_info2 == other.lv_info2 and \
self.lv_info3 == other.lv_info3 and \
self.impl_ident == other.impl_ident and \
self.impl_use == other.impl_use
class UDFImplementationUseVolumeDescriptor(object):
"""A class representing a UDF Implementation Use Volume Structure (ECMA-167, Part 3, 10.4)."""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc',
'vol_desc_seqnum', 'impl_use', 'desc_tag', 'impl_ident')
FMT = '<16sL32s460s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self._initialized = False
def parse(self, data, extent, desc_tag):
# type: (bytes, int, UDFTag) -> None
"""
Parse the passed in data into a UDF Implementation Use Volume
Descriptor.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor already initialized')
(tag_unused, self.vol_desc_seqnum, impl_ident,
impl_use) = struct.unpack_from(self.FMT, data, 0)
self.desc_tag = desc_tag
self.impl_ident = UDFEntityID()
self.impl_ident.parse(impl_ident)
if self.impl_ident.identifier[:12] != b'*UDF LV Info':
raise pycdlibexception.PyCdlibInvalidISO("Implementation Use Identifier not '*UDF LV Info'")
self.impl_use = UDFImplementationUseVolumeDescriptorImplementationUse()
self.impl_use.parse(impl_use)
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Implementation Use Volume
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Implementation Use Volume Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.impl_ident.record(),
self.impl_use.record())[16:]
return self.desc_tag.record(rec) + rec
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Implementation Use Volume
Descriptor.
Parameters:
None.
Returns:
Integer extent location of this UDF Implementation Use Volume
Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def new(self):
# type: () -> None
"""
Create a new UDF Implementation Use Volume Descriptor.
Parameters:
         None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(4) # FIXME: let the user set serial_number
self.vol_desc_seqnum = 1
self.impl_ident = UDFEntityID()
self.impl_ident.new(0, b'*UDF LV Info', b'\x02\x01')
self.impl_use = UDFImplementationUseVolumeDescriptorImplementationUse()
self.impl_use.new()
self._initialized = True
def set_extent_location(self, new_location):
# type: (int) -> None
"""
Set the new location for this UDF Implementation Use Volume Descriptor.
Parameters:
new_location - The new extent this UDF Implementation Use Volume Descriptor should be located at.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor not initialized')
self.new_extent_loc = new_location
self.desc_tag.tag_location = new_location
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFImplementationUseVolumeDescriptor):
return NotImplemented
return self.vol_desc_seqnum == other.vol_desc_seqnum and \
self.impl_use == other.impl_use and \
self.desc_tag == other.desc_tag and \
self.impl_ident == other.impl_ident
class UDFPartitionHeaderDescriptor(object):
"""A class representing a UDF Partition Header Descriptor."""
__slots__ = ('_initialized', 'unalloc_space_table', 'unalloc_space_bitmap',
'partition_integrity_table', 'freed_space_table',
'freed_space_bitmap')
FMT = '=8s8s8s8s8s88s'
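    # Five 8-byte Short Allocation Descriptors followed by 88 reserved bytes (128 bytes total).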
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Partition Header Descriptor.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Header Descriptor already initialized')
(unalloc_space_table, unalloc_space_bitmap, partition_integrity_table,
freed_space_table, freed_space_bitmap,
reserved_unused) = struct.unpack_from(self.FMT, data, 0)
self.unalloc_space_table = UDFShortAD()
self.unalloc_space_table.parse(unalloc_space_table)
self.unalloc_space_bitmap = UDFShortAD()
self.unalloc_space_bitmap.parse(unalloc_space_bitmap)
self.partition_integrity_table = UDFShortAD()
self.partition_integrity_table.parse(partition_integrity_table)
self.freed_space_table = UDFShortAD()
self.freed_space_table.parse(freed_space_table)
self.freed_space_bitmap = UDFShortAD()
self.freed_space_bitmap.parse(freed_space_bitmap)
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Partition Header Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Partition Header Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Header Descriptor not initialized')
return struct.pack(self.FMT, self.unalloc_space_table.record(),
self.unalloc_space_bitmap.record(),
self.partition_integrity_table.record(),
self.freed_space_table.record(),
self.freed_space_bitmap.record(),
b'\x00' * 88)
def new(self):
# type: () -> None
"""
Create a new UDF Partition Header Descriptor.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Header Descriptor already initialized')
self.unalloc_space_table = UDFShortAD()
self.unalloc_space_table.new(0)
self.unalloc_space_bitmap = UDFShortAD()
self.unalloc_space_bitmap.new(0)
self.partition_integrity_table = UDFShortAD()
self.partition_integrity_table.new(0)
self.freed_space_table = UDFShortAD()
self.freed_space_table.new(0)
self.freed_space_bitmap = UDFShortAD()
self.freed_space_bitmap.new(0)
self._initialized = True
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFPartitionHeaderDescriptor):
return NotImplemented
return self.unalloc_space_table == other.unalloc_space_table and \
self.unalloc_space_bitmap == other.unalloc_space_bitmap and \
self.partition_integrity_table == other.partition_integrity_table and \
self.freed_space_table == other.freed_space_table and \
self.freed_space_bitmap == other.freed_space_bitmap
class UDFPartitionVolumeDescriptor(object):
"""A class representing a UDF Partition Volume Structure (ECMA-167, Part 3, 10.5)."""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc',
'vol_desc_seqnum', 'part_flags', 'part_num', 'access_type',
'part_start_location', 'part_length', 'implementation_use',
'desc_tag', 'part_contents', 'impl_ident', 'part_contents_use')
FMT = '<16sLHH32s128sLLL32s128s156s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self._initialized = False
def parse(self, data, extent, desc_tag):
# type: (bytes, int, UDFTag) -> None
"""
Parse the passed in data into a UDF Partition Volume Descriptor.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor already initialized')
(tag_unused, self.vol_desc_seqnum, self.part_flags, self.part_num,
part_contents, part_contents_use, self.access_type,
self.part_start_location, self.part_length, impl_ident,
self.implementation_use, reserved_unused) = struct.unpack_from(self.FMT, data, 0)
if self.part_flags not in (0, 1):
raise pycdlibexception.PyCdlibInvalidISO('Invalid partition flags')
self.desc_tag = desc_tag
self.part_contents = UDFEntityID()
self.part_contents.parse(part_contents)
if self.part_contents.identifier[:6] not in (b'+FDC01', b'+CD001', b'+CDW02', b'+NSR02', b'+NSR03'):
raise pycdlibexception.PyCdlibInvalidISO("Partition Contents Identifier not '+FDC01', '+CD001', '+CDW02', '+NSR02', or '+NSR03'")
if self.access_type > 0x1f:
raise pycdlibexception.PyCdlibInvalidISO('Invalid UDF partition access type')
self.part_contents_use = UDFPartitionHeaderDescriptor()
self.part_contents_use.parse(part_contents_use)
self.impl_ident = UDFEntityID()
self.impl_ident.parse(impl_ident)
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Partition Volume Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Partition Volume Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.part_flags,
self.part_num, self.part_contents.record(),
self.part_contents_use.record(), self.access_type,
self.part_start_location, self.part_length,
self.impl_ident.record(), self.implementation_use,
b'\x00' * 156)[16:]
return self.desc_tag.record(rec) + rec
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Partition Volume Descriptor.
Parameters:
None.
Returns:
Integer extent location of this UDF Partition Volume Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def new(self, version):
# type: (int) -> None
"""
Create a new UDF Partition Volume Descriptor.
Parameters:
         version - The NSR version to use for this partition; must be 2 or 3.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(5) # FIXME: let the user set serial_number
self.vol_desc_seqnum = 2
self.part_flags = 1 # FIXME: how should we set this?
self.part_num = 0 # FIXME: how should we set this?
self.part_contents = UDFEntityID()
if version == 2:
self.part_contents.new(2, b'+NSR02')
elif version == 3:
self.part_contents.new(2, b'+NSR03')
else:
raise pycdlibexception.PyCdlibInternalError('Invalid NSR version requested')
self.part_contents_use = UDFPartitionHeaderDescriptor()
self.part_contents_use.new()
self.access_type = 1
self.part_start_location = 0 # This will get set later
self.part_length = 3 # This will get set later
self.impl_ident = UDFEntityID()
self.impl_ident.new(0, b'*pycdlib')
self.implementation_use = b'\x00' * 128 # FIXME: let the user set this
self._initialized = True
def set_extent_location(self, new_location):
# type: (int) -> None
"""
Set the location of this UDF Partition Volume Descriptor.
Parameters:
new_location - The new extent this UDF Partition Volume Descriptor should be located at.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor not initialized')
self.new_extent_loc = new_location
self.desc_tag.tag_location = new_location
def set_start_location(self, new_location):
# type: (int) -> None
"""
Set the location of the start of the UDF partition.
Parameters:
new_location - The new extent the UDF partition should start at.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor not initialized')
self.part_start_location = new_location
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFPartitionVolumeDescriptor):
return NotImplemented
return self.vol_desc_seqnum == other.vol_desc_seqnum and \
self.part_flags == other.part_flags and \
self.part_num == other.part_num and \
self.access_type == other.access_type and \
self.part_start_location == other.part_start_location and \
self.part_length == other.part_length and \
self.implementation_use == other.implementation_use and \
self.desc_tag == other.desc_tag and \
self.part_contents == other.part_contents and \
self.impl_ident == other.impl_ident and \
self.part_contents_use == other.part_contents_use
class UDFType0PartitionMap(object):
"""A class representing a UDF Type 0 Partition Map (ECMA-167, Part 3, 10.7)."""
__slots__ = ('_initialized', 'data')
FMT = '=BB'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Type 0 Partition Map.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 0 Partition Map already initialized')
(map_type, map_length) = struct.unpack_from(self.FMT, data, 0)
if map_type != 0:
raise pycdlibexception.PyCdlibInvalidISO('UDF Type 0 Partition Map type is not 0')
if map_length != len(data):
raise pycdlibexception.PyCdlibInvalidISO('UDF Type 0 Partition Map length does not equal data length')
self.data = data[2:]
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Type 0 Partition Map.
Parameters:
None.
Returns:
A string representing this UDF Type 0 Partition Map.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 0 Partition Map not initialized')
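        # The recorded map length includes the two bytes for the map type and
        # map length fields themselves.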
return struct.pack(self.FMT, 0, 2 + len(self.data)) + self.data
def new(self):
# type: () -> None
"""
Create a new UDF Type 0 Partition Map.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 0 Partition Map already initialized')
self.data = b'' # FIXME: let the user set this
self._initialized = True
class UDFType1PartitionMap(object):
"""A class representing a UDF Type 1 Partition Map (ECMA-167, Part 3, 10.7)."""
__slots__ = ('_initialized', 'part_num', 'vol_seqnum')
FMT = '<BBHH'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Type 1 Partition Map.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 1 Partition Map already initialized')
(map_type, map_length, self.vol_seqnum,
self.part_num) = struct.unpack_from(self.FMT, data, 0)
if map_type != 1:
raise pycdlibexception.PyCdlibInvalidISO('UDF Type 1 Partition Map type is not 1')
if map_length != 6:
raise pycdlibexception.PyCdlibInvalidISO('UDF Type 1 Partition Map length is not 6')
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Type 1 Partition Map.
Parameters:
None.
Returns:
A string representing this UDF Type 1 Partition Map.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 1 Partition Map not initialized')
return struct.pack(self.FMT, 1, 6, self.vol_seqnum, self.part_num)
def new(self):
# type: () -> None
"""
Create a new UDF Type 1 Partition Map.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 1 Partition Map already initialized')
self.part_num = 0 # FIXME: let the user set this
self.vol_seqnum = 1 # FIXME: let the user set this
self._initialized = True
class UDFType2PartitionMap(object):
"""A class representing a UDF Type 2 Partition Map (ECMA-167, Part 3, 10.7)."""
__slots__ = ('_initialized', 'part_ident')
FMT = '=BB62s'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Type 2 Partition Map.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 2 Partition Map already initialized')
(map_type, map_length, self.part_ident) = struct.unpack_from(self.FMT, data, 0)
if map_type != 2:
raise pycdlibexception.PyCdlibInvalidISO('UDF Type 2 Partition Map type is not 2')
if map_length != 64:
raise pycdlibexception.PyCdlibInvalidISO('UDF Type 2 Partition Map length is not 64')
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Type 2 Partition Map.
Parameters:
None.
Returns:
A string representing this UDF Type 2 Partition Map.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 2 Partition Map not initialized')
return struct.pack(self.FMT, 2, 64, self.part_ident)
def new(self):
# type: () -> None
"""
        Create a new UDF Type 2 Partition Map.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Type 2 Partition Map already initialized')
self.part_ident = b'\x00' * 62 # FIXME: let the user set this
self._initialized = True
class UDFExtendedAD(object):
"""A class representing a UDF Extended Allocation Descriptor (ECMA-167, Part 4, 14.14.3)."""
__slots__ = ('_initialized', 'extent_length', 'recorded_length',
'information_length', 'extent_location', 'impl_use')
FMT = '<LLL6s2s'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
        Parse the passed in data into a UDF Extended Allocation Descriptor.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Extended Allocation descriptor already initialized')
(self.extent_length, self.recorded_length, self.information_length,
extent_location, self.impl_use) = struct.unpack_from(self.FMT, data, 0)
self.extent_location = UDFLBAddr()
self.extent_location.parse(extent_location)
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Extended Allocation Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Extended Allocation Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Extended Allocation Descriptor not initialized')
return struct.pack(self.FMT, self.extent_length, self.recorded_length,
self.information_length,
self.extent_location.record(), self.impl_use)
def new(self):
# type: () -> None
"""
Create a new UDF Extended AD.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Extended Allocation Descriptor already initialized')
self.extent_length = 0 # FIXME: let the user set this
self.recorded_length = 0 # FIXME: let the user set this
self.information_length = 0 # FIXME: let the user set this
self.extent_location = UDFLBAddr()
self.extent_location.new(0)
self.impl_use = b'\x00\x00'
self._initialized = True
class UDFShortAD(object):
"""A class representing a UDF Short Allocation Descriptor (ECMA-167, Part 4, 14.14.1)."""
__slots__ = ('_initialized', 'extent_length', 'log_block_num', 'offset',
'extent_type')
FMT = '<LL'
def __init__(self):
# type: () -> None
self.offset = 0
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Short AD.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Short Allocation descriptor already initialized')
(self.extent_length,
self.log_block_num) = struct.unpack_from(self.FMT, data, 0)
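        # The top two bits of the on-disk extent length encode the extent type
        # (ECMA-167 4/14.14.1.1); the low 30 bits are the length in bytes.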
        self.extent_type = (self.extent_length & 0xc0000000) >> 30
        self.extent_length = self.extent_length & 0x3FFFFFFF
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Short AD.
Parameters:
None.
Returns:
A string representing this UDF Short AD.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Short AD not initialized')
length = self.extent_length | (self.extent_type << 30)
return struct.pack(self.FMT, length, self.log_block_num)
def new(self, length):
# type: (int) -> None
"""
Create a new UDF Short AD.
Parameters:
length - The length of the data in the allocation.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Short AD already initialized')
if length > 0x3fffffff:
raise pycdlibexception.PyCdlibInternalError('UDF Short AD length must be less than or equal to 0x3fffffff')
self.extent_length = length
self.extent_type = 0 # FIXME: let the user set this
self.log_block_num = 0 # this will get set later
self._initialized = True
def set_extent_location(self, new_location, tag_location): # pylint: disable=unused-argument
# type: (int, int) -> None
"""
Set the location fields of this UDF Short AD.
Parameters:
new_location - The new relative extent that this UDF Short AD references.
tag_location - The new absolute extent that this UDF Short AD references.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Short AD not initialized')
self.log_block_num = tag_location
def length(self): # pylint: disable=no-self-use
# type: () -> int
"""
Method to return the length of the UDF Short Allocation Descriptor.
Parameters:
None.
Returns:
The length of this descriptor in bytes.
"""
return 8
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFShortAD):
return NotImplemented
return self.extent_length == other.extent_length and self.log_block_num == other.log_block_num
class UDFLongAD(object):
"""
A class representing a UDF Long Allocation Descriptor (ECMA-167, Part 4,
14.14.2).
"""
__slots__ = ('_initialized', 'extent_length', 'log_block_num',
'part_ref_num', 'impl_use', 'offset')
FMT = '<LLH6s'
def __init__(self):
# type: () -> None
self.offset = 0
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Long AD.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Long Allocation descriptor already initialized')
(self.extent_length, self.log_block_num, self.part_ref_num,
self.impl_use) = struct.unpack_from(self.FMT, data, 0)
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Long AD.
Parameters:
None.
Returns:
A string representing this UDF Long AD.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Long AD not initialized')
return struct.pack(self.FMT, self.extent_length, self.log_block_num,
self.part_ref_num, self.impl_use)
def new(self, length, blocknum):
# type: (int, int) -> None
"""
Create a new UDF Long AD.
Parameters:
length - The length of the data in the allocation.
blocknum - The logical block number the allocation starts at.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Long AD already initialized')
self.extent_length = length
self.log_block_num = blocknum
self.part_ref_num = 0 # FIXME: let the user set this
self.impl_use = b'\x00' * 6
self._initialized = True
def set_extent_location(self, new_location, tag_location):
# type: (int, int) -> None
"""
Set the location fields of this UDF Long AD.
Parameters:
new_location - The new relative extent that this UDF Long AD references.
tag_location - The new absolute extent that this UDF Long AD references.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Long AD not initialized')
self.log_block_num = tag_location
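        # Stash the new relative extent in the trailing four bytes of the
        # 6-byte implementation use field.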
self.impl_use = b'\x00\x00' + struct.pack('<L', new_location)
def length(self): # pylint: disable=no-self-use
# type: () -> int
"""
Method to return the length of the UDF Long Allocation Descriptor.
Parameters:
None.
Returns:
The length of this descriptor in bytes.
"""
return 16
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFLongAD):
return NotImplemented
return self.extent_length == other.extent_length and \
self.log_block_num == other.log_block_num and \
self.part_ref_num == other.part_ref_num and \
self.impl_use == other.impl_use
class UDFInlineAD(object):
"""
A class representing a UDF Inline Allocation Descriptor. This isn't
explicitly defined in the specification, but is a convenient structure
to use for ICBTag flags type 3 Allocation Descriptors.
"""
__slots__ = ('_initialized', 'extent_length', 'log_block_num', 'offset')
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, extent_length, log_block_num, offset):
# type: (int, int, int) -> None
"""
        Parse the given values into a UDF Inline AD.
        Parameters:
         extent_length - The length of the data in the allocation.
         log_block_num - The logical block number the allocation starts at.
         offset - The offset that the allocation starts at.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Inline Allocation Descriptor already initialized')
self.extent_length = extent_length
self.log_block_num = log_block_num
self.offset = offset
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Inline AD.
Parameters:
None.
Returns:
A string representing this UDF Inline AD.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Inline AD not initialized')
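        # Inline allocation descriptors describe data embedded directly in the
        # File Entry, so there is nothing separate to record here.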
return b''
def new(self, extent_length, log_block_num, offset):
# type: (int, int, int) -> None
"""
Create a new UDF Inline AD.
Parameters:
extent_length - The length of the data in the allocation.
log_block_num - The logical block number the allocation starts at.
offset - The offset the allocation starts at.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Inline AD already initialized')
self.extent_length = extent_length
self.log_block_num = log_block_num
self.offset = offset
self._initialized = True
def set_extent_location(self, new_location, tag_location): # pylint: disable=unused-argument
# type: (int, int) -> None
"""
Set the location fields of this UDF Inline AD.
Parameters:
new_location - The new relative extent that this UDF Inline AD references.
tag_location - The new absolute extent that this UDF Inline AD references.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Inline AD not initialized')
self.log_block_num = tag_location
def length(self):
# type: () -> int
"""
Method to return the length of the UDF Inline Allocation Descriptor.
Parameters:
None.
Returns:
The length of this descriptor in bytes.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Inline AD not initialized')
return self.extent_length
class UDFLogicalVolumeDescriptor(object):
"""A class representing a UDF Logical Volume Descriptor (ECMA-167, Part 3, 10.6)."""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc',
'vol_desc_seqnum', 'desc_char_set', 'logical_vol_ident',
'implementation_use', 'integrity_sequence', 'desc_tag',
'domain_ident', 'impl_ident', 'partition_maps',
'logical_volume_contents_use')
FMT = '<16sL64s128sL32s16sLL32s128s8s72s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self.partition_maps = [] # type: List[Union[UDFType0PartitionMap, UDFType1PartitionMap, UDFType2PartitionMap]]
self._initialized = False
def parse(self, data, extent, desc_tag):
# type: (bytes, int, UDFTag) -> None
"""
Parse the passed in data into a UDF Logical Volume Descriptor.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor already initialized')
(tag_unused, self.vol_desc_seqnum, desc_char_set,
self.logical_vol_ident, logical_block_size, domain_ident,
logical_volume_contents_use, map_table_length, num_partition_maps,
impl_ident, self.implementation_use, integrity_sequence,
partition_maps) = struct.unpack_from(self.FMT, data, 0)
self.desc_tag = desc_tag
self.desc_char_set = UDFCharspec()
self.desc_char_set.parse(desc_char_set)
if logical_block_size != 2048:
raise pycdlibexception.PyCdlibInvalidISO('Volume Descriptor block size is not 2048')
self.domain_ident = UDFEntityID()
self.domain_ident.parse(domain_ident)
if self.domain_ident.identifier[:19] != b'*OSTA UDF Compliant':
raise pycdlibexception.PyCdlibInvalidISO("Volume Descriptor Identifier not '*OSTA UDF Compliant'")
if map_table_length >= len(partition_maps):
raise pycdlibexception.PyCdlibInvalidISO('Map table length greater than size of partition map data; ISO corrupt')
self.impl_ident = UDFEntityID()
self.impl_ident.parse(impl_ident)
self.integrity_sequence = UDFExtentAD()
self.integrity_sequence.parse(integrity_sequence)
offset = 0
map_table_length_left = map_table_length
for p_unused in range(0, num_partition_maps):
# The generic partition map starts with 1 byte for the type and
# 1 byte for the length.
(map_type, map_len) = struct.unpack_from('=BB', partition_maps, offset)
            if offset + map_len > len(partition_maps):
raise pycdlibexception.PyCdlibInvalidISO('Partition map goes beyond end of data, ISO corrupt')
if offset + map_len > map_table_length_left:
raise pycdlibexception.PyCdlibInvalidISO('Partition map goes beyond map_table_length left, ISO corrupt')
if map_type == 0:
partmap0 = UDFType0PartitionMap()
partmap0.parse(partition_maps[offset:offset + map_len])
self.partition_maps.append(partmap0)
elif map_type == 1:
partmap1 = UDFType1PartitionMap()
partmap1.parse(partition_maps[offset:offset + map_len])
self.partition_maps.append(partmap1)
elif map_type == 2:
partmap2 = UDFType2PartitionMap()
partmap2.parse(partition_maps[offset:offset + map_len])
self.partition_maps.append(partmap2)
else:
raise pycdlibexception.PyCdlibInvalidISO('Unsupported partition map type')
offset += map_len
map_table_length_left -= map_len
self.logical_volume_contents_use = UDFLongAD()
self.logical_volume_contents_use.parse(logical_volume_contents_use)
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Logical Volume Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Logical Volume Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor not initialized')
all_partmaps = b''
for part in self.partition_maps:
all_partmaps += part.record()
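        # The FMT string reserves a fixed 72 bytes for the partition maps, so
        # pad the concatenated partition map records out to that length.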
partmap_pad = BytesIO()
utils.zero_pad(partmap_pad, len(all_partmaps), 72)
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.desc_char_set.record(),
self.logical_vol_ident, 2048,
self.domain_ident.record(),
self.logical_volume_contents_use.record(),
len(all_partmaps), len(self.partition_maps),
self.impl_ident.record(), self.implementation_use,
self.integrity_sequence.record(),
all_partmaps + partmap_pad.getvalue())[16:]
return self.desc_tag.record(rec) + rec
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Logical Volume
Descriptor.
Parameters:
None.
Returns:
Integer extent location of this UDF Logical Volume Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def new(self):
# type: () -> None
"""
Create a new UDF Logical Volume Descriptor.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(6) # FIXME: let the user set serial_number
self.vol_desc_seqnum = 3
self.desc_char_set = UDFCharspec()
self.desc_char_set.new(0, b'OSTA Compressed Unicode') # FIXME: let the user set this
self.logical_vol_ident = _ostaunicode_zero_pad('CDROM', 128)
self.domain_ident = UDFEntityID()
self.domain_ident.new(0, b'*OSTA UDF Compliant', b'\x02\x01\x03')
self.logical_volume_contents_use = UDFLongAD()
self.logical_volume_contents_use.new(4096, 0)
self.impl_ident = UDFEntityID()
self.impl_ident.new(0, b'*pycdlib')
self.implementation_use = b'\x00' * 128 # FIXME: let the user set this
self.integrity_sequence = UDFExtentAD()
self.integrity_sequence.new(4096, 0) # The location will get set later.
self._initialized = True
def add_partition_map(self, partmaptype):
# type: (int) -> None
"""
Add a new partition map to this UDF Logical Volume Descriptor.
Parameters:
partmaptype - Must be 0, 1, or 2.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor not initialized')
partmap = None # type: Optional[Union[UDFType0PartitionMap, UDFType1PartitionMap, UDFType2PartitionMap]]
if partmaptype == 0:
partmap = UDFType0PartitionMap()
elif partmaptype == 1:
partmap = UDFType1PartitionMap()
elif partmaptype == 2:
partmap = UDFType2PartitionMap()
else:
raise pycdlibexception.PyCdlibInternalError('UDF Partition map type must be 0, 1, or 2')
partmap.new()
all_partmaps = b''
for part in self.partition_maps:
all_partmaps += part.record()
if len(all_partmaps) > 72:
raise pycdlibexception.PyCdlibInternalError('Too many UDF partition maps')
self.partition_maps.append(partmap)
def set_extent_location(self, new_location):
# type: (int) -> None
"""
Set the location of this UDF Logical Volume Descriptor.
Parameters:
new_location - The new extent this UDF Logical Volume Descriptor should be located at.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor not initialized')
self.new_extent_loc = new_location
self.desc_tag.tag_location = new_location
def set_integrity_location(self, integrity_extent):
# type: (int) -> None
"""
Set the location of the UDF Integrity sequence that this descriptor
references.
Parameters:
integrity_extent - The new extent that the UDF Integrity sequence
should start at.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor not initialized')
self.integrity_sequence.extent_location = integrity_extent
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFLogicalVolumeDescriptor):
return NotImplemented
return self.vol_desc_seqnum == other.vol_desc_seqnum and \
self.desc_char_set == other.desc_char_set and \
self.logical_vol_ident == other.logical_vol_ident and \
self.implementation_use == other.implementation_use and \
self.integrity_sequence == other.integrity_sequence and \
self.desc_tag == other.desc_tag and \
self.domain_ident == other.domain_ident and \
self.impl_ident == other.impl_ident and \
self.logical_volume_contents_use == other.logical_volume_contents_use
class UDFUnallocatedSpaceDescriptor(object):
"""A class representing a UDF Unallocated Space Descriptor (ECMA-167, Part 3, 10.8)."""
__slots__ = ('_initialized', 'orig_extent_loc', 'new_extent_loc',
'vol_desc_seqnum', 'desc_tag', 'num_alloc_descriptors',
'alloc_descs')
FMT = '<16sLL488s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self.alloc_descs = [] # type: List[UDFExtentAD]
self._initialized = False
def parse(self, data, extent, desc_tag):
# type: (bytes, int, UDFTag) -> None
"""
Parse the passed in data into a UDF Unallocated Space Descriptor.
Parameters:
data - The data to parse.
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Unallocated Space Descriptor already initialized')
(tag_unused, self.vol_desc_seqnum,
self.num_alloc_descriptors,
alloc_descs) = struct.unpack_from(self.FMT, data, 0)
self.desc_tag = desc_tag
if self.num_alloc_descriptors * 8 > len(alloc_descs):
raise pycdlibexception.PyCdlibInvalidISO('Too many allocation descriptors')
for num in range(0, self.num_alloc_descriptors):
offset = num * 8
extent_ad = UDFExtentAD()
extent_ad.parse(alloc_descs[offset:offset + 8])
self.alloc_descs.append(extent_ad)
self.orig_extent_loc = extent
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Unallocated Space Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Unallocated Space Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Unallocated Space Descriptor not initialized')
alloc_desc_bytes = b''
for desc in self.alloc_descs:
alloc_desc_bytes += desc.record()
alloc_desc_bytes += b'\x00' * (488 - len(alloc_desc_bytes))
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.num_alloc_descriptors,
alloc_desc_bytes)[16:]
return self.desc_tag.record(rec) + rec
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Unallocated Space Descriptor.
Parameters:
None.
Returns:
Integer extent location of this UDF Unallocated Space Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Unallocated Space Descriptor not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def new(self):
# type: () -> None
"""
Create a new UDF Unallocated Space Descriptor.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Unallocated Space Descriptor already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(7) # FIXME: let the user set serial_number
self.vol_desc_seqnum = 4
self.num_alloc_descriptors = 0
self._initialized = True
def set_extent_location(self, new_location):
# type: (int) -> None
"""
Set the location of this UDF Unallocated Space Descriptor.
Parameters:
new_location - The new extent this UDF Unallocated Space Descriptor should be located at.
Returns:
Nothing.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Unallocated Space Descriptor not initialized')
self.new_extent_loc = new_location
self.desc_tag.tag_location = new_location
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, UDFUnallocatedSpaceDescriptor):
return NotImplemented
return self.vol_desc_seqnum == other.vol_desc_seqnum and \
self.desc_tag == other.desc_tag and \
self.num_alloc_descriptors == other.num_alloc_descriptors
class UDFTerminatingDescriptor(object):
"""A class representing a UDF Terminating Descriptor (ECMA-167, Part 3, 10.9)."""
__slots__ = ('initialized', 'orig_extent_loc', 'new_extent_loc',
'desc_tag')
FMT = '=16s496s'
def __init__(self):
# type: () -> None
self.new_extent_loc = -1
self.initialized = False
def parse(self, extent, desc_tag):
# type: (int, UDFTag) -> None
"""
Parse the passed in data into a UDF Terminating Descriptor.
Parameters:
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor already initialized')
self.desc_tag = desc_tag
self.orig_extent_loc = extent
self.initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Terminating Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Terminating Descriptor.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16, b'\x00' * 496)[16:]
return self.desc_tag.record(rec) + rec
def extent_location(self):
# type: () -> int
"""
Get the extent location of this UDF Terminating Descriptor.
Parameters:
None.
Returns:
Integer extent location of this UDF Terminating Descriptor.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor not initialized')
if self.new_extent_loc < 0:
return self.orig_extent_loc
return self.new_extent_loc
def new(self):
# type: () -> None
"""
Create a new UDF Terminating Descriptor.
Parameters:
None.
Returns:
Nothing.
"""
if self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(8) # FIXME: let the user set serial_number
self.initialized = True
def set_extent_location(self, new_location, tag_location=None):
# type: (int, int) -> None
"""
Set the location of this UDF Terminating Descriptor.
Parameters:
new_location - The new extent this UDF Terminating Descriptor should be located at.
        tag_location - The tag location to set for this UDF Terminating Descriptor.
Returns:
Nothing.
"""
if not self.initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor not initialized')
self.new_extent_loc = new_location
if tag_location is None:
tag_location = new_location
self.desc_tag.tag_location = tag_location
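# Illustrative sketch (hypothetical helper, not part of pycdlib): the usual
# lifecycle of the descriptor classes above when building a new image is
# new() -> set_extent_location() -> record(); extent_location() then reports
# the newly assigned extent.  Assumes UDFTag, defined earlier in this module,
# behaves as it is used by the classes above.
def _udf_terminating_descriptor_example():
    term = UDFTerminatingDescriptor()
    term.new()
    term.set_extent_location(257)
    return term.extent_location(), term.record()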
class UDFLogicalVolumeHeaderDescriptor(object):
"""A class representing a UDF Logical Volume Header Descriptor (ECMA-167, Part 4, 14.15)."""
__slots__ = ('_initialized', 'unique_id')
FMT = '<Q24s'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Logical Volume Header Descriptor.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Header Descriptor already initialized')
(self.unique_id, reserved_unused) = struct.unpack_from(self.FMT, data, 0)
self._initialized = True
def record(self):
# type: () -> bytes
"""
Generate the string representing this UDF Logical Volume Header
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Logical Volume Header Descriptor.
"""
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Header Descriptor not initialized')
return struct.pack(self.FMT, self.unique_id, b'\x00' * 24)
def new(self):
# type: () -> None
"""
Create a new UDF Logical Volume Header Descriptor.
Parameters:
None.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Header Descriptor already initialized')
self.unique_id = 261
self._initialized = True
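# Illustrative sketch (hypothetical helper, not part of pycdlib): the header
# descriptor above packs to struct.calcsize('<Q24s') == 32 bytes and can be
# round-tripped through parse(); new() seeds unique_id with 261.
def _udf_lv_header_descriptor_example():
    src = UDFLogicalVolumeHeaderDescriptor()
    src.new()
    data = src.record()    # unique_id followed by 24 reserved zero bytes
    other = UDFLogicalVolumeHeaderDescriptor()
    other.parse(data)
    return other.unique_id    # 261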
class UDFLogicalVolumeImplementationUse(object):
"""A class representing a UDF Logical Volume Implementation Use."""
__slots__ = ('_initialized', 'num_files', 'num_dirs',
'min_udf_read_revision', 'min_udf_write_revision',
'max_udf_write_revision', 'impl_id', 'impl_use')
FMT = '<32sLLHHH'
def __init__(self):
# type: () -> None
self._initialized = False
def parse(self, data):
# type: (bytes) -> None
"""
Parse the passed in data into a UDF Logical Volume Implementation Use.
Parameters:
data - The data to parse.
Returns:
Nothing.
"""
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Implementation Use already initialized')
(impl_id, self.num_files, self.num_dirs, self.min_udf_read_revision,
self.min_udf_write_revision,
self.max_udf_write_revision) = struct.unpack_from(self.FMT, data, 0)
        self.impl_id = UDFEntityID()
import types
from functools import wraps
import numpy as np
import datetime
import collections
import warnings
import copy
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable, filter, map
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, CategoricalIndex, _ensure_index
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import (cache_readonly, Appender, make_signature,
deprecate_kwarg)
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object,
is_datetime_or_timedelta_dtype, is_bool,
is_bool_dtype, AbstractMethodError)
from pandas.core.config import option_context
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions while
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'unique'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift'])
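# Illustrative sketch (hypothetical helper, not part of pandas): the dict form
# of ``agg`` described in _agg_doc above maps column names to the function
# applied to that column within each group.
def _agg_dict_example():
    df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3], 'C': [4., 5., 6.]})
    return df.groupby('A').agg({'B': np.sum, 'C': np.mean})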
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
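# Illustrative sketch (hypothetical helper, not part of pandas): the compat
# helpers above drop missing values first, so the call below returns 3.0 (the
# leading NaN is ignored).
def _first_compat_example():
    return _first_compat(np.array([np.nan, 3.0, 4.0]))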
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
    These are local specifications and will override the 'global' settings,
    that is, the axis and level parameters that are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object. For full specification of available frequencies, please see
`here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html>`_.
axis : number/name of the axis, defaults to 0
    sort : boolean, default False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
Specify a resample operation on the column 'date'
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
with a frequency of 60s
>>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key], axis=self.axis,
level=self.level, sort=self.sort)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, setup the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax.get_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
""" default to the standard binner here """
group_axis = obj._get_axis(self.axis)
return Grouping(group_axis, None, obj=obj, name=self.key,
level=self.level, sort=self.sort, in_axis=False)
@property
def groups(self):
return self.grouper.groups
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self, *args, **kwargs):
return self.plot(*args, **kwargs)
f.__name__ = 'plot'
return self._groupby.apply(f)
def __getattr__(self, name):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.groups)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_indices(self, names):
""" safe get multiple indices, translate keys for datelike to underlying repr """
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
                    msg = ("must supply a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple([f(n) for f, n in zip(converters, name)]) for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
return self._get_indices([name])[0]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
self._group_selection = ax.difference(Index(groupers)).tolist()
def _set_result_index_ordered(self, result):
# set the result index on the passed values object
# return the new object
# related 8046
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut of we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.index = index
result = result.sort_index()
result.index = self.obj.index
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
        # need to set up the selection, since the grouping columns are not
        # passed directly but are carried by the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
            if 'axis' not in kwargs_with_axis or kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
                    # this can be called recursively, so we need to raise
                    # ValueError if we don't have this method; the ValueError
                    # signals the aggregation machinery to mark this column
                    # as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
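        Examples
        --------
        Illustrative only (``df`` is assumed to be a DataFrame with an 'A'
        column):
        >>> grouped = df.groupby('A')
        >>> grouped.get_group('foo')   # the rows whose group key equals 'foo'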
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
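        Examples
        --------
        A minimal, illustrative sketch (``df`` is assumed to have columns
        'A' and 'B'):
        >>> grouped = df.groupby('A')
        >>> grouped.apply(lambda x: x['B'].sum())     # f(chunk) -> scalar
        >>> grouped.apply(lambda x: x.describe())     # f(chunk) -> DataFrame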
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def irow(self, i):
"""
DEPRECATED. Use ``.nth(i)`` instead
"""
# 10177
warnings.warn("irow(i) is deprecated. Please use .nth(i)",
FutureWarning, stacklevel=2)
return self.nth(i)
def count(self):
""" Compute count of group, excluding missing values """
# defined here for API doc
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
def ohlc(self):
"""
        Compute open, high, low and close values within each group, excluding
        missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError("dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
m = self.grouper._max_groupsize
# filter out values that are outside [-m, m)
pos_nth_values = [i for i in nth_values if i >= 0 and i < m]
neg_nth_values = [i for i in nth_values if i < 0 and i >= -m]
self._set_selection_from_grouper()
if not dropna: # good choice
if not pos_nth_values and not neg_nth_values:
# no valid nth values
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
for i in pos_nth_values:
rng[i] = True
is_nth = self._cumcount_array(rng)
if neg_nth_values:
rng = np.zeros(m, dtype=bool)
for i in neg_nth_values:
rng[- i - 1] = True
is_nth |= self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = MultiIndex.from_arrays([self.obj[name][is_nth] for name in names]).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def cumprod(self, axis=0):
"""
Cumulative product for each group
"""
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis))
return self._cython_transform('cumprod')
def cumsum(self, axis=0):
"""
Cumulative sum for each group
"""
if axis != 0:
            return self.apply(lambda x: x.cumsum(axis=axis))
return self._cython_transform('cumsum')
def shift(self, periods=1, freq=None, axis=0):
"""
Shift each group by periods observations
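        Examples
        --------
        Illustrative only; the shift happens within each group, so values do
        not leak across group boundaries:
        >>> df.groupby('A')['B'].shift(1)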
"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
labels, _, ngroups = self.grouper.group_info
# filled in by Cython
indexer = np.zeros_like(labels)
_algos.group_shift_indexer(indexer, labels, ngroups, periods)
output = {}
for name, obj in self._iterate_slices():
output[name] = com.take_nd(obj.values, indexer)
return self._wrap_transformed_output(output)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, ascending=True):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
        try to cast the result back to the original dtype of obj; we may have
        round-tripped through object dtype in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_transform(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
        # iterate through "columns" (excluding the exclusions) to populate
        # the output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
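# Illustrative sketch (hypothetical helper, not part of pandas): the module
# level ``groupby`` above mirrors obj.groupby(...), dispatching on the type of
# the object that is passed in.
def _groupby_dispatch_example():
    s = Series([1, 2, 3], index=['a', 'a', 'b'])
    return groupby(s, ['x', 'x', 'y']).sum()    # handled by SeriesGroupBy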
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis, self.groupings, self.sort, self.group_keys = \
axis, groupings, sort, group_keys
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
            except Exception:
                # fall through to the slow path below, where any error from
                # the user function will surface to the caller
                pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
ids, _, ngroup = self.group_info
ids = com._ensure_platform_int(ids)
out = np.bincount(ids[ids != -1], minlength=ngroup)
return Series(out, index=self.result_index, dtype='int64')
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def is_monotonic(self):
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape,
sort=True, xnull=True)
return _compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.labels, np.arange(len(ping.group_index))
@cache_readonly
def ngroups(self):
return len(self.result_index)
@property
def recons_labels(self):
comp_ids, obs_ids, _ = self.group_info
labels = (ping.labels for ping in self.groupings)
return decons_obs_group_ids(comp_ids,
obs_ids, self.shape, labels, xnull=True)
@cache_readonly
def result_index(self):
if not self.compressed and len(self.groupings) == 1:
return self.groupings[0].group_index.rename(self.names[0])
return MultiIndex(levels=[ping.group_index for ping in self.groupings],
labels=self.recons_labels,
verify_integrity=False,
names=self.names)
def get_group_levels(self):
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
name_list = []
for ping, labels in zip(self.groupings, self.recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'aggregate': {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
},
'transform': {
'cumprod' : 'group_cumprod',
'cumsum' : 'group_cumsum',
}
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
def _get_cython_function(self, kind, how, values, is_numeric):
dtype_str = values.dtype.name
def get_func(fname):
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(_algos, fname, None)
if f is not None and is_numeric:
return f
# otherwise find dtype-specific version, falling back to object
for dt in [dtype_str, 'object']:
                f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
ftype = self._cython_functions[kind][how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
            raise NotImplementedError("function is not implemented for this "
                                      "dtype: [how->%s,dtype->%s]" %
                                      (how, dtype_str))
return func, dtype_str
def _cython_operation(self, kind, values, how, axis):
assert kind in ['transform', 'aggregate']
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError("arity of more than 1 is not "
"supported for the 'how' argument")
out_shape = (self.ngroups,) + values.shape[1:]
is_numeric = is_numeric_dtype(values.dtype)
if is_datetime_or_timedelta_dtype(values.dtype):
values = values.view('int64')
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _algos.ensure_float64(values)
elif com.is_integer_dtype(values):
values = values.astype('int64', copy=False)
elif is_numeric:
values = _algos.ensure_float64(values)
else:
values = values.astype(object)
try:
func, dtype_str = self._get_cython_function(kind, how, values, is_numeric)
except NotImplementedError:
if is_numeric:
values = _algos.ensure_float64(values)
func, dtype_str = self._get_cython_function(kind, how, values, is_numeric)
else:
raise
if is_numeric:
out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = 'object'
labels, _, _ = self.group_info
if kind == 'aggregate':
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, labels, func, is_numeric)
elif kind == 'transform':
result = np.empty_like(values, dtype=out_dtype)
result.fill(np.nan)
            # temporary storage for running-total type transforms
accum = np.empty(out_shape, dtype=out_dtype)
result = self._transform(result, accum, values, labels, func, is_numeric)
if com.is_integer_dtype(result):
if len(result[result == tslib.iNaT]) > 0:
result = result.astype('float64')
result[result == tslib.iNaT] = np.nan
if kind == 'aggregate' and self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
com._ensure_object(result),
(counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def aggregate(self, values, how, axis=0):
return self._cython_operation('aggregate', values, how, axis)
def transform(self, values, how, axis=0):
return self._cython_operation('transform', values, how, axis)
def _aggregate(self, result, counts, values, comp_ids, agg_func, is_numeric):
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def _transform(self, result, accum, values, comp_ids, transform_func, is_numeric):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
                transform_func(result[:, :, i], chunk, comp_ids, accum)
else:
transform_func(result, values, comp_ids, accum)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
    Zero and last edge are excluded in result, so for instance the first
    bin is values[0:bins[0]] and the last is values[bins[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
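# Worked illustration (hypothetical helper, not part of pandas): with the
# inputs below, generate_bins_generic returns array([2, 4]), i.e. the bins are
# values[0:2], values[2:4] and values[4:], matching the docstring above.
def _generate_bins_example():
    values = np.array([1, 2, 3, 4, 5])
    binner = np.array([1, 3, 5])
    return generate_bins_generic(values, binner, closed='left')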
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
rep = com._ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return comp_ids.astype('int64', copy=False), \
obs_group_ids.astype('int64', copy=False), ngroups
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isnull(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = copy.deepcopy(BaseGrouper._cython_functions)
_cython_functions['aggregate']['ohlc'] = 'group_ohlc'
_cython_functions['aggregate'].pop('median')
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True, in_axis=False):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
        # dependent on the passed in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
# must have an ordered categorical
if self.sort:
if not self.grouper.ordered:
# technically we cannot group on an unordered Categorical
# but this a user convenience to do so; the ordering
# is preserved and if it's a reduction it doesn't make any difference
pass
# fix bug #GH8868 sort=False being ignored in categorical groupby
else:
cat = self.grouper.unique()
self.grouper = self.grouper.reorder_categories(cat.categories)
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
c = self.grouper.categories
self._group_index = CategoricalIndex(Categorical.from_codes(np.arange(len(c)),
categories=c,
ordered=self.grouper.ordered))
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper, (Series, Index, Categorical, np.ndarray)):
if getattr(self.grouper, 'ndim', 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._labels is None or self._group_index is None:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
@cache_readonly
def groups(self):
return self.index.groupby(self.grouper)
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
    Groupers enable local references to axis, level, and sort, while
    the passed-in axis, level, and sort are 'global'.
    This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and not any_groupers
and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
    # if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != len(obj):
raise ValueError("Categorical dtype grouper must have len(grouper) == len(data)")
ping = Grouping(group_axis, gpr, obj=obj, name=name,
level=level, sort=sort, in_axis=in_axis)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
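# Illustrative sketch of what _convert_grouper returns for common inputs;
# the toy objects below are assumptions made up for this example.
#
# >>> import numpy as np
# >>> from pandas import Index, Series
# >>> axis = Index(['a', 'b', 'c'])
# >>> _convert_grouper(axis, {'a': 1, 'b': 2, 'c': 1})        # dict -> its .get method
# >>> _convert_grouper(axis, Series([1, 2, 1], index=axis))   # aligned Series -> its values
# >>> _convert_grouper(axis, np.array([1, 2, 1]))             # array-like -> returned as-is (length-checked)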
def _whitelist_method_generator(klass, whitelist) :
"""
Yields all GroupBy member defs for DataFrame/Series names in _whitelist.
Parameters
----------
klass - class where members are defined. Should be Series or DataFrame
whitelist - list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""
%(doc)s
\"""
return self.__getattr__('%(name)s')"""
for name in whitelist :
# don't override anything that was explicitly defined
# in the base class
if hasattr(GroupBy,name) :
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass,name)
doc = f.__doc__
doc = doc if type(doc)==str else ''
if type(f) == types.MethodType :
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name':name,
'doc':doc,
'sig':','.join(decl),
'self':args[0],
'args':','.join(args_by_name)}
else :
wrapper_template = property_wrapper_template
params = {'name':name, 'doc':doc}
yield wrapper_template % params
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = _series_apply_whitelist
for _def_str in _whitelist_method_generator(Series,_series_apply_whitelist) :
exec(_def_str)
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
        Apply an aggregation function or functions to groups, most likely
        yielding a Series, but in some cases a DataFrame, depending on the
        output of the aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self.name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
func = _intercept_cython(func) or func
# if string function
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
ids, _, ngroup = self.grouper.group_info
mask = ids != -1
out = func().values[ids]
if not mask.all():
out = np.where(mask, out, np.nan)
obs = np.zeros(ngroup, dtype='bool')
obs[ids[mask]] = True
if not obs.all():
out = self._try_cast(out, self._selected_obj)
return Series(out, index=self.obj.index)
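    # The core trick above, as a standalone numpy sketch (toy arrays, made up
    # for illustration): a per-group aggregate is broadcast back to row order
    # by fancy-indexing with the group ids.
    #
    # >>> import numpy as np
    # >>> ids = np.array([0, 1, 0, 1])           # group id of each row
    # >>> group_means = np.array([10.0, 20.0])   # one aggregated value per group
    # >>> group_means[ids]                       # -> array([ 10.,  20.,  10.,  20.])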
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notnull(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna=True):
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, \
'val.dtype must be object, got %s' % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
isnull = lambda a: a == -1
else:
isnull = com.isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
return Series(out if ids[0] != -1 else out[1:],
index=self.grouper.result_index,
name=self.name)
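    # The counting scheme above, spelled out on toy arrays (illustration only):
    #
    # >>> import numpy as np
    # >>> ids = np.array([0, 0, 0, 1, 1])    # group ids, already sorted
    # >>> val = np.array([5, 5, 7, 5, 5])    # values, sorted within each group
    # >>> idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]   # group starts -> [0, 3]
    # >>> inc = np.r_[1, val[1:] != val[:-1]]   # 1 wherever a new value appears
    # >>> inc[idx] = 1                          # first item of each group is always new
    # >>> np.add.reduceat(inc, idx)             # -> array([2, 1]) unique counts per group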
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@Appender(Series.nlargest.__doc__)
def nlargest(self, n=5, keep='first'):
        # TODO: when we remove deprecate_kwarg, we can remove these methods
        # and add nlargest and nsmallest to _series_apply_whitelist
return self.apply(lambda x: x.nlargest(n=n, keep=keep))
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@Appender(Series.nsmallest.__doc__)
def nsmallest(self, n=5, keep='first'):
return self.apply(lambda x: x.nsmallest(n=n, keep=keep))
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
from functools import partial
from pandas.tools.tile import cut
from pandas.tools.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins)
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algos.factorize(val, sort=True)
else:
cat, bins = cut(val, bins, retbins=True)
# bins[:-1] for backward compat;
# o.w. cat.categories could be better
lab, lev, dropna = cat.codes, bins[:-1], False
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
inc = np.r_[True, lab[1:] != lab[:-1]]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self.name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype('float')
acc = rep(np.diff(np.r_[idx, len(ids)]))
out /= acc[mask] if dropna else acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if com.is_integer_dtype(out):
out = com._ensure_int64(out)
return Series(out, index=mi)
# for compat. with algos.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype='bool')
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin),
np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how='left')
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
labels.append(left[-1])
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if com.is_integer_dtype(out):
out = com._ensure_int64(out)
return Series(out, index=mi)
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isnull(val)
ids = com._ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups) if ngroups != 0 else []
return Series(out, index=self.grouper.result_index, name=self.name, dtype='int64')
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
result, _ = self.grouper.aggregate(block.values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError("Aggregating on a DataFrame is "
"not supported")
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
try:
v = next(v for v in values if v is not None)
except StopIteration:
# If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys, values, not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if isinstance(v.index, MultiIndex) or key_index is None:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values, index=key_index,
columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
| result = concat(values, keys=key_index, | 9,883 | lcc_e | python | null | 915da6ca76bf0fe2d6117c5742cf91309668d67a8ad2f4e7 |
|
##############################################################################
# pymbar: A Python Library for MBAR
#
# Copyright 2010-2014 University of Virginia, Memorial Sloan-Kettering Cancer Center
# Portions of this software are Copyright (c) 2006-2007 The Regents of the University of California. All Rights Reserved.
# Portions of this software are Copyright (c) 2007-2008 Stanford University and Columbia University.
#
# Authors: Michael Shirts, John Chodera
# Contributors: Kyle Beauchamp
#
# pymbar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pymbar. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
A module implementing the multistate Bennett acceptance ratio (MBAR) method for the analysis
of equilibrium samples from multiple arbitrary thermodynamic states in computing equilibrium
expectations, free energy differences, potentials of mean force, and entropy and enthalpy contributions.
Please reference the following if you use this code in your research:
[1] Shirts MR and Chodera JD. Statistically optimal analysis of samples from multiple equilibrium states.
J. Chem. Phys. 129:124105, 2008. http://dx.doi.org/10.1063/1.2978177
This module contains implementations of
* MBAR - multistate Bennett acceptance ratio estimator
"""
#=========================================================================
import math
import numpy as np
import numpy.linalg as linalg
from pymbar.utils import _logsum, kln_to_kn, kn_to_n, ParameterError
#=========================================================================
# MBAR class definition
#=========================================================================
class MBAR:
"""Multistate Bennett acceptance ratio method (MBAR) for the analysis of multiple equilibrium samples.
Notes
-----
Note that this method assumes the data are uncorrelated.
Correlated data must be subsampled to extract uncorrelated (effectively independent) samples (see example below).
References
----------
[1] Shirts MR and Chodera JD. Statistically optimal analysis of samples from multiple equilibrium states.
J. Chem. Phys. 129:124105, 2008
http://dx.doi.org/10.1063/1.2978177
"""
#=========================================================================
def __init__(self, u_kn, N_k, maximum_iterations=10000, relative_tolerance=1.0e-7, verbose=False, initial_f_k=None, method='adaptive', use_optimized=None, newton_first_gamma=0.1, newton_self_consistent=2, maxrange=1.0e5, initialize='zeros', x_kindices=None):
"""Initialize multistate Bennett acceptance ratio (MBAR) on a set of simulation data.
Upon initialization, the dimensionless free energies for all states are computed.
This may take anywhere from seconds to minutes, depending upon the quantity of data.
After initialization, the computed free energies may be obtained by a call to 'getFreeEnergies()', or
free energies or expectation at any state of interest can be computed by calls to 'computeFreeEnergy()' or
'computeExpectations()'.
        Parameters
        ----------
u_kn : np.ndarray, float, shape=(K, N_max)
u_kn[k,n] is the reduced potential energy of uncorrelated
configuration n evaluated at state k.
u_kln: np.ndarray, float, shape (K, L, N_max)
if the simulation is in form u_kln[k,l,n] it is converted to u_kn format
u_kn = [ u_1(x_1) u_1(x_2) u_1(x_3) . . . u_1(x_n)
u_2(x_1) u_2(x_2) u_2(x_3) . . . u_2(x_n)
. . .
u_k(x_1) u_k(x_2) u_k(x_3) . . . u_k(x_n)]
N_k : np.ndarray, int, shape=(K)
N_k[k] is the number of uncorrelated snapshots sampled from state k.
Some may be zero, indicating that there are no samples from that state.
We assume that the states are ordered such that the first N_k
are from the first state, the 2nd N_k the second state, and so
forth. This only becomes important for BAR -- MBAR does not
care which samples are from which state. We should eventually
allow this assumption to be overwritten by parameters passed
from above, once u_kln is phased out.
maximum_iterations : int, optional
            Set to limit the maximum number of iterations performed (default 10000)
        relative_tolerance : float, optional
            Set to determine the relative tolerance convergence criteria (default 1.0e-7)
        verbose : bool, optional
Set to True if verbose debug output is desired (default False)
initial_f_k : np.ndarray, float, shape=(K), optional
Set to the initial dimensionless free energies to use as a
guess (default None, which sets all f_k = 0)
method : string, optional
Method for determination of dimensionless free energies:
Must be one of 'self-consistent-iteration','Newton-Raphson',
or 'adaptive' (default: 'adaptive').
Newton-Raphson is deprecated and defaults to adaptive
use_optimized : bool, optional
If False, will explicitly disable use of C++ extensions.
If None or True, extensions will be autodetected (default: None)
initialize : string, optional
If equal to 'BAR', use BAR between the pairwise state to
initialize the free energies. Eventually, should specify a path;
for now, it just does it zipping up the states.
(default: 'zeros', unless specific values are passed in.)
newton_first_gamma : float, optional
Initial gamma for newton-raphson (default = 0.1)
newton_self_consistent : int, optional
            Minimum number of self-consistent iterations before
Newton-Raphson iteration (default = 2)
        x_kindices : np.ndarray, int, optional
            Which state is each x from? Usually it doesn't matter, but it does for BAR. We assume the samples
            are in K order (the first N_k[0] samples are from the 0th state, the next N_k[1] samples from
            the 1st state, and so forth).
Notes
-----
The reduced potential energy u_kn[k,n] = u_k(x_{ln}), where the reduced potential energy u_l(x) is defined (as in the text) by:
u_k(x) = beta_k [ U_k(x) + p_k V(x) + mu_k' n(x) ]
where
beta_k = 1/(kB T_k) is the inverse temperature of condition k, where kB is Boltzmann's constant
U_k(x) is the potential energy function for state k
p_k is the pressure at state k (if an isobaric ensemble is specified)
V(x) is the volume of configuration x
mu_k is the M-vector of chemical potentials for the various species, if a (semi)grand ensemble is specified, and ' denotes transpose
n(x) is the M-vector of numbers of the various molecular species for configuration x, corresponding to the chemical potential components of mu_m.
x_n indicates that the samples are from k different simulations of the n states. These simulations need only be a subset of the k states.
The configurations x_ln must be uncorrelated. This can be ensured by subsampling a correlated timeseries with a period larger than the statistical inefficiency,
which can be estimated from the potential energy timeseries {u_k(x_ln)}_{n=1}^{N_k} using the provided utility function 'statisticalInefficiency()'.
See the help for this function for more information.
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
"""
if method == 'Newton-Raphson':
print("Warning: Newton-Raphson is deprecated. Switching to method 'adaptive' which uses the most quickly converging between Newton-Raphson and self-consistent iteration.")
method = 'adaptive'
# Determine whether embedded C++ helper code is available
self.use_embedded_helper_code = False
if (use_optimized != None):
# If user specifies an option, use this.
self.use_embedded_helper_code = use_optimized
else:
# Test whether we can import the helper code.
try:
import _pymbar # import the helper code
# if we have succeeded, use it
self.use_embedded_helper_code = True
if verbose:
print("Using embedded C++ helper code.")
except ImportError:
# import failed
self.use_embedded_helper_code = False
if verbose:
print("Could not import working embedded C++ helper code -- using pure Python version instead.")
# Store local copies of necessary data.
# N_k[k] is the number of samples from state k, some of which might be zero.
self.N_k = np.array(N_k, dtype=np.int32)
self.N = np.sum(self.N_k)
# Get dimensions of reduced potential energy matrix, and convert to KxN form if needed.
if len(np.shape(u_kn)) == 3:
self.K = np.shape(u_kn)[1] # need to set self.K, and it's the second index
u_kn = kln_to_kn(u_kn, N_k=self.N_k)
# u_kn[k,n] is the reduced potential energy of sample n evaluated at state k
self.u_kn = np.array(u_kn, dtype=np.float64)
[K, N] = np.shape(u_kn)
if verbose:
print("K (total states) = %d, total samples = %d" % (K, N))
if np.sum(N_k) != N:
raise ParameterError(
                'The sum of all N_k must equal the total number of samples (length of the second dimension of u_kn).')
# Store local copies of other data
self.K = K # number of thermodynamic states energies are evaluated at
# N = \sum_{k=1}^K N_k is the total number of samples
        self.N = N  # total number of samples
        # if x_kindices is not defined, identify from which state each sample comes.
        if x_kindices is not None:
            self.x_kindices = x_kindices
        else:
            self.x_kindices = np.zeros(N, dtype=np.int32)
            Nsum = 0
            for k in range(K):
                self.x_kindices[Nsum:Nsum+N_k[k]] = k
                Nsum += N_k[k]
# verbosity level -- if True, will print extra debug information
self.verbose = verbose
# perform consistency checks on the data.
# if, for any set of data, all reduced potential energies are the same,
# they are probably the same state. We check to within
# relative_tolerance.
self.samestates = []
if self.verbose:
for k in range(K):
for l in range(k):
diffsum = 0
uzero = u_kn[k, :] - u_kn[l, :]
diffsum += np.dot(uzero, uzero)
if (diffsum < relative_tolerance):
self.samestates.append([k, l])
self.samestates.append([l, k])
print('')
print('Warning: states %d and %d have the same energies on the dataset.' % (l, k))
print('They are therefore likely to to be the same thermodynamic state. This can occasionally cause')
print('numerical problems with computing the covariance of their energy difference, which must be')
print('identically zero in any case. Consider combining them into a single state.')
print('')
# Print number of samples from each state.
if self.verbose:
print("N_k = ")
print(N_k)
# Determine list of k indices for which N_k != 0
self.states_with_samples = np.where(self.N_k != 0)[0]
self.states_with_samples = self.states_with_samples.astype(np.int32)
# Number of states with samples.
self.K_nonzero = self.states_with_samples.size
if verbose:
print("There are %d states with samples." % self.K_nonzero)
# Initialize estimate of relative dimensionless free energy of each state to zero.
# Note that f_k[0] will be constrained to be zero throughout.
# this is default
self.f_k = np.zeros([self.K], dtype=np.float64)
# If an initial guess of the relative dimensionless free energies is
# specified, start with that.
        if initial_f_k is not None:
if self.verbose:
print("Initializing f_k with provided initial guess.")
# Cast to np array.
initial_f_k = np.array(initial_f_k, dtype=np.float64)
# Check shape
if initial_f_k.shape != self.f_k.shape:
raise ParameterError(
"initial_f_k must be a %d-dimensional np array." % self.K)
# Initialize f_k with provided guess.
self.f_k = initial_f_k
if self.verbose:
print(self.f_k)
# Shift all free energies such that f_0 = 0.
self.f_k[:] = self.f_k[:] - self.f_k[0]
else:
# Initialize estimate of relative dimensionless free energies.
self._initializeFreeEnergies(verbose, method=initialize)
if self.verbose:
print("Initial dimensionless free energies with method %s" % (initialize))
print("f_k = ")
print(self.f_k)
# Solve nonlinear equations for free energies of states with samples.
if (maximum_iterations > 0):
            # Determine dimensionless free energies.
if method == 'self-consistent-iteration':
# Use self-consistent iteration of MBAR equations.
self._selfConsistentIteration(
maximum_iterations=maximum_iterations, relative_tolerance=relative_tolerance, verbose=verbose)
# take both steps at each point, choose 'best' by minimum gradient
elif method == 'adaptive':
self._adaptive(maximum_iterations=maximum_iterations,
relative_tolerance=relative_tolerance, verbose=verbose, print_warning=False)
else:
raise ParameterError(
"Specified method = '%s' is not a valid method. Specify 'self-consistent-iteration' or 'adaptive'.")
# Recompute all free energies because those from states with zero samples are not correctly computed by Newton-Raphson.
# and store the log weights
if verbose:
print("Recomputing all free energies and log weights for storage")
# Note: need to recalculate only if max iterations is set to zero.
(self.Log_W_nk, self.f_k) = self._computeWeights(
recalc_denom=(maximum_iterations == 0), logform=True, include_nonzero=True, return_f_k=True)
# Print final dimensionless free energies.
if self.verbose:
print("Final dimensionless free energies")
print("f_k = ")
print(self.f_k)
if self.verbose:
print("MBAR initialization complete.")
return
#=========================================================================
def getWeights(self):
"""Retrieve the weight matrix W_nk from the MBAR algorithm.
Necessary because they are stored internally as log weights.
Returns
-------
weights : np.ndarray, float, shape=(N, K)
NxK matrix of weights in the MBAR covariance and averaging formulas
"""
return np.exp(self.Log_W_nk)
#=========================================================================
def getFreeEnergyDifferences(self, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
"""Get the dimensionless free energy differences and uncertainties among all thermodynamic states.
Parameters
----------
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method,
or None to use default. See help for computeAsymptoticCovarianceMatrix()
for more information on various methods. (default: svd)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude
than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
        Deltaf_ij : np.ndarray, float, shape=(K, K)
            Deltaf_ij[i,j] is the estimated free energy difference f_j - f_i
        dDeltaf_ij : np.ndarray, float, shape=(K, K)
            dDeltaf_ij[i,j] is the estimated statistical uncertainty
(one standard deviation) in Deltaf_ij[i,j]
Notes
-----
Computation of the covariance matrix may take some time for large K.
The reported statistical uncertainty should, in the asymptotic limit, reflect one standard deviation for the normal distribution of the estimate.
The true free energy difference should fall within the interval [-df, +df] centered on the estimate 68% of the time, and within
the interval [-2 df, +2 df] centered on the estimate 95% of the time.
This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit.
See Section III of Reference [1].
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> [Deltaf_ij, dDeltaf_ij] = mbar.getFreeEnergyDifferences()
"""
# Compute free energy differences.
f_i = np.matrix(self.f_k)
Deltaf_ij = f_i - f_i.transpose()
# zero out numerical error for thermodynamically identical states
self._zerosamestates(Deltaf_ij)
returns = []
returns.append(np.array(Deltaf_ij))
if compute_uncertainty or return_theta:
# Compute asymptotic covariance matrix.
Theta_ij = self._computeAsymptoticCovarianceMatrix(
np.exp(self.Log_W_nk), self.N_k, method=uncertainty_method)
if compute_uncertainty:
# compute the covariance component without doing the double loop.
# d2DeltaF = Theta_ij[i,i] + Theta_ij[j,j] - 2.0 * Theta_ij[i,j]
diag = Theta_ij.diagonal()
d2DeltaF = diag + diag.transpose() - 2 * Theta_ij
# zero out numerical error for thermodynamically identical states
self._zerosamestates(d2DeltaF)
                # check for any squared uncertainties below zero (numerical noise):
                # warn if the magnitude is significant, otherwise clamp to zero.
                if np.any(d2DeltaF < 0.0):
                    if np.any(d2DeltaF < -warning_cutoff):
                        print("A squared uncertainty is negative. min(d2DeltaF) = %e" % np.min(d2DeltaF))
                    else:
                        d2DeltaF[d2DeltaF < 0.0] = 0.0
# take the square root of the entries of the matrix
dDeltaf_ij = np.sqrt(d2DeltaF)
# Return matrix of free energy differences and uncertainties.
returns.append(np.array(dDeltaf_ij))
if (return_theta):
returns.append(np.array(Theta_ij))
return returns
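    # Usage sketch (illustrative): Deltaf_ij[i, j] estimates f_j - f_i, so the
    # dimensionless free energy of the last state relative to the first, with
    # its one-sigma uncertainty, can be read off as
    #
    # >>> Deltaf_ij, dDeltaf_ij = mbar.getFreeEnergyDifferences()
    # >>> df, ddf = Deltaf_ij[0, -1], dDeltaf_ij[0, -1]
    # >>> print("Delta f = %.3f +- %.3f (dimensionless)" % (df, ddf))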
#=========================================================================
def computeGeneralExpectations(self, A_in, u_ln, state_list, compute_uncertainty=True,
uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
"""Compute the expectations of multiple observables of phase space functions on multiple states.
Compute the expectations of multiple observables of phase
space functions. [A_0(x),A_1(x),...,A_i(x)] along with the
covariances of their estimates at multiple states.
We calculate all observables at all states that are specified by the choice of state list.
Generally, it will be called in specific conditions.
Parameters
----------
A_in : np.ndarray, float, shape=(I, N)
A_in[i,n] = A_i(x_n), the value of phase observable i for configuration n
u_ln : np.ndarray, float, shape=(L, N)
u_n[l,n] is the reduced potential of configuration n at state l
if u_ln = None, we use self.u_kn
state_list : np.ndarray, int, shape (NS,2), where NS is the
            total number of states of interest. It will be
            of the form [[0,0],[1,1],[2,1]], which would
            indicate we want to output the properties of
            three observables in total: the first property A[0]
            at the 0th state, the second property A[1] at the
            1st state, and the third property A[2] at the 1st
            state. This allows us to tailor this to a number of different situations.
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
A_i : np.ndarray, float, shape = (I)
A_i[i] is the estimate for the expectation of A_state_list[i](x) at the state specified by u_n[state_list[i],:]
d2A_ik : np.ndarray, float, shape = (I, J)
d2A_ij[i,j] is the COVARIANCE in the estimates of observables A_i and A_j (as determined by the state list)
(* not the square root *)
General cases this will be used for.
single observable, multiple states (replacement for computeExpectations)
multiple observables, single state (replacement for computeMultipleExpectations)
diagonal cases of multiple states, single states.
Examples
--------
update this example to be more general
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> A_in = np.array([x_n,x_n**2,x_n**3])
>>> u_n = u_kn[:2,:]
>>> state_list = np.array([[0,0],[1,0],[2,0],[2,1]],int)
>>> [A_i, d2A_ij] = mbar.computeGeneralExpectations(A_in, u_n, state_list)
"""
# Retrieve N and K for convenience.
S = len(state_list) # number of computed expectations examined
K = self.K
N = self.N # N is total number of samples
# make observables all positive
A_list = np.unique(state_list[:,0])
I = len(A_list) # number of observables used
A_min = np.zeros([I], dtype=np.float64)
for i in A_list: # only need to zero the ones we will use. May be some repetition here.
A_min[i] = np.min(A_in[i, :]) #find the minimum
A_in[i, :] = A_in[i,:] - (A_min[i] - 1) #all values now positive so that we can work in logarithmic scale
# Augment W_nk, N_k, and c_k for q_A(x) for the observables, with one
# row for the specified state and I rows for the observable at that
# state.
# log weight matrix
sizea = K + 2*S # augmented size
Log_W_nk = np.zeros([N, sizea], np.float64) # log weight matrix
N_k = np.zeros([sizea], np.int32) # counts
f_k = np.zeros([sizea], np.float64) # free energies
# Fill in first section of matrix with existing q_k(x) from states.
Log_W_nk[:, 0:K] = self.Log_W_nk
N_k[0:K] = self.N_k
f_k[0:K] = self.f_k
# Compute row of W matrix for the extra states corresponding to u_ln according to the state list
for s in range(S):
l = state_list[s,1]
la = K+s #l augmented
Log_W_nk[:, la] = self._computeUnnormalizedLogWeights(u_ln[l,:])
f_k[la] = -_logsum(Log_W_nk[:, la])
Log_W_nk[:, la] += f_k[l]
# Compute the remaining rows/columns of W_nk and c_k for the
# observables.
for s in range(S):
sa = K+S+s # augmented s
i = state_list[s,0]
l = state_list[s,1]
Log_W_nk[:, sa] = np.log(A_in[i, :]) + Log_W_nk[:, K+l]
f_k[sa] = -_logsum(Log_W_nk[:, sa])
Log_W_nk[:, sa] += f_k[sa] # normalize this row
# Compute estimates.
A_i = np.zeros([S], np.float64)
for s in range(S):
A_i[s] = np.exp(-f_k[K + S + s])
if compute_uncertainty or return_theta:
# Compute augmented asymptotic covariance matrix.
W_nk = np.exp(Log_W_nk)
Theta_ij = self._computeAsymptoticCovarianceMatrix(
W_nk, N_k, method=uncertainty_method)
if compute_uncertainty:
# Compute estimates of statistical covariance
# these variances will be the same whether or not we subtract a different constant from each A_i
# todo: vectorize
# compute the covariance component without doing the double loop
d2A_ij = np.zeros([S, S], np.float64)
for i in range(S):
si = K+S+i
li = K+state_list[i,1]
for j in range(S):
sj = K+S+j
lj = K+state_list[j,1]
d2A_ij[i, j] = A_i[i] * A_i[j] * (
Theta_ij[si, sj] - Theta_ij[si, li] - Theta_ij[lj, sj] + Theta_ij[li, lj])
# Now that covariances are computed, add the constants back to A_i that
# were required to enforce positivity
for s in range(S):
A_i[s] += (A_min[state_list[s,0]] - 1)
# these values may be used outside the routine, so copy back.
for i in A_list:
A_in[i, :] = A_in[i,:] + (A_min[i] - 1)
returns = []
returns.append(A_i)
if compute_uncertainty:
returns.append(d2A_ij)
if return_theta:
returns.append(Theta_ij)
# Return expectations and uncertainties.
return returns
#=========================================================================
def computeExpectations(self, A_n, output='averages', compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False, useGeneral = False, state_dependent = False):
"""Compute the expectation of an observable of a phase space function.
Compute the expectation of an observable of phase space
function A(x) at all states where potentials are generated,
including states for which no samples were drawn.
        We assume observables are not functions of the state. u is not
an observable -- it changes depending on the state. u_k is an
observable; the energy of state k does not depend on the
state. To compute the estimators of the energy at all K
states, use . . .
Parameters
----------
A_n : np.ndarray, float
A_n (N_max np float64 array) - A_n[n] = A(x_n)
output : string, optional
Either output averages, and uncertainties, or output a matrix of differences, with uncertainties.
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method,
or None to use default See help for computeAsymptoticCovarianceMatrix()
for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
useGeneral: bool, whether to use the GeneralExpectations formalism = False,
state_dependent: bool, whether the expectations are state-dependent.
Returns
-------
A : np.ndarray, float
if output is 'averages'
A_i (K np float64 array) - A_i[i] is the estimate for the expectation of A(x) for state i.
if output is 'differences'
dA : np.ndarray, float
dA_i (K np float64 array) - dA_i[i] is uncertainty estimate (one standard deviation) for A_i[i]
or
            dA_ij (K np float64 array) - dA_ij[i,j] is uncertainty estimate (one standard deviation) for the difference in A between i and j
Notes
-----
The reported statistical uncertainty should, in the asymptotic limit,
reflect one standard deviation for the normal distribution of the estimate.
The true expectation should fall within the interval [-dA, +dA] centered on the estimate 68% of the time, and within
the interval [-2 dA, +2 dA] centered on the estimate 95% of the time.
This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit.
This 'breakdown' can be exacerbated by the computation of observables like indicator functions for histograms that are sparsely populated.
References
----------
See Section IV of [1].
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> A_n = x_n
>>> (A_ij, dA_ij) = mbar.computeExpectations(A_n)
>>> A_n = u_kn[0,:]
>>> (A_ij, dA_ij) = mbar.computeExpectations(A_n, output='differences')
"""
dims = len(np.shape(A_n))
# Retrieve N and K for convenience.
N = self.N
K = self.K
if dims == 3:
print("expecting dim=1 or dim=2")
return None
if (useGeneral):
state_list = np.zeros([K,2],int)
if (state_dependent):
for k in range(K):
state_list[k,0] = k
state_list[k,1] = k
A_in = A_n
else:
A_in = np.zeros([1,N], dtype=np.float64)
if dims == 2:
A_n = kn_to_n(A_n, N_k=self.N_k)
A_in[0,:] = A_n
for k in range(K):
state_list[k,0] = 0
state_list[k,1] = k
general_results = self.computeGeneralExpectations(A_in, self.u_kn, state_list,
compute_uncertainty=compute_uncertainty,
uncertainty_method=uncertainty_method,
warning_cutoff=warning_cutoff,
return_theta=return_theta)
returns = []
if output == 'averages':
# Return expectations and uncertainties.
returns.append(general_results[0])
if compute_uncertainty:
indices = np.eye(K,dtype=bool)
returns.append(np.sqrt(general_results[1][indices]))
if output == 'differences':
A_im = np.matrix(general_results[0])
A_ij = A_im - A_im.transpose()
returns.append(np.array(A_ij))
                if compute_uncertainty:
                    returns.append(np.sqrt(general_results[1]))
if return_theta:
returns.append(general_results[2])
else: # current style
if dims == 2: #convert to 1xN shape
A_n = kn_to_n(A_n, N_k=self.N_k)
# Convert to np array.
A_n = np.array(A_n, np.float64)
# Augment W_nk, N_k, and c_k for q_A(x) for the observable, with one
# extra row/column for each state (Eq. 13 of [1]).
# log of weight matrix
Log_W_nk = np.zeros([N, K * 2], np.float64)
N_k = np.zeros([K * 2], np.int32) # counts
# "free energies" of the new states
f_k = np.zeros([K], np.float64)
# Fill in first half of matrix with existing q_k(x) from states.
Log_W_nk[:, 0:K] = self.Log_W_nk
N_k[0:K] = self.N_k
# Make A_n all positive so we can operate logarithmically for
# robustness
A_i = np.zeros([K], np.float64)
A_min = np.min(A_n)
A_n = A_n - (A_min - 1)
# Compute the remaining rows/columns of W_nk and the rows c_k for the
# observables.
for l in range(K):
# this works because all A_n are now positive;
Log_W_nk[:, K + l] = np.log(A_n) + self.Log_W_nk[:, l]
# we took the min at the beginning.
f_k[l] = -_logsum(Log_W_nk[:, K + l])
Log_W_nk[:, K + l] += f_k[l] # normalize the row
A_i[l] = np.exp(-f_k[l])
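# A_i[l] = exp(-f_k[l]) = sum_n A_n W_nl, i.e. the MBAR estimate of <A> at state l
# (still shifted by (A_min - 1); the shift is undone once uncertainties are computed,
# and cancels entirely for differences).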
if compute_uncertainty or return_theta:
# Compute augmented asymptotic covariance matrix.
Theta_ij = self._computeAsymptoticCovarianceMatrix(
np.exp(Log_W_nk), N_k, method=uncertainty_method)
returns = []
if output == 'averages':
if compute_uncertainty:
# Compute uncertainties.
dA_i = np.zeros([K], np.float64)
# just the diagonals
for k in range(0, K):
dA_i[k] = np.abs(A_i[k]) * np.sqrt(
Theta_ij[K + k, K + k] + Theta_ij[k, k] - 2.0 * Theta_ij[k, K + k])
# add back the minimum now that uncertainties are computed.
A_i += (A_min - 1)
# Return expectations and uncertainties.
returns.append(np.array(A_i))
if compute_uncertainty:
returns.append(np.array(dA_i))
if output == 'differences':
# Return differences of expectations and uncertainties.
# compute expectation differences
A_im = np.matrix(A_i)
A_ij = A_im - A_im.transpose()
returns.append(np.array(A_ij))
# todo - vectorize the differences! Faster and less likely to give errors.
if compute_uncertainty:
dA_ij = np.zeros([K, K], dtype=np.float64)
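# Each term below is the quadratic form a^T Theta a for the difference A_i - A_j,
# with sensitivity vector a = (+A_i, -A_j, -A_i, +A_j) over the rows/columns
# (i, j, K + i, K + j) of the augmented covariance matrix.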
for i in range(0, K):
for j in range(0, K):
try:
dA_ij[i, j] = np.sqrt(
+ A_i[i] * Theta_ij[i, i] * A_i[i]
- A_i[i] * Theta_ij[i, j] * A_i[j]
- A_i[i] * Theta_ij[i, K + i] * A_i[i]
+ A_i[i] * Theta_ij[i, K + j] * A_i[j]
- A_i[j] * Theta_ij[j, i] * A_i[i]
+ A_i[j] * Theta_ij[j, j] * A_i[j]
+ A_i[j] * Theta_ij[j, K + i] * A_i[i]
- A_i[j] * Theta_ij[j, K + j] * A_i[j]
- A_i[i] * Theta_ij[K + i, i] * A_i[i]
+ A_i[i] * Theta_ij[K + i, j] * A_i[j]
+ A_i[i] * Theta_ij[K + i, K + i] * A_i[i]
- A_i[i] * Theta_ij[K + i, K + j] * A_i[j]
+ A_i[j] * Theta_ij[K + j, i] * A_i[i]
- A_i[j] * Theta_ij[K + j, j] * A_i[j]
- A_i[j] * Theta_ij[K + j, K + i] * A_i[i]
+ A_i[j] * Theta_ij[K + j, K + j] * A_i[j]
)
except:
dA_ij[i, j] = 0.0
returns.append(dA_ij)
if return_theta:
returns.append(Theta_ij)
return returns
#=========================================================================
def computeMultipleExpectations(self, A_in, u_n, compute_uncertainty=True,
uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
"""Compute the expectations of multiple observables of phase space functions.
Compute the expectations of multiple observables of phase space functions.
[A_0(x),A_1(x),...,A_i(x)] along with the covariances of their estimates. The state is specified by
the choice of u_n, which is the energy of the n samples evaluated at the chosen state.
Parameters
----------
A_in : np.ndarray, float, shape=(I, N) or shape=(I, K, N)
A_in[i,n] = A_i(x_n), the value of phase observable i for configuration n at state of interest
u_n : np.ndarray, float, shape=(N)
u_n[n] is the reduced potential of configuration n at the state of interest
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
A_i : np.ndarray, float, shape=(I)
A_i[i] is the estimate for the expectation of A_i(x) at the state specified by u_n
d2A_ij : np.ndarray, float, shape=(I, I)
d2A_ij[i,j] is the COVARIANCE in the estimates of A_i[i] and A_i[j],
not the square root of the covariance
Notes
-----
Not fully tested!
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> A_in = np.array([x_n,x_n**2,x_n**3])
>>> u_n = u_kn[0,:]
>>> [A_i, d2A_ij] = mbar.computeMultipleExpectations(A_in, u_n)
"""
# Retrieve N and K for convenience.
I = A_in.shape[0] # number of observables
K = self.K
N = self.N # N is total number of samples
if len(np.shape(A_in)) == 3:
A_in_old = A_in.copy()
A_in = np.zeros([I, N], np.float64)
for i in range(I):
A_in[i,:] = kn_to_n(A_in_old[i, :, :], N_k=self.N_k)
A_min = np.zeros([I], dtype=np.float64)
for i in range(I):
A_min[i] = np.min(A_in[i, :]) #find the minimum
A_in[i, :] -= (A_min[i]-1) # all values will now be positive so that we can work on a logarithmic scale
if len(np.shape(u_n)) == 2:
u_n = kn_to_n(u_n, N_k=self.N_k)
# Augment W_nk, N_k, and c_k for q_A(x) for the observables, with one
# row for the specified state and I rows for the observable at that
# state.
# log weight matrix
Log_W_nk = np.zeros([N, K + 1 + I], np.float64)
W_nk = np.zeros([N, K + 1 + I], np.float64) # weight matrix
N_k = np.zeros([K + 1 + I], np.int32) # counts
f_k = np.zeros([K + 1 + I], np.float64) # free energies
# Fill in first section of matrix with existing q_k(x) from states.
Log_W_nk[:, 0:K] = self.Log_W_nk
W_nk[:, 0:K] = np.exp(self.Log_W_nk)
N_k[0:K] = self.N_k
f_k[0:K] = self.f_k
# Compute row of W matrix for the extra state corresponding to u_kn.
Log_W_nk[:, K] = self._computeUnnormalizedLogWeights(u_n)
f_k[K] = -_logsum(Log_W_nk[:, K])
Log_W_nk[:, K] += f_k[K]
# Compute the remaining rows/columns of W_nk and c_k for the
# observables.
for i in range(I):
Log_W_nk[:, K+1+i] = np.log(A_in[i, :]) + Log_W_nk[:, K]
f_k[K + 1 + i] = -_logsum(Log_W_nk[:, K + 1 + i])
Log_W_nk[:, K + 1 + i] += f_k[K + 1 + i] # normalize this row
# Compute estimates.
A_i = np.zeros([I], np.float64)
for i in range(I):
A_i[i] = np.exp(-f_k[K + 1 + i])
if compute_uncertainty or return_theta:
# Compute augmented asymptotic covariance matrix.
W_nk = np.exp(Log_W_nk)
Theta_ij = self._computeAsymptoticCovarianceMatrix(
W_nk, N_k, method=uncertainty_method)
if compute_uncertainty:
# Compute estimates of statistical covariance
# these variances will be the same whether or not we subtract a different constant from each A_i
# todo: vectorize
# compute the covariance component without doing the double loop
d2A_ij = np.zeros([I, I], np.float64)
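# cov(A_i, A_j) = A_i A_j (Theta[a_i, a_j] - Theta[a_i, s] - Theta[s, a_j] + Theta[s, s]),
# where s = K is the column of the target state and a_i = K + 1 + i is the column
# augmented with observable i.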
for i in range(I):
for j in range(I):
d2A_ij[i, j] = A_i[i] * A_i[j] * (Theta_ij[K + 1 + i, K + 1 + j] - Theta_ij[
K + 1 + i, K] - Theta_ij[K, K + 1 + j] + Theta_ij[K, K])
# Now that covariances are computed, add the constants back to A_i that
# were required to enforce positivity
A_i = A_i + (A_min - 1)
returns = []
returns.append(A_i)
if compute_uncertainty:
returns.append(d2A_ij)
if return_theta:
returns.append(Theta_ij)
# Return expectations and uncertainties.
return returns
#=========================================================================
def computeOverlap(self, output='scalar'):
"""Compute estimate of overlap matrix between the states.
Returns
-------
O : np.ndarray, float, shape=(K, K)
estimated state overlap matrix: O[i,j] is an estimate
of the probability of observing a sample from state i in state j
Parameters
----------
output : string, optional
One of 'scalar', 'matrix', 'eigenvalues', 'all', specifying
what measure of overlap to return
Notes
-----
W.T * W \approx \int (p_i p_j /\sum_k N_k p_k)^2 \sum_k N_k p_k dq^N
= \int (p_i p_j /\sum_k N_k p_k) dq^N
Multiplying elementwise by N_i, the elements of row i give the probability
for a sample from state i being observed in state j.
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> O_ij = mbar.computeOverlap()
"""
W = np.matrix(self.getWeights(), np.float64)
O = np.multiply(self.N_k, W.T * W)
(eigenval, eigenvec) = linalg.eig(O)
# sort in descending order
eigenval = np.sort(eigenval)[::-1]
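# The largest eigenvalue of O is 1; one minus the second-largest eigenvalue serves
# as a scalar overlap measure (near 0 for poor overlap, 1 for complete overlap).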
overlap_scalar = 1 - eigenval[1]
if (output == 'scalar'):
return overlap_scalar
elif (output == 'eigenvalues'):
return eigenval
elif (output == 'matrix'):
return O
elif (output == 'all'):
return overlap_scalar, eigenval, O
#=========================================================================
def computePerturbedExpectation(self, u_n, A_n, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
"""Compute the expectation of an observable of phase space function A(x) for a single new state.
Parameters
----------
u_n : np.ndarray, float, shape=(K, N_max)
u_n[n] = u(x_n) - the energy of the new state at all N samples previously sampled.
A_n : np.ndarray, float, shape=(K, N_max)
A_n[n] = A(x_n) - the phase space function of the new state at all N samples previously sampled. If this does NOT depend on state (e.g. position), it's simply the value of the observation. If it DOES depend on the current state, then the observables from the previous states need to be reevaluated at THIS state.
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
A : float
A is the estimate for the expectation of A(x) for the specified state
dA : float
dA is uncertainty estimate for A
Notes
-----
See Section IV of [1].
# Compute estimators and uncertainty.
#A = sum(W_n[:,K] * A_n[:]) # Eq. 15 of [1]
#dA = abs(A) * np.sqrt(Theta_ij[K,K] + Theta_ij[K+1,K+1] - 2.0 * Theta_ij[K,K+1]) # Eq. 16 of [1]
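Examples
--------
A minimal usage sketch (assuming the same harmonic-oscillator test data used by the
other examples in this module; here the 'new' state is simply illustrated with the
energies of sampled state 0):
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> u_n = u_kn[0, :]
>>> A_n = x_n
>>> (A, dA) = mbar.computePerturbedExpectation(u_n, A_n)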
"""
if len(np.shape(u_n)) == 2:
u_n = kn_to_n(u_n, N_k=self.N_k)
if len(np.shape(A_n)) == 2:
A_n = kn_to_n(A_n, N_k=self.N_k)
# Convert to np matrix.
A_n = np.array(A_n, dtype=np.float64)
# Retrieve N and K for convenience.
N = self.N
K = self.K
# Make A_k all positive so we can operate logarithmically for
# robustness
A_min = np.min(A_n)
A_n = A_n - (A_min - 1)
# Augment W_nk, N_k, and c_k for q_A(x) for the observable, with one
# extra row/column for the specified state (Eq. 13 of [1]).
# weight matrix
Log_W_nk = np.zeros([N, K + 2], dtype=np.float64)
N_k = np.zeros([K + 2], dtype=np.int32) # counts
f_k = np.zeros([K + 2], dtype=np.float64) # free energies
# Fill in first K states with existing q_k(x) from states.
Log_W_nk[:, 0:K] = self.Log_W_nk
N_k[0:K] = self.N_k
# compute the free energy of the additional state
log_w_n = self._computeUnnormalizedLogWeights(u_n)
# Compute free energies
f_k[K] = -_logsum(log_w_n)
Log_W_nk[:, K] = log_w_n + f_k[K]
# compute the observable at this state
Log_W_nk[:, K + 1] = np.log(A_n) + Log_W_nk[:, K]
f_k[K + 1] = -_logsum(Log_W_nk[:, K + 1])
Log_W_nk[:, K + 1] += f_k[K + 1] # normalize the row
A = np.exp(-f_k[K + 1])
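# exp(-f_k[K + 1]) equals sum_n A_n W_n[:, K], the estimator quoted in the Notes
# (Eq. 15 of [1]); the value is still shifted by (A_min - 1), which is added back
# after the uncertainty is computed.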
if (compute_uncertainty or return_theta):
# Compute augmented asymptotic covariance matrix.
Theta_ij = self._computeAsymptoticCovarianceMatrix(
np.exp(Log_W_nk), N_k, method=uncertainty_method)
if (compute_uncertainty):
dA = np.abs(A) * np.sqrt(
Theta_ij[K + 1, K + 1] + Theta_ij[K, K] - 2.0 * Theta_ij[K, K + 1]) # Eq. 16 of [1]
# shift answers back with the offset now that variances are computed
A += (A_min - 1)
returns = []
returns.append(A)
if (compute_uncertainty):
returns.append(dA)
if (return_theta):
returns.append(Theta_ij)
# Return expectations and uncertainties.
return returns
#=========================================================================
def computePerturbedFreeEnergies(self, u_ln, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
"""Compute the free energies for a new set of states.
Here, we desire the free energy differences among a set of new states, as well as the uncertainty estimates in these differences.
Parameters
----------
u_ln : np.ndarray, float, shape=(L, Nmax)
u_ln[l,n] is the reduced potential energy of uncorrelated
configuration n evaluated at new state k. Can be completely independent of the original number of states.
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
Returns
-------
Deltaf_ij : np.ndarray, float, shape=(L, L)
Deltaf_ij[i,j] = f_j - f_i, the dimensionless free energy difference between new states i and j
dDeltaf_ij : np.ndarray, float, shape=(L, L)
dDeltaf_ij[i,j] is the estimated statistical uncertainty in Deltaf_ij[i,j]
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> [Deltaf_ij, dDeltaf_ij] = mbar.computePerturbedFreeEnergies(u_kn)
"""
# Convert to np matrix.
u_ln = np.array(u_ln, dtype=np.float64)
# Get the dimensions of the matrix of reduced potential energies, and convert if necessary
if len(np.shape(u_ln)) == 3:
u_ln = kln_to_kn(u_ln, N_k=self.N_k)
[L, N] = u_ln.shape
# Check dimensions.
if (N < self.N):
raise ParameterError(
"There seems to be too few samples in u_kn. You must evaluate at the new potential with all of the samples used originally.")
# Retrieve N and K for convenience.
N = self.N
K = self.K
# Augment W_nk, N_k, and c_k for the new states.
W_nk = np.zeros([N, K + L], dtype=np.float64) # weight matrix
N_k = np.zeros([K + L], dtype=np.int32) # counts
f_k = np.zeros([K + L], dtype=np.float64) # free energies
# Fill in first half of matrix with existing q_k(x) from states.
W_nk[:, 0:K] = np.exp(self.Log_W_nk)
N_k[0:K] = self.N_k
f_k[0:K] = self.f_k
# Compute normalized weights.
for l in range(0, L):
# Compute unnormalized log weights.
log_w_n = self._computeUnnormalizedLogWeights(u_ln[l, :])
# Compute free energies
f_k[K + l] = - _logsum(log_w_n)
# Store normalized weights. Keep in exponential not log form
# because we will not store W_nk
W_nk[:, K + l] = np.exp(log_w_n + f_k[K + l])
if (compute_uncertainty or return_theta):
# Compute augmented asymptotic covariance matrix.
Theta_ij = self._computeAsymptoticCovarianceMatrix(
W_nk, N_k, method = uncertainty_method)
# Compute matrix of free energy differences between states and
# associated uncertainties.
# makes matrix operations easier to recast
f_k = np.matrix(f_k[K:K + L])
Deltaf_ij = f_k - f_k.transpose()
returns = []
returns.append(Deltaf_ij)
if (compute_uncertainty):
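# d2DeltaF[i,j] = Theta[K+i,K+i] + Theta[K+j,K+j] - 2 Theta[K+i,K+j], the standard
# expression for the variance of a free energy difference, taken over the block of
# the covariance matrix corresponding to the L new states.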
diag = Theta_ij.diagonal()
dii = diag[0, K:K + L]
d2DeltaF = dii + dii.transpose() - 2 * Theta_ij[K:K + L, K:K + L]
# check for any numbers below zero.
if (np.any(d2DeltaF < 0.0)):
if (np.any(d2DeltaF < -warning_cutoff)):
print("A squared uncertainty is negative. min(d2DeltaF) = %e" % np.min(d2DeltaF))
else:
d2DeltaF[d2DeltaF < 0.0] = 0.0
# take the square root of entries of the matrix
dDeltaf_ij = np.sqrt(d2DeltaF)
returns.append(dDeltaf_ij)
if (return_theta):
returns.append(Theta_ij)
# Return matrix of free energy differences and uncertainties.
return returns
def computeEntropyAndEnthalpy(self, uncertainty_method=None, verbose=False, warning_cutoff=1.0e-10):
"""Decompose free energy differences into enthalpy and entropy differences.
Compute the decomposition of the free energy difference between
states 1 and N into reduced free energy differences, reduced potential
(enthalpy) differences, and reduced entropy (S/k) differences.
Parameters
----------
uncertainty_method : string , optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
Returns
-------
Delta_f_ij : np.ndarray, float, shape=(K, K)
Delta_f_ij[i,j] is the dimensionless free energy difference f_j - f_i
dDelta_f_ij : np.ndarray, float, shape=(K, K)
uncertainty in Delta_f_ij
Delta_u_ij : np.ndarray, float, shape=(K, K)
Delta_u_ij[i,j] is the reduced potential energy difference u_j - u_i
dDelta_u_ij : np.ndarray, float, shape=(K, K)
uncertainty in Delta_u_ij
Delta_s_ij : np.ndarray, float, shape=(K, K)
Delta_s_ij[i,j] is the reduced entropy difference S/k between states i and j (s_j - s_i)
dDelta_s_ij : np.ndarray, float, shape=(K, K)
uncertainty in Delta_s_ij
Notes
-----
This method is EXPERIMENTAL and should be used at your own risk.
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> [Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij] = mbar.computeEntropyAndEnthalpy()
"""
if verbose:
print("Computing average energy and entropy by MBAR.")
# Retrieve N and K for convenience.
N = self.N
K = self.K
# Augment W_nk, N_k, and c_k for q_A(x) for the potential energies,
# with one extra row/column for each state.
# weight matrix
Log_W_nk = np.zeros([N, K * 2], dtype=np.float64)
N_k = np.zeros([K * 2], dtype=np.int32) # counts
# "free energies" of average states
f_k = np.zeros(K, dtype=np.float64)
# Fill in first half of matrix with existing q_k(x) from states.
Log_W_nk[:, 0:K] = self.Log_W_nk
N_k[0:K] = self.N_k
# Compute the remaining rows/columns of W_nk and c_k for the potential
# energy observable.
u_min = self.u_kn.min()
u_i = np.zeros([K], dtype=np.float64)
for l in range(0, K):
u_kn = self.u_kn[l, :] - (u_min-1) # all positive now! Subtracting off arbitrary constants doesn't affect results
# since they are all differences.
# Compute unnormalized weights.
# A(x_n) exp[f_{k} - q_{k}(x_n)] / \sum_{k'=1}^K N_{k'} exp[f_{k'} - q_{k'}(x_n)]
# harden for over/underflow with logarithms
Log_W_nk[:, K + l] = np.log(u_kn) + self.Log_W_nk[:, l]
f_k[l] = -_logsum(Log_W_nk[:, K + l])
Log_W_nk[:, K + l] += f_k[l] # normalize the row
u_i[l] = np.exp(-f_k[l])
# print "MBAR u_i[%d]: %10.5f,%10.5f" % (l,u_i[l]+u_min, u_i[l])
# Compute augmented asymptotic covariance matrix.
W_nk = np.exp(Log_W_nk)
Theta_ij = self._computeAsymptoticCovarianceMatrix(
W_nk, N_k, method=uncertainty_method)
# Compute estimators and uncertainties.
dDelta_f_ij = np.zeros([K, K], dtype=np.float64)
dDelta_u_ij = np.zeros([K, K], dtype=np.float64)
dDelta_s_ij = np.zeros([K, K], dtype=np.float64)
# Compute reduced free energy difference.
f_k = np.matrix(self.f_k)
Delta_f_ij = f_k - f_k.transpose()
# Compute reduced enthalpy difference.
u_k = np.matrix(u_i)
Delta_u_ij = u_k - u_k.transpose()
# Compute reduced entropy difference
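# In reduced units f = u - s (with s = S/k dimensionless), so the reduced entropy
# follows as s = u - f.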
s_k = u_k - f_k
Delta_s_ij = s_k - s_k.transpose()
# compute uncertainty matrix in free energies:
# d2DeltaF = Theta_ij[i,i] + Theta_ij[j,j] - 2.0 * Theta_ij[i,j]
diag = Theta_ij.diagonal()
dii = diag[0:K, 0:K]
d2DeltaF = dii + dii.transpose() - 2 * Theta_ij[0:K, 0:K]
# check for any numbers below zero.
if (np.any(d2DeltaF < 0.0)):
if (np.any(d2DeltaF < -warning_cutoff)):
print("A squared uncertainty is negative. min(d2DeltaF) = %e" % np.min(d2DeltaF))
else:
d2DeltaF[d2DeltaF < 0.0] = 0.0
# take the square root of the entries of matrix
dDelta_f_ij = np.sqrt(d2DeltaF)
# TODO -- vectorize this calculation for entropy and enthalpy!
for i in range(0, K):
for j in range(0, K):
try:
dDelta_u_ij[i, j] = math.sqrt(
+ u_i[i] * Theta_ij[i, i] * u_i[i] - u_i[i] * Theta_ij[i, j] * u_i[j] - u_i[
i] * Theta_ij[i, K + i] * u_i[i] + u_i[i] * Theta_ij[i, K + j] * u_i[j]
- u_i[j] * Theta_ij[j, i] * u_i[i] + u_i[j] * Theta_ij[j, j] * u_i[j] + u_i[
j] * Theta_ij[j, K + i] * u_i[i] - u_i[j] * Theta_ij[j, K + j] * u_i[j]
- u_i[i] * Theta_ij[K + i, i] * u_i[i] + u_i[i] * Theta_ij[K + i, j] * u_i[
j] + u_i[i] * Theta_ij[K + i, K + i] * u_i[i] - u_i[i] * Theta_ij[K + i, K + j] * u_i[j]
+ u_i[j] * Theta_ij[K + j, i] * u_i[i] - u_i[j] * Theta_ij[K + j, j] * u_i[
j] - u_i[j] * Theta_ij[K + j, K + i] * u_i[i] + u_i[j] * Theta_ij[K + j, K + j] * u_i[j]
)
except:
dDelta_u_ij[i, j] = 0.0
# Compute reduced entropy difference.
try:
dDelta_s_ij[i, j] = math.sqrt(
+ (u_i[i] - 1) * Theta_ij[i, i] * (u_i[i] - 1) + (u_i[i] - 1) * Theta_ij[i, j] * (-u_i[j] + 1) + (
u_i[i] - 1) * Theta_ij[i, K + i] * (-u_i[i]) + (u_i[i] - 1) * Theta_ij[i, K + j] * u_i[j]
+ (-u_i[j] + 1) * Theta_ij[j, i] * (u_i[i] - 1) + (-u_i[j] + 1) * Theta_ij[j, j] * (-u_i[j] + 1) +
(-u_i[j] + 1) * Theta_ij[j, K + i] * (-u_i[i]) +
(-u_i[j] + 1) * Theta_ij[j, K + j] * u_i[j]
+ (-u_i[i]) * Theta_ij[K + i, i] * (u_i[i] - 1) + (-u_i[i]) * Theta_ij[K + i, j] * (-u_i[j] + 1) +
(-u_i[i]) * Theta_ij[K + i, K + i] * (-u_i[i]) +
(-u_i[i]) * Theta_ij[K + i, K + j] * u_i[j]
+ u_i[j] * Theta_ij[K + j, i] * (u_i[i] - 1) + u_i[j] * Theta_ij[K + j, j] * (-u_i[j] + 1) + u_i[
j] * Theta_ij[K + j, K + i] * (-u_i[i]) + u_i[j] * Theta_ij[K + j, K + j] * u_i[j]
)
except:
dDelta_s_ij[i, j] = 0.0
# Return expectations and uncertainties.
return (Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij)
#=========================================================================
def computePMF(self, u_n, bin_n, nbins, uncertainties='from-lowest', pmf_reference=None):
"""Compute the free energy of occupying a number of bins.
This implementation computes the expectation of an indicator-function observable for each bin.
Parameters
----------
u_n : np.ndarray, float, shape=(N)
u_n[n] is the reduced potential energy of snapshot n of state k for which the PMF is to be computed.
bin_n : np.ndarray, float, shape=(N)
bin_n[n] is the bin index of snapshot n of state k. bin_n can assume a value in range(0,nbins)
nbins : int
The number of bins
uncertainties : string, optional
Method for reporting uncertainties (default: 'from-lowest')
'from-lowest' - the uncertainties in the free energy difference with lowest point on PMF are reported
'from-specified' - same as from-lowest, but from a user-specified point given by pmf_reference
'from-normalization' - the normalization \sum_i p_i = 1 is used to determine uncertainties spread out through the PMF
'all-differences' - the nbins x nbins matrix df_ij of uncertainties in free energy differences is returned instead of df_i
Returns
-------
f_i : np.ndarray, float, shape=(nbins)
f_i[i] is the dimensionless free energy of bin i, relative to the bin of lowest free energy
df_i : np.ndarray, float, shape=(nbins)
df_i[i] is the uncertainty in the difference of f_i with respect to the bin of lowest free energy
Notes
-----
All bins must have some samples in them from at least one of the states -- this will not work if bin_n.sum(0) == 0. Empty bins should be removed before calling computePMF().
This method works by computing the free energy of localizing the system to each bin for the given potential by aggregating the log weights for the given potential.
To estimate uncertainties, the NxK weight matrix W_nk is augmented to be Nx(K+nbins) in order to accommodate the normalized weights of states where
the potential is given by u_kn within each bin and infinite potential outside the bin. The uncertainties with respect to the bin of lowest free energy
are then computed in the standard way.
WARNING
This method is EXPERIMENTAL and should be used at your own risk.
Examples
--------
>>> # Generate some test data
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> # Initialize MBAR on data.
>>> mbar = MBAR(u_kn, N_k)
>>> # Select the potential we want to compute the PMF for (here, condition 0).
>>> u_n = u_kn[0, :]
>>> # Sort into nbins equally-populated bins
>>> nbins = 10 # number of equally-populated bins to use
>>> import numpy as np
>>> N_tot = N_k.sum()
>>> x_n_sorted = np.sort(x_n) # unroll to n-indices
>>> bins = np.append(x_n_sorted[0::(N_tot/nbins)], x_n_sorted.max()+0.1)
>>> bin_widths = bins[1:] - bins[0:-1]
>>> bin_n = np.zeros(x_n.shape, np.int32)
>>> bin_n = np.digitize(x_n, bins) - 1
>>> # Compute PMF for these unequally-sized bins.
>>> [f_i, df_i] = mbar.computePMF(u_n, bin_n, nbins)
>>> # If we want to correct for unequally-spaced bins to get a PMF on uniform measure
>>> f_i_corrected = f_i - np.log(bin_widths)
"""
# Verify that no PMF bins are empty -- we can't deal with empty bins,
# because the free energy is infinite.
for i in range(nbins):
if np.sum(bin_n == i) == 0:
raise ParameterError(
"At least one bin in provided bin_n argument has no samples. All bins must have samples for free energies to be finite. Adjust bin sizes or eliminate empty bins to ensure at least one sample per bin.")
K = self.K
# Unroll to n-indices if 2D arrays were provided.
if len(np.shape(u_n)) == 2:
u_n = kn_to_n(u_n, N_k = self.N_k)
if len(np.shape(bin_n)) == 2:
bin_n = kn_to_n(bin_n, N_k = self.N_k)
# Compute unnormalized log weights for the given reduced potential
# u_n.
log_w_n = self._computeUnnormalizedLogWeights(u_n)
# Compute the free energies for these states.
f_i = np.zeros([nbins], np.float64)
df_i = np.zeros([nbins], np.float64)
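# Each bin's dimensionless free energy comes from an indicator-function observable:
# only samples falling in bin i contribute, so f_i = -log sum_{n in bin i} exp(log_w_n).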
for i in range(nbins):
# Get linear n-indices of samples that fall in this bin.
indices = np.where(bin_n == i)
# Compute dimensionless free energy of occupying state i.
f_i[i] = - _logsum(log_w_n[indices])
# Compute uncertainties by forming matrix of W_nk.
N_k = np.zeros([self.K + nbins], np.int32)
N_k[0:K] = self.N_k
W_nk = np.zeros([self.N, self.K + nbins], np.float64)
W_nk[:, 0:K] = np.exp(self.Log_W_nk)
for i in range(nbins):
# Get indices of samples that fall in this bin.
indices = np.where(bin_n == i)
# Compute normalized weights for this state.
W_nk[indices, K + i] = np.exp(log_w_n[indices] + f_i[i])
# Compute asymptotic covariance matrix using specified method.
Theta_ij = self._computeAsymptoticCovarianceMatrix(W_nk, N_k)
if (uncertainties == 'from-lowest') or (uncertainties == 'from-specified'):
# Report uncertainties in free energy difference from lowest point
# on PMF.
if (uncertainties == 'from-lowest'):
# Determine bin index with lowest free energy.
j = f_i.argmin()
elif (uncertainties == 'from-specified'):
if pmf_reference is None:
raise ParameterError(
"no reference state specified for PMF using uncertainties = 'from-specified'")
else:
j = pmf_reference
# Compute uncertainties with respect to difference in free energy
# from this state j.
for i in range(nbins):
df_i[i] = math.sqrt(
Theta_ij[K + i, K + i] + Theta_ij[K + j, K + j] - 2.0 * Theta_ij[K + i, K + j])
# Shift free energies so that state j has zero free energy.
f_i -= f_i[j]
# Return dimensionless free energy and uncertainty.
return (f_i, df_i)
elif (uncertainties == 'all-differences'):
# Report uncertainties in all free energy differences.
diag = Theta_ij.diagonal()
dii = diag[0, K:K + nbins]
d2f_ij = dii + \
dii.transpose() - 2 * Theta_ij[K:K + nbins, K:K + nbins]
# unsquare uncertainties
df_ij = np.sqrt(d2f_ij)
# Return dimensionless free energy and uncertainty.
return (f_i, df_ij)
elif (uncertainties == 'from-normalization'):
# Determine uncertainties from normalization that \sum_i p_i = 1.
# Compute bin probabilities p_i
p_i = np.exp(-f_i - _logsum(-f_i))
# todo -- eliminate triple loop over nbins!
# Compute uncertainties in bin probabilities.
d2p_i = np.zeros([nbins], np.float64)
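# Error propagation of p_k = exp(-f_k) / sum_i exp(-f_i): since dp_k/df_i = p_k (p_i - delta_ik),
# d2p_k = sum_{i,j} (dp_k/df_i) Theta[K+i, K+j] (dp_k/df_j).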
for k in range(nbins):
for i in range(nbins):
for j in range(nbins):
delta_ik = 1.0 * (i == k)
delta_jk = 1.0 * (j == k)
d2p_i[k] += p_i[k] * (p_i[i] - delta_ik) * p_i[
k] * (p_i[j] - delta_jk) * Theta_ij[K + i, K + j]
# Transform from d2p_i to df_i
d2f_i = d2p_i / p_i ** 2
df_i = np.sqrt(d2f_i)
# return free energy and uncertainty
return (f_i, df_i)
else:
raise "Uncertainty method '%s' not recognized." % uncertainties
return
#=========================================================================
def computePMF_states(self, u_n, bin_n, nbins):
"""Compute the free energy of occupying a number of bins.
This implementation defines each bin as a separate thermodynamic state.
Parameters
----------
u_n : np.ndarray, float, shape=(K, N)
u_n[n] is the reduced potential energy of snapshot n for which the PMF is to be computed.
bin_n : np.ndarray, int, shape=(N)
bin_n[n] is the bin index of snapshot n. bin_n can assume a value in range(0,nbins)
nbins : int
The number of bins
Returns
-------
f_i : np.ndarray, float, shape=(nbins)
f_i[i] is the dimensionless free energy of bin i, relative to the bin of lowest free energy
d2f_ij : np.ndarray, float, shape=(nbins, nbins)
d2f_ij[i,j] is the squared uncertainty in the difference (f_i - f_j)
Notes
-----
All bins must have some samples in them from at least one of the states -- this will not work if bin_n.sum(0) == 0. Empty bins should be removed before calling computePMF().
This method works by computing the free energy of localizing the system to each bin for the given potential by aggregating the log weights for the given potential.
To estimate uncertainties, the NxK weight matrix W_nk is augmented to be Nx(K+nbins) in order to accommodate the normalized weights of states where
the potential is given by u_kn within each bin and infinite potential outside the bin. The uncertainties with respect to the bin of lowest free energy
are then computed in the standard way.
WARNING!
This method is EXPERIMENTAL and should be used at your own risk.
"""
# Verify that no PMF bins are empty -- we can't deal with empty bins,
# because the free energy is infinite.
for i in range(nbins):
if np.sum(bin_n == i) == 0:
raise ParameterError(
"At least one bin in provided bin_n argument has no samples. All bins must have samples for free energies to be finite. Adjust bin sizes or eliminate empty bins to ensure at least one sample per bin.")
K = self.K
# Unroll to n-indices
log_w_n = self._computeUnnormalizedLogWeights(u_n)
# Compute the free energies for these states.
f_i = np.zeros([nbins], np.float64)
for i in range(nbins):
# Get linear n-indices of samples that fall in this bin.
indices = np.where(bin_n[self.indices] == i)[0]
# Sanity check.
if (len(indices) == 0):
raise "WARNING: bin %d has no samples -- all bins must have at least one sample." % i
# Compute dimensionless free energy of occupying state i.
f_i[i] = - _logsum(log_w_n[indices])
# Shift so that f_i.min() = 0
f_i_min = f_i.min()
f_i -= f_i.min()
if self.verbose:
print("bins f_i = ")
print(f_i)
# Compute uncertainties by forming matrix of W_nk.
if self.verbose:
print("Forming W_nk matrix...")
N_k = np.zeros([self.K + nbins], np.int32)
N_k[0:K] = self.N_k
W_nk = np.zeros([self.N, self.K + nbins], np.float64)
W_nk[:, 0:K] = np.exp(self.Log_W_nk)
for i in range(nbins):
# Get indices of samples that fall in this bin.
indices = np.where(bin_n[self.indices] == i)[0]
if self.verbose:
print("bin %5d count = %10d" % (i, len(indices)))
# Compute normalized weights for this state.
W_nk[indices, K + i] = np.exp(
log_w_n[indices] + f_i[i] + f_i_min)
# Compute asymptotic covariance matrix using specified method.
Theta_ij = self._computeAsymptoticCovarianceMatrix(W_nk, N_k)
# Compute uncertainties with respect to difference in free energy from
# this state j.
diag = Theta_ij.diagonal()
dii = diag[0, K:K + nbins]
d2f_ij = dii + dii.transpose() - 2 * Theta_ij[K:K + nbins, K:K + nbins]
# Return dimensionless free energy and uncertainty.
return (f_i, d2f_ij)
#=========================================================================
# PRIVATE METHODS - INTERFACES ARE NOT EXPORTED
#=========================================================================
def _computeWeights(self, logform=False, include_nonzero=False, recalc_denom=True, return_f_k=False):
"""Compute the normalized weights corresponding to samples for the given reduced potential.
Compute the normalized weights corresponding to samples for the given reduced potential.
Also stores the all_log_denom array for reuse.
Parameters
----------
logform : bool, optional
Whether the output is in logarithmic form, which is better for stability, though sometimes
the exponential form is required.
include_nonzero : bool, optional
whether to compute weights for all states, including those with zero samples. Not necessary
when performing self-consistent iteration, which only uses states with samples.
recalc_denom : bool, optional
recalculate the denominator, must be done if the free energies change.
default is to do it, so that errors are not made. But can be turned
off if it is known the free energies have not changed.
return_f_k : bool, optional
return the self-consistent f_k values
Returns
-------
if logform==True:
Log_W_nk (double) - Log_W_nk[n,k] is the normalized log weight of sample n from state k.
else:
W_nk (double) - W_nk[n,k] is the normalized weight of sample n from state k.
if return_f_k==True:
optionally return the self-consistent free energy from these weights.
"""
if (include_nonzero):
f_k = self.f_k
K = self.K
else:
f_k = self.f_k[self.states_with_samples]
K = len(self.states_with_samples)
# array of either weights or normalized log weights
Warray_nk = np.zeros([self.N, K], dtype=np.float64)
if (return_f_k):
f_k_out = np.zeros([K], dtype=np.float64)
if (recalc_denom):
self.log_weight_denom = self._computeUnnormalizedLogWeights(
np.zeros([self.N], dtype=np.float64))
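# The normalized log weight of sample n in state k (Eq. 9 of [1]) is
# f_k - u_k(x_n) - log sum_k' N_k' exp(f_k' - u_k'(x_n)); self.log_weight_denom
# caches the negative log denominator so it can be reused across states (and across
# calls when recalc_denom is False).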
for k in range(K):
if (include_nonzero):
index = k
else:
index = self.states_with_samples[k]
log_w_n = -self.u_kn[index, :] + self.log_weight_denom + f_k[k]
if (return_f_k):
f_k_out[k] = f_k[k] - _logsum(log_w_n)
if (include_nonzero):
# renormalize the weights, needed for nonzero states.
log_w_n += (f_k_out[k] - f_k[k])
if (logform):
Warray_nk[:, k] = log_w_n
else:
Warray_nk[:, k] = np.exp(log_w_n)
# Return weights (or log weights)
if (return_f_k):
f_k_out[:] = f_k_out[:] - f_k_out[0]
return Warray_nk, f_k_out
else:
return Warray_nk
#=========================================================================
def _pseudoinverse(self, A, tol=1.0e-10):
"""
Compute the Moore-Penrose pseudoinverse.
REQUIRED ARGUMENTS
A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed
RETURN VALUES
Ainv (np KxK matrix) - the pseudoinverse
OPTIONAL VALUES
tol - the tolerance (relative to the largest magnitude singular value) below which singular values are not included in forming the pseudoinverse (default: 1.0e-10)
NOTES
This implementation is provided because the 'pinv' function of np is broken in the version we were using.
TODO
Can we get rid of this and use np.linalg.pinv instead?
"""
# DEBUG
# TODO: Should we use pinv, or _pseudoinverse?
# return np.linalg.pinv(A)
# Get size
[M, N] = A.shape
if N != M:
raise "pseudoinverse can only be computed for square matrices: dimensions were %d x %d" % (
M, N)
# Make sure A contains no nan.
if(np.any(np.isnan(A))):
print("attempted to compute pseudoinverse of A =")
print(A)
raise ParameterError("A contains nan.")
# DEBUG
diagonal_loading = False
if diagonal_loading:
# Modify matrix by diagonal loading.
eigs = linalg.eigvalsh(A)
most_negative_eigenvalue = eigs.min()
if (most_negative_eigenvalue < 0.0):
print("most negative eigenvalue = %e" % most_negative_eigenvalue)
# Choose loading value.
gamma = -most_negative_eigenvalue * 1.05
# Modify Theta by diagonal loading
A += gamma * np.eye(A.shape[0])
# Compute SVD of A.
[U, S, Vt] = linalg.svd(A)
# Compute the pseudoinverse by inverting the nonzero singular
# values.
Ainv = np.matrix(np.zeros([M, M], dtype=np.float64))
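# Assemble A^+ = sum_k (1/S[k]) V[:, k] U[:, k]^T, keeping only singular values above
# the relative tolerance.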
for k in range(M):
if (abs(S[k]) > tol * abs(S[0])):
Ainv += (1.0/S[k]) * np.outer(U[:, k], Vt[k, :]).T
return Ainv
#=========================================================================
def _zerosamestates(self, A):
"""
zeros out states that should be identical
REQUIRED ARGUMENTS
A: the matrix whose entries are to be zeroed.
"""
for pair in self.samestates:
A[pair[0], pair[1]] = 0
A[pair[1], pair[0]] = 0
#=========================================================================
def _computeAsymptoticCovarianceMatrix(self, W, N_k, method=None):
"""
Compute estimate of the asymptotic covariance matrix.
REQUIRED ARGUMENTS
W (np.array of np.float of dimension [N,K]) - matrix of normalized weights (see Eq. 9 of [1]) - W[n,k] is the weight of snapshot n (n = 1..N) in state k
Note that sum(W(:,k)) = 1 for any k = 1..K, and sum(N_k(:) .* W(n,:)) = 1 for any n.
N_k (np.array of np.int32 of dimension [K]) - N_k[k] is the number of samples from state k
RETURN VALUES
Theta (KxK np float64 array) - asymptotic covariance matrix (see Eq. 8 of [1])
OPTIONAL ARGUMENTS
method (string) - if not None, specified method is used to compute asymptotic covariance method:
method must be one of ['generalized-inverse', 'svd', 'svd-ew', 'inverse', 'tan-HGH', 'tan', 'approximate']
If None is specified, 'svd-ew' is used.
NOTES
The computational costs of the various 'method' arguments varies:
'generalized-inverse' currently requires computation of the pseudoinverse of an NxN matrix (where N is the total number of samples)
'svd' computes the generalized inverse using the singular value decomposition -- this should be efficient yet accurate (faster)
'svd-ew' is the same as 'svd', but uses the eigenvalue decomposition of W'W to bypass the need to perform an SVD (fastest)
'inverse' only requires standard inversion of a KxK matrix (where K is the number of states), but requires all K states to be different
'approximate' only requires multiplication of KxN and NxK matrices, but is an approximate underestimate of the uncertainty
'tan' uses a simplified form that requires two pseudoinversions, but can be unstable
'tan-HGH' makes weaker assumptions on 'tan' but can occasionally be unstable
REFERENCE
See Section II and Appendix D of [1].
"""
# Set 'svd-ew' as default if uncertainty method specified as None.
if method == None:
method = 'svd-ew'
# Get dimensions of weight matrix.
[N, K] = W.shape
# Check dimensions
if(K != N_k.size):
raise ParameterError(
'W must be NxK, where N_k is a K-dimensional array.')
if(np.sum(N_k) != N):
raise ParameterError('W must be NxK, where N = sum_k N_k.')
# Check to make sure the weight matrix W is properly normalized.
tolerance = 1.0e-4 # tolerance for checking equality of sums
column_sums = np.sum(W, axis=0)
badcolumns = (np.abs(column_sums - 1) > tolerance)
if np.any(badcolumns):
which_badcolumns = np.arange(K)[badcolumns]
firstbad = which_badcolumns[0]
raise ParameterError(
'Warning: Should have \sum_n W_nk = 1. Actual column sum for state %d was %f. %d other columns have similar problems' %
(firstbad, column_sums[firstbad], np.sum(badcolumns)))
row_sums = np.sum(W * N_k, axis=1)
badrows = (np.abs(row_sums - 1) > tolerance)
if np.any(badrows):
which_badrows = np.arange(N)[badrows]
firstbad = which_badrows[0]
raise ParameterError(
'Warning: Should have \sum_k N_k W_nk = 1. Actual row sum for sample %d was %f. %d other rows have similar problems' %
(firstbad, row_sums[firstbad], np.sum(badrows)))
# Compute estimate of asymptotic covariance matrix using specified
# method.
if method == 'generalized-inverse':
# Use generalized inverse (Eq. 8 of [1]) -- most general
# Theta = W' (I - W N W')^+ W
# Construct matrices
# Diagonal N_k matrix.
Ndiag = np.matrix(np.diag(N_k), dtype=np.float64)
W = np.matrix(W, dtype=np.float64)
I = np.identity(N, dtype=np.float64)
# Compute covariance
Theta = W.T * self._pseudoinverse(I - W * Ndiag * W.T) * W
elif method == 'inverse':
# Use standard inverse method (Eq. D8 of [1]) -- only applicable if all K states are different
# Theta = [(W'W)^-1 - N + 1 1'/N]^-1
# Construct matrices
# Diagonal N_k matrix.
Ndiag = np.matrix(np.diag(N_k), dtype=np.float64)
W = np.matrix(W, dtype=np.float64)
# I = np.identity(N, dtype=np.float64) # Don't unnecessarily allocate O(N^2) memory. See Issue #106
# matrix of ones, times 1/N
O = np.ones([K, K], dtype=np.float64) / float(N)
# Make sure W is nonsingular.
if (abs(linalg.det(W.T * W)) < tolerance):
print("Warning: W'W appears to be singular, yet 'inverse' method of uncertainty estimation requires W contain no duplicate states.")
# Compute covariance
Theta = ((W.T * W).I - Ndiag + O).I
elif method == 'approximate':
# Use fast approximate expression from Kong et al. -- this underestimates the true covariance, but may be a good approximation in some cases and requires no matrix inversions
# Theta = P'P
# Construct matrices
W = np.matrix(W, dtype=np.float64)
# Compute covariance
Theta = W.T * W
elif method == 'svd':
# Use singular value decomposition based approach given in supplementary material to efficiently compute uncertainty
# See Appendix D.1, Eq. D4 in [1].
# Construct matrices
Ndiag = np.matrix(np.diag(N_k), dtype=np.float64)
W = np.matrix(W, dtype=np.float64)
I = np.identity(K, dtype=np.float64)
# Compute SVD of W
[U, S, Vt] = linalg.svd(W)
Sigma = np.matrix(np.diag(S))
V = np.matrix(Vt).T
# Compute covariance
Theta = V * Sigma * self._pseudoinverse(
I - Sigma * V.T * Ndiag * V * Sigma) * Sigma * V.T
elif method == 'svd-ew':
# Use singular value decomposition based approach given in supplementary material to efficiently compute uncertainty
# The eigenvalue decomposition of W'W is used to forego computing the SVD.
# See Appendix D.1, Eqs. D4 and D5 of [1].
# Construct matrices
Ndiag = np.matrix(np.diag(N_k), dtype=np.float64)
W = np.matrix(W, dtype=np.float64)
I = np.identity(K, dtype=np.float64)
# Compute singular values and right singular vectors of W without using SVD
# Instead, we compute eigenvalues and eigenvectors of W'W.
# Note W'W = (U S V')'(U S V') = V S' U' U S V' = V (S'S) V'
[S2, V] = linalg.eigh(W.T * W)
# Set any slightly negative eigenvalues to zero.
S2[np.where(S2 < 0.0)] = 0.0
# Form matrix of singular values Sigma, and V.
Sigma = np.matrix(np.diag(np.sqrt(S2)))
V = np.matrix(V)
# Compute covariance
Theta = V * Sigma * self._pseudoinverse(
I - Sigma * V.T * Ndiag * V * Sigma) * Sigma * V.T
elif method == 'tan-HGH':
# Use method suggested by Zhiqiang Tan without further simplification.
# TODO: There may be a problem here -- double-check this.
[N, K] = W.shape
# Estimate O matrix from W'W.
W = np.matrix(W, dtype=np.float64)
O = W.T * W
# Assemble the Lambda matrix.
Lambda = np.matrix(np.diag(N_k), dtype=np.float64)
# Identity matrix.
I = np.matrix(np.eye(K), dtype=np.float64)
# Compute H and G matrices.
H = O * Lambda - I
G = O - O * Lambda * O
# Compute pseudoinverse of H
Hinv = self._pseudoinverse(H)
# Compute estimate of asymptotic covariance.
Theta = Hinv * G * Hinv.T
elif method == 'tan':
# Use method suggested by Zhiqiang Tan.
# Estimate O matrix from W'W.
W = np.matrix(W, dtype=np.float64)
O = W.T * W
# Assemble the Lambda matrix.
Lambda = np.matrix(np.diag(N_k), dtype=np.float64)
# Compute covariance.
Oinv = self._pseudoinverse(O)
Theta = self._pseudoinverse(Oinv - Lambda)
else:
# Raise an exception.
raise ParameterError('Method ' + method + ' unrecognized.')
return Theta
#=========================================================================
def _initializeFreeEnergies(self, verbose=False, method='zeros'):
"""
Compute an initial guess at the relative free energies.
OPTIONAL ARGUMENTS
verbose (boolean) - If True, will print debug information (default: False)
method (string) - Method for initializing guess at free energies.
'zeros' - all free energies are initially set to zero
'mean-reduced-potential' - the mean reduced potential is used
"""
if (method == 'zeros'):
# Use zeros for initial free energies.
if verbose:
print("Initializing free energies to zero.")
self.f_k[:] = 0.0
elif (method == 'mean-reduced-potential'):
# Compute initial guess at free energies from the mean reduced
# potential from each state
if verbose:
print("Initializing free energies with mean reduced potential for each state.")
means = np.zeros([self.K], float)
for k in self.states_with_samples:
means[k] = self.u_kn[k, 0:self.N_k[k]].mean()
if (np.max(np.abs(means)) < 0.000001):
print("Warning: All mean reduced potentials are close to zero. If you are using energy differences in the u_kln matrix, then the mean reduced potentials will be zero, and this is expected behavoir.")
self.f_k = means
elif (method == 'BAR'):
# For now, make a simple list of those states with samples.
initialization_order = np.where(self.N_k > 0)[0]
# Initialize all f_k to zero.
self.f_k[:] = 0.0
# Initialize the rest
for index in range(0, np.size(initialization_order) - 1):
k = initialization_order[index]
l = initialization_order[index + 1]
# forward work
# here, we actually need to distinguish which states are which
w_F = (
self.u_kn[l,self.x_kindices==k] - self.u_kn[k,self.x_kindices==k])
#self.u_kln[k, l, 0:self.N_k[k]] - self.u_kln[k, k, 0:self.N_k[k]])
# reverse work
w_R = (
self.u_kn[k,self.x_kindices==l] - self.u_kn[l,self.x_kindices==l])
#self.u_kln[l, k, 0:self.N_k[l]] - self.u_kln[l, l, 0:self.N_k[l]])
if (len(w_F) > 0 and len(w_R) > 0):
# BAR solution doesn't need to be incredibly accurate to
# kickstart NR.
import pymbar.bar
self.f_k[l] = self.f_k[k] + pymbar.bar.BAR(
w_F, w_R, relative_tolerance=0.000001, verbose=False, compute_uncertainty=False)
else:
# no states observed, so we don't need to initialize this free energy anyway, as
# the solution is noniterative.
self.f_k[l] = 0
else:
# The specified method is not implemented.
raise ParameterError('Method ' + method + ' unrecognized.')
# Shift all free energies such that f_0 = 0.
self.f_k[:] = self.f_k[:] - self.f_k[0]
return
#=========================================================================
def _computeUnnormalizedLogWeights(self, u_n):
"""
Return unnormalized log weights.
REQUIRED ARGUMENTS
u_n (N np float64 array) - reduced potential energies at single state
OPTIONAL ARGUMENTS
RETURN VALUES
log_w_n (N array) - unnormalized log weights of each of a number of states
REFERENCE
'log weights' here refers to \log [ \sum_{k=1}^K N_k exp[f_k - (u_k(x_n) - u(x_n)] ]
"""
if (self.use_embedded_helper_code):
# Use embedded C++ optimizations.
import _pymbar
# necessary for helper code to interpret type of u_kn
u_n = np.array(u_n, dtype=np.float64)
log_w_n = _pymbar.computeUnnormalizedLogWeightsCpp(
self.K, self.N, self.K_nonzero, self.states_with_samples, self.N_k, self.f_k, self.u_kn, u_n)
else:
try:
from scipy import weave
# Allocate storage for return values.
log_w_n = np.zeros([self.N], dtype=np.float64)
# Copy useful class members to local variables.
K = self.K
f_k = self.f_k
N = self.N
N_k = self.N_k
u_kn = self.u_kn
# Weave inline C++ code.
code = """
double log_terms[%(K)d]; // temporary storage for log terms
for (int n = 0; n < N; n++) {
double max_log_term = 0.0;
bool first_nonzero = true;
for (int k = 0; k < K; k++) {
// skip empty states
if (N_K1(k) == 0) continue;
double log_term = log(N_K1(k)) + F_K1(k) - U_KN2(k,n) + U_N1(n);
log_terms[k] = log_term;
if (first_nonzero || (log_term > max_log_term)) {
max_log_term = log_term;
first_nonzero = false;
}
}
double term_sum = 0.0;
for (int k = 0; k < K; k++) {
// skip empty states
if (N_K1(k) == 0) continue;
term_sum += exp(log_terms[k] - max_log_term);
}
double log_term_sum = log(term_sum) + max_log_term;
LOG_W_N1(n) = - log_term_sum;
}
""" % vars()
# Execute inline C code with weave.
info = weave.inline(
code, ['K', 'N', 'N_k', 'u_n', 'u_kn', 'f_k', 'log_w_n'], headers=['<math.h>', '<stdlib.h>'], verbose=2)
except:
# Compute unnormalized log weights in pure Python.
log_w_n = np.zeros([self.N], dtype=np.float64)
for n in range(0, self.N):
log_w_n[n] = - _logsum(np.log(self.N_k[self.states_with_samples]) +
self.f_k[self.states_with_samples] -
(self.u_kn[self.states_with_samples, n] - u_n[n]))
return log_w_n
#=========================================================================
def _amIdoneIterating(self, f_k_new, relative_tolerance, iteration, maximum_iterations, print_warning, verbose):
"""
Convenience function to test whether we are done iterating, same for all iteration types
REQUIRED ARGUMENTS
f_k_new (array): new free energies
f_k (array) : older free energies
relative_tolerance (float): the relative tolerance for terminating
verbose (bool): verbose response
iterations (int): current number of iterations
print_warning (bool): sometimes, we want to suppress the warning.
RETURN VALUES
yesIam (bool): indicates that the iteration has converged.
"""
yesIam = False
# Compute change from old to new estimate.
Delta_f_k = f_k_new - self.f_k[self.states_with_samples]
# Check convergence criteria.
# Terminate when max((f - fold) / f) < relative_tolerance for all
# nonzero f.
max_delta = np.max(
np.abs(Delta_f_k) / np.max(np.abs(f_k_new)))
# Update stored free energies.
f_k = f_k_new.copy()
self.f_k[self.states_with_samples] = f_k
# write out current estimate
if verbose:
print("current f_k for states with samples =")
print(f_k)
print("relative max_delta = %e" % max_delta)
# Check convergence criteria.
# Terminate when max((f - fold) / f) < relative_tolerance for all
# nonzero f.
if np.isnan(max_delta) or (max_delta < relative_tolerance):
yesIam = True
if (yesIam):
# Report convergence, or warn user if convergence was not achieved.
if np.all(self.f_k == 0.0):
# all f_k appear to be zero
print('WARNING: All f_k appear to be zero.')
elif (max_delta < relative_tolerance):
# Convergence achieved.
if verbose:
print('Converged to tolerance of %e in %d iterations.' % (max_delta, iteration + 1))
elif (print_warning):
# Warn that convergence was not achieved.
# many times, self-consistent iteration is used in conjunction with another program. In that case,
# we don't really need to warn about anything, since we are not
# running it to convergence.
print('WARNING: Did not converge to within specified tolerance.')
print('max_delta = %e, TOLERANCE = %e, MAX_ITS = %d, iterations completed = %d' % (max_delta, relative_tolerance, maximum_iterations, iteration))
return yesIam
#=========================================================================
def _selfConsistentIteration(self, relative_tolerance=1.0e-6, maximum_iterations=1000, verbose=True, print_warning=False):
"""
Determine free energies by self-consistent iteration.
OPTIONAL ARGUMENTS
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-5)
maximum_iterations (int) - maximum number of self-consistent iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
Self-consistent iteration of the MBAR equations is used, as described in Appendix C.1 of [1].
"""
# Iteratively update dimensionless free energies until convergence to
# specified tolerance, or maximum allowed number of iterations has been
# exceeded.
if verbose:
print("MBAR: Computing dimensionless free energies by iteration. This may take from seconds to minutes, depending on the quantity of data...")
for iteration in range(0, maximum_iterations):
if verbose:
print('Self-consistent iteration %d' % iteration)
# compute the free energies by self consistent iteration (which
# also involves calculating the weights)
(W_nk, f_k_new) = self._computeWeights(
logform=True, return_f_k=True)
if self._amIdoneIterating(
f_k_new, relative_tolerance, iteration,
maximum_iterations, print_warning, verbose):
break
return
# commenting out likelihood minimization for now
"""
#=============================================================================================
def _minimizeLikelihood(self, relative_tolerance=1.0e-6, maximum_iterations=10000, verbose=True, print_warning = True):
Determine dimensionless free energies by combined self-consistent and NR iteration, choosing the 'best' each step.
OPTIONAL ARGUMENTS
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-6)
maximum_iterations (int) - maximum number of minimizer iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
This method determines the dimensionless free energies by minimizing a convex function whose solution is the desired estimator.
The original idea came from the construction of a likelihood function that independently reproduced the work of Geyer (see [1]
and Section 6 of [2]).
This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
REFERENCES
See Appendix C.2 of [1].
if verbose: print "Determining dimensionless free energies by LBFG minimization"
# Number of states with samples.
K = self.states_with_samples.size
if verbose:
print "There are %d states with samples." % K
# Free energies
f_k = self.f_k[self.states_with_samples].copy()
# Samples
N_k = self.N_k[self.states_with_samples].copy()
from scipy import optimize
results = optimize.fmin_cg(self._objectiveF,f_k,fprime=self._gradientF,gtol=relative_tolerance, full_output=verbose,disp=verbose,maxiter=maximum_iterations)
# doesn't matter what starting point is -- it's determined by what is stored in self, not by 'dum'
#results = optimize.fmin(self._objectiveF,f_k,xtol=relative_tolerance, full_output=verbose,disp=verbose,maxiter=maximum_iterations)
self.f_k = results[0]
if verbose:
print "Obtained free energies by likelihood minimization"
return
"""
#=========================================================================
def _adaptive(self, gamma=1.0, relative_tolerance=1.0e-8, maximum_iterations=1000, verbose=True, print_warning=True):
"""
Determine dimensionless free energies by a combination of Newton-Raphson iteration and self-consistent iteration.
Picks whichever method gives the lowest gradient.
It is slower than NR alone (approximated, not calculated) since it calculates the log norms twice each iteration.
OPTIONAL ARGUMENTS
gamma (float between 0 and 1) - incrementor for NR iterations.
relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-6)
maximum_iterations (int) - maximum number of Newton-Raphson iterations (default 1000)
verbose (boolean) - verbosity level for debug output
NOTES
This method determines the dimensionless free energies by minimizing a convex function whose solution is the desired estimator.
The original idea came from the construction of a likelihood function that independently reproduced the work of Geyer (see [1]
and Section 6 of [2]).
This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
More details of this procedure will follow in a subsequent paper.
Only those states with nonzero counts are included in the estimation procedure.
REFERENCES
See Appendix C.2 of [1].
"""
if verbose:
print("Determining dimensionless free energies by Newton-Raphson iteration.")
# keep track of Newton-Raphson and self-consistent iterations
nr_iter = 0
sci_iter = 0
N_k = self.N_k[self.states_with_samples]
K = len(N_k)
f_k_sci = np.zeros([K], dtype=np.float64)
f_k_new = np.zeros([K], dtype=np.float64)
# Perform Newton-Raphson iterations (with sci computed on the way)
for iteration in range(0, maximum_iterations):
# Store for new estimate of dimensionless relative free energies.
f_k = self.f_k[self.states_with_samples].copy()
# compute weights for gradients: the denominators and free energies are from the previous
# iteration in most cases.
(W_nk, f_k_sci) = self._computeWeights(
recalc_denom=(iteration == 0), return_f_k = True)
# Compute gradient and Hessian of last (K-1) states.
#
# gradient (defined by Eq. C6 of [1])
# g_i(theta) = N_i - \sum_n N_i W_ni
#
# Hessian (defined by Eq. C9 of [1])
# H_ii(theta) = - \sum_n N_i W_ni (1 - N_i W_ni)
# H_ij(theta) = \sum_n N_i W_ni N_j W_nj
#
"""
g = np.matrix(np.zeros([K-1,1], dtype=np.float64)) # gradient
H = np.matrix(np.zeros([K-1,K-1], dtype=np.float64)) # Hessian
for i in range(1,K):
g[i-1] = N_k[i] - N_k[i] * W_nk[:,i].sum()
H[i-1,i-1] = - (N_k[i] * W_nk[:,i] * (1.0 - N_k[i] * W_nk[:,i])).sum()
for j in range(1,i):
H[i-1,j-1] = (N_k[i] * W_nk[:,i] * N_k[j] * W_nk[:,j]).sum()
H[j-1,i-1] = H[i-1,j-1]
# Update the free energy estimate (Eq. C11 of [1]).
Hinvg = linalg.lstsq(H,g)[0] #
# Hinvg = linalg.solve(H,g) # This might be faster if we can guarantee full rank.
for k in range(0,K-1):
f_k_new[k+1] = f_k[k+1] - gamma*Hinvg[k]
"""
g = N_k - N_k * W_nk.sum(axis=0)
NW = N_k * W_nk
H = np.dot(NW.T, NW)
H += (g.T - N_k) * np.eye(K)
# Update the free energy estimate (Eq. C11 of [1]).
# will always have lower rank the way it is set up
Hinvg = linalg.lstsq(H, g)[0]
Hinvg -= Hinvg[0]
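# Free energies are only determined up to an additive constant, so anchor the
# Newton-Raphson update to state 0.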
f_k_new = f_k - gamma * Hinvg
# self-consistent iteration gradient norm and saved log sums.
g_sci = self._gradientF(f_k_sci)
gnorm_sci = np.dot(g_sci, g_sci)
# save this so we can switch it back in if g_sci is lower.
log_weight_denom = self.log_weight_denom.copy()
# newton raphson gradient norm and saved log sums.
g_nr = self._gradientF(f_k_new)
gnorm_nr = np.dot(g_nr, g_nr)
# we could save the gradient, too, but it's not too expensive to
# compute since we are doing the Hessian anyway.
if verbose:
print("self consistent iteration gradient norm is %10.5g, Newton-Raphson gradient norm is %10.5g" % (gnorm_sci, gnorm_nr))
# decide which direction to go depending on the size of the gradient norm
if (gnorm_sci < gnorm_nr or sci_iter < 2):
sci_iter += 1
self.log_weight_denom = log_weight_denom.copy()
if verbose:
if sci_iter < 2:
print("Choosing self-consistent iteration on iteration %d" % iteration)
else:
print("Choosing self-consistent iteration for lower gradient on iteration %d" % iteration)
f_k_new = f_k_sci.copy()
else:
nr_iter += 1
if verbose:
print("Newton-Raphson used on iteration %d" % iteration)
# get rid of big matrices that are not used.
del log_weight_denom, NW, W_nk
# have to set the free energies back in self, since the gradient
# routine changes them.
self.f_k[self.states_with_samples] = f_k
if (self._amIdoneIterating(f_k_new, relative_tolerance, iteration, maximum_iterations, print_warning, verbose)):
if verbose:
print('Of %d iterations, %d were Newton-Raphson iterations and %d were self-consistent iterations' % (iteration + 1, nr_iter, sci_iter))
break
return
#=========================================================================
def _objectiveF(self, f_k):
# gradient to integrate is: g_i = N_i - N_i \sum_{n=1}^N W_{ni}
# = N_i - N_i \sum_{n=1}^N exp(f_i-u_i) / \sum_{k=1} N_k exp(f_k-u_k)
# If we take F = \sum_{k=1}^{K} N_k f_k - \sum_{n=1}^N \ln [\sum_{k=1}^{K} N_k exp(f_k-u_k)]
# then:
# dF/df_i = N_i - \sum_{n=1}^N \frac{1}{\sum_{k=1} N_k exp(f_k-u_k)} d/df_i [\sum_{k=1} N_k exp(f_k-u_k)]
# = N_i - \sum_{n=1}^N \frac{1}{\sum_{k=1} N_k exp(f_k-u_k)} N_i exp(f_i-u_i)
# = N_i - N_i\sum_{n=1}^N \frac{exp(f_i-u_i)}{\sum_{k=1} N_k exp(f_k-u_k)}
# = N_i - N_i\sum_{n=1}^N W_{ni}
# return the negative of F so that minimizing the returned value maximizes F
self.f_k[self.states_with_samples] = f_k
return -(np.dot(self.N_k[self.states_with_samples], f_k) + np.sum(self._computeUnnormalizedLogWeights(np.zeros([self.states_with_samples, self.N]))))
#=========================================================================
def _gradientF(self, f_k):
# take into account entries with zero samples
self.f_k[self.states_with_samples] = f_k
K = len(self.states_with_samples)
W_nk = self._computeWeights(recalc_denom=True)
| 12,510 | lcc_e | python | null | 71bfe59ca4860459f952a18d9b6e5157e15660d9f44959f0 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
FTP tests.
"""
import os
import errno
from StringIO import StringIO
import getpass
from zope.interface import implements
from zope.interface.verify import verifyClass
from twisted.trial import unittest, util
from twisted.python.randbytes import insecureRandom
from twisted.cred.portal import IRealm
from twisted.protocols import basic
from twisted.internet import reactor, task, protocol, defer, error
from twisted.internet.interfaces import IConsumer
from twisted.cred.error import UnauthorizedLogin
from twisted.cred import portal, checkers, credentials
from twisted.python import failure, filepath, runtime
from twisted.test import proto_helpers
from twisted.protocols import ftp, loopback
_changeDirectorySuppression = util.suppress(
category=DeprecationWarning,
message=(
r"FTPClient\.changeDirectory is deprecated in Twisted 8\.2 and "
r"newer\. Use FTPClient\.cwd instead\."))
if runtime.platform.isWindows():
nonPOSIXSkip = "Cannot run on Windows"
else:
nonPOSIXSkip = None
class Dummy(basic.LineReceiver):
logname = None
def __init__(self):
self.lines = []
self.rawData = []
def connectionMade(self):
self.f = self.factory # to save typing in pdb :-)
def lineReceived(self, line):
self.lines.append(line)
def rawDataReceived(self, data):
self.rawData.append(data)
def lineLengthExceeded(self, line):
pass
class _BufferingProtocol(protocol.Protocol):
def connectionMade(self):
self.buffer = ''
self.d = defer.Deferred()
def dataReceived(self, data):
self.buffer += data
def connectionLost(self, reason):
self.d.callback(self)
class FTPServerTestCase(unittest.TestCase):
"""
Simple tests for an FTP server with the default settings.
@ivar clientFactory: class used as ftp client.
"""
clientFactory = ftp.FTPClientBasic
userAnonymous = "anonymous"
def setUp(self):
# Create a directory
self.directory = self.mktemp()
os.mkdir(self.directory)
self.dirPath = filepath.FilePath(self.directory)
# Start the server
p = portal.Portal(ftp.FTPRealm(
anonymousRoot=self.directory,
userHome=self.directory,
))
p.registerChecker(checkers.AllowAnonymousAccess(),
credentials.IAnonymous)
users_checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
self.username = "test-user"
self.password = "test-password"
users_checker.addUser(self.username, self.password)
p.registerChecker(users_checker, credentials.IUsernamePassword)
self.factory = ftp.FTPFactory(portal=p,
userAnonymous=self.userAnonymous)
port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
self.addCleanup(port.stopListening)
# Hook the server's buildProtocol to make the protocol instance
# accessible to tests.
buildProtocol = self.factory.buildProtocol
d1 = defer.Deferred()
def _rememberProtocolInstance(addr):
# Done hooking this.
del self.factory.buildProtocol
protocol = buildProtocol(addr)
self.serverProtocol = protocol.wrappedProtocol
def cleanupServer():
if self.serverProtocol.transport is not None:
self.serverProtocol.transport.loseConnection()
self.addCleanup(cleanupServer)
d1.callback(None)
return protocol
self.factory.buildProtocol = _rememberProtocolInstance
# Connect a client to it
portNum = port.getHost().port
clientCreator = protocol.ClientCreator(reactor, self.clientFactory)
d2 = clientCreator.connectTCP("127.0.0.1", portNum)
def gotClient(client):
self.client = client
self.addCleanup(self.client.transport.loseConnection)
d2.addCallback(gotClient)
return defer.gatherResults([d1, d2])
def assertCommandResponse(self, command, expectedResponseLines,
chainDeferred=None):
"""Asserts that a sending an FTP command receives the expected
response.
Returns a Deferred. Optionally accepts a deferred to chain its actions
to.
"""
if chainDeferred is None:
chainDeferred = defer.succeed(None)
def queueCommand(ignored):
d = self.client.queueStringCommand(command)
def gotResponse(responseLines):
self.assertEqual(expectedResponseLines, responseLines)
return d.addCallback(gotResponse)
return chainDeferred.addCallback(queueCommand)
def assertCommandFailed(self, command, expectedResponse=None,
chainDeferred=None):
if chainDeferred is None:
chainDeferred = defer.succeed(None)
def queueCommand(ignored):
return self.client.queueStringCommand(command)
chainDeferred.addCallback(queueCommand)
self.assertFailure(chainDeferred, ftp.CommandFailed)
def failed(exception):
if expectedResponse is not None:
self.assertEqual(
expectedResponse, exception.args[0])
return chainDeferred.addCallback(failed)
def _anonymousLogin(self):
d = self.assertCommandResponse(
'USER anonymous',
['331 Guest login ok, type your email address as password.'])
return self.assertCommandResponse(
'PASS test@twistedmatrix.com',
['230 Anonymous login ok, access restrictions apply.'],
chainDeferred=d)
def _userLogin(self):
"""Authenticates the FTP client using the test account."""
d = self.assertCommandResponse(
'USER %s' % (self.username),
['331 Password required for %s.' % (self.username)])
return self.assertCommandResponse(
'PASS %s' % (self.password),
['230 User logged in, proceed'],
chainDeferred=d)
class FTPAnonymousTestCase(FTPServerTestCase):
"""
Simple tests for an FTP server with different anonymous username.
The new anonymous username used in this test case is "guest"
"""
userAnonymous = "guest"
def test_anonymousLogin(self):
"""
Tests that changing the anonymous username works.
The FTP server should not demand a password for the username 'guest';
it should let it log in as anonymous, asking just an email address as
password.
"""
d = self.assertCommandResponse(
'USER guest',
['331 Guest login ok, type your email address as password.'])
return self.assertCommandResponse(
'PASS test@twistedmatrix.com',
['230 Anonymous login ok, access restrictions apply.'],
chainDeferred=d)
class BasicFTPServerTestCase(FTPServerTestCase):
def testNotLoggedInReply(self):
"""
When not logged in, most commands other than USER and PASS should
get NOT_LOGGED_IN errors, but some can be called before USER and PASS.
"""
loginRequiredCommandList = ['CDUP', 'CWD', 'LIST', 'MODE', 'PASV',
'PWD', 'RETR', 'STRU', 'SYST', 'TYPE']
loginNotRequiredCommandList = ['FEAT']
# Issue commands, check responses
def checkFailResponse(exception, command):
failureResponseLines = exception.args[0]
self.failUnless(failureResponseLines[-1].startswith("530"),
"%s - Response didn't start with 530: %r"
% (command, failureResponseLines[-1],))
def checkPassResponse(result, command):
result = result[0]
self.failIf(result.startswith("530"),
"%s - Response start with 530: %r"
% (command, result,))
deferreds = []
for command in loginRequiredCommandList:
deferred = self.client.queueStringCommand(command)
self.assertFailure(deferred, ftp.CommandFailed)
deferred.addCallback(checkFailResponse, command)
deferreds.append(deferred)
for command in loginNotRequiredCommandList:
deferred = self.client.queueStringCommand(command)
deferred.addCallback(checkPassResponse, command)
deferreds.append(deferred)
return defer.DeferredList(deferreds, fireOnOneErrback=True)
def testPASSBeforeUSER(self):
"""
Issuing PASS before USER should give an error.
"""
return self.assertCommandFailed(
'PASS foo',
["503 Incorrect sequence of commands: "
"USER required before PASS"])
def testNoParamsForUSER(self):
"""
Issuing USER without a username is a syntax error.
"""
return self.assertCommandFailed(
'USER',
['500 Syntax error: USER requires an argument.'])
def testNoParamsForPASS(self):
"""
Issuing PASS without a password is a syntax error.
"""
d = self.client.queueStringCommand('USER foo')
return self.assertCommandFailed(
'PASS',
['500 Syntax error: PASS requires an argument.'],
chainDeferred=d)
def testAnonymousLogin(self):
return self._anonymousLogin()
def testQuit(self):
"""
Issuing QUIT should return a 221 message.
"""
d = self._anonymousLogin()
return self.assertCommandResponse(
'QUIT',
['221 Goodbye.'],
chainDeferred=d)
def testAnonymousLoginDenied(self):
# Reconfigure the server to disallow anonymous access, and to have an
# IUsernamePassword checker that always rejects.
self.factory.allowAnonymous = False
denyAlwaysChecker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
self.factory.portal.registerChecker(denyAlwaysChecker,
credentials.IUsernamePassword)
# Same response code as allowAnonymous=True, but different text.
d = self.assertCommandResponse(
'USER anonymous',
['331 Password required for anonymous.'])
# It will be denied. No-one can log in.
d = self.assertCommandFailed(
'PASS test@twistedmatrix.com',
['530 Sorry, Authentication failed.'],
chainDeferred=d)
# It's not just saying that. You aren't logged in.
d = self.assertCommandFailed(
'PWD',
['530 Please login with USER and PASS.'],
chainDeferred=d)
return d
def test_anonymousWriteDenied(self):
"""
When an anonymous user attempts to edit the server-side filesystem, they
will receive a 550 error with a descriptive message.
"""
d = self._anonymousLogin()
return self.assertCommandFailed(
'MKD newdir',
['550 Anonymous users are forbidden to change the filesystem'],
chainDeferred=d)
def testUnknownCommand(self):
d = self._anonymousLogin()
return self.assertCommandFailed(
'GIBBERISH',
["502 Command 'GIBBERISH' not implemented"],
chainDeferred=d)
def testRETRBeforePORT(self):
d = self._anonymousLogin()
return self.assertCommandFailed(
'RETR foo',
["503 Incorrect sequence of commands: "
"PORT or PASV required before RETR"],
chainDeferred=d)
def testSTORBeforePORT(self):
d = self._anonymousLogin()
return self.assertCommandFailed(
'STOR foo',
["503 Incorrect sequence of commands: "
"PORT or PASV required before STOR"],
chainDeferred=d)
def testBadCommandArgs(self):
d = self._anonymousLogin()
self.assertCommandFailed(
'MODE z',
["504 Not implemented for parameter 'z'."],
chainDeferred=d)
self.assertCommandFailed(
'STRU I',
["504 Not implemented for parameter 'I'."],
chainDeferred=d)
return d
def testDecodeHostPort(self):
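# decodeHostPort splits six comma-separated byte values: the first four form
# the dotted-quad host and the port is high_byte * 256 + low_byte
# (100 * 256 + 23 == 25623 below).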
self.assertEqual(ftp.decodeHostPort('25,234,129,22,100,23'),
('25.234.129.22', 25623))
nums = range(6)
for i in range(6):
badValue = list(nums)
badValue[i] = 256
s = ','.join(map(str, badValue))
self.assertRaises(ValueError, ftp.decodeHostPort, s)
def testPASV(self):
# Login
wfd = defer.waitForDeferred(self._anonymousLogin())
yield wfd
wfd.getResult()
# Issue a PASV command, and extract the host and port from the response
pasvCmd = defer.waitForDeferred(self.client.queueStringCommand('PASV'))
yield pasvCmd
responseLines = pasvCmd.getResult()
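# The 227 reply embeds the host and port as the same six comma-separated
# integers used by PORT, so decodeHostPort can extract them from the reply text.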
host, port = ftp.decodeHostPort(responseLines[-1][4:])
# Make sure the server is listening on the port it claims to be
self.assertEqual(port, self.serverProtocol.dtpPort.getHost().port)
# Semi-reasonable way to force cleanup
self.serverProtocol.transport.loseConnection()
testPASV = defer.deferredGenerator(testPASV)
def test_SYST(self):
"""SYST command will always return UNIX Type: L8"""
d = self._anonymousLogin()
self.assertCommandResponse('SYST', ["215 UNIX Type: L8"],
chainDeferred=d)
return d
def test_RNFRandRNTO(self):
"""
Sending the RNFR command followed by RNTO, with valid filenames, will
perform a successful rename operation.
"""
# Create user home folder with a 'foo' file.
self.dirPath.child(self.username).createDirectory()
self.dirPath.child(self.username).child('foo').touch()
d = self._userLogin()
self.assertCommandResponse(
'RNFR foo',
["350 Requested file action pending further information."],
chainDeferred=d)
self.assertCommandResponse(
'RNTO bar',
["250 Requested File Action Completed OK"],
chainDeferred=d)
def check_rename(result):
self.assertTrue(
self.dirPath.child(self.username).child('bar').exists())
return result
d.addCallback(check_rename)
return d
def test_RNFRwithoutRNTO(self):
"""
Sending the RNFR command followed by any command other than RNTO
should return an error informing users that RNFR should be followed
by RNTO.
"""
d = self._anonymousLogin()
self.assertCommandResponse(
'RNFR foo',
["350 Requested file action pending further information."],
chainDeferred=d)
self.assertCommandFailed(
'OTHER don-tcare',
["503 Incorrect sequence of commands: RNTO required after RNFR"],
chainDeferred=d)
return d
def test_portRangeForwardError(self):
"""
Exceptions other than L{error.CannotListenError} which are raised by
C{listenFactory} should be raised to the caller of L{FTP.getDTPPort}.
"""
def listenFactory(portNumber, factory):
raise RuntimeError()
self.serverProtocol.listenFactory = listenFactory
self.assertRaises(RuntimeError, self.serverProtocol.getDTPPort,
protocol.Factory())
def test_portRange(self):
"""
L{FTP.passivePortRange} should determine the ports which
L{FTP.getDTPPort} attempts to bind. If no port from that iterator can
be bound, L{error.CannotListenError} should be raised, otherwise the
first successful result from L{FTP.listenFactory} should be returned.
"""
def listenFactory(portNumber, factory):
if portNumber in (22032, 22033, 22034):
raise error.CannotListenError('localhost', portNumber, 'error')
return portNumber
self.serverProtocol.listenFactory = listenFactory
port = self.serverProtocol.getDTPPort(protocol.Factory())
self.assertEqual(port, 0)
self.serverProtocol.passivePortRange = xrange(22032, 65536)
port = self.serverProtocol.getDTPPort(protocol.Factory())
self.assertEqual(port, 22035)
self.serverProtocol.passivePortRange = xrange(22032, 22035)
self.assertRaises(error.CannotListenError,
self.serverProtocol.getDTPPort,
protocol.Factory())
def test_portRangeInheritedFromFactory(self):
"""
The L{FTP} instances created by L{ftp.FTPFactory.buildProtocol} have
their C{passivePortRange} attribute set to the same object the
factory's C{passivePortRange} attribute is set to.
"""
portRange = xrange(2017, 2031)
self.factory.passivePortRange = portRange
protocol = self.factory.buildProtocol(None)
self.assertEqual(portRange, protocol.wrappedProtocol.passivePortRange)
def testFEAT(self):
"""
When the server receives 'FEAT', it should report the list of supported
features. (Additionally, ensure that the server reports various
particular features that are supported by all Twisted FTP servers.)
"""
d = self.client.queueStringCommand('FEAT')
def gotResponse(responseLines):
self.assertEqual('211-Features:', responseLines[0])
self.assertTrue(' MDTM' in responseLines)
self.assertTrue(' PASV' in responseLines)
self.assertTrue(' TYPE A;I' in responseLines)
self.assertTrue(' SIZE' in responseLines)
self.assertEqual('211 End', responseLines[-1])
return d.addCallback(gotResponse)
def testOPTS(self):
"""
When the server receives 'OPTS something', it should report
that the FTP server does not support the option called 'something'.
"""
d = self._anonymousLogin()
self.assertCommandFailed(
'OPTS something',
["502 Option 'something' not implemented."],
chainDeferred=d,
)
return d
class FTPServerTestCaseAdvancedClient(FTPServerTestCase):
"""
Test FTP server with the L{ftp.FTPClient} class.
"""
clientFactory = ftp.FTPClient
def test_anonymousSTOR(self):
"""
Try a STOR as anonymous, and check that we get a permission denied
error.
"""
def eb(res):
res.trap(ftp.CommandFailed)
self.assertEqual(res.value.args[0][0],
'550 foo: Permission denied.')
d1, d2 = self.client.storeFile('foo')
d2.addErrback(eb)
return defer.gatherResults([d1, d2])
def test_STORwriteError(self):
"""
Any errors during writing a file inside a STOR should be returned to
the client.
"""
# Make a failing file writer.
class FailingFileWriter(ftp._FileWriter):
def receive(self):
return defer.fail(ftp.IsNotADirectoryError("blah"))
def failingSTOR(a, b):
return defer.succeed(FailingFileWriter(None))
# Monkey patch the shell so it returns a file writer that will
# fail.
self.patch(ftp.FTPAnonymousShell, 'openForWriting', failingSTOR)
def eb(res):
self.flushLoggedErrors()
res.trap(ftp.CommandFailed)
self.assertEqual(
res.value.args[0][0],
"550 Cannot rmd, blah is not a directory")
d1, d2 = self.client.storeFile('failing_file')
d2.addErrback(eb)
return defer.gatherResults([d1, d2])
def test_RETRreadError(self):
"""
Any errors during reading a file inside a RETR should be returned to
the client.
"""
# Make a failing file reading.
class FailingFileReader(ftp._FileReader):
def send(self, consumer):
return defer.fail(ftp.IsADirectoryError("blah"))
def failingRETR(a, b):
return defer.succeed(FailingFileReader(None))
# Monkey patch the shell so it returns a file reader that will
# fail.
self.patch(ftp.FTPAnonymousShell, 'openForReading', failingRETR)
def check_response(failure):
self.flushLoggedErrors()
failure.trap(ftp.CommandFailed)
self.assertEqual(
failure.value.args[0][0],
"125 Data connection already open, starting transfer")
self.assertEqual(
failure.value.args[0][1],
"550 blah: is a directory")
proto = _BufferingProtocol()
d = self.client.retrieveFile('failing_file', proto)
d.addErrback(check_response)
return d
class FTPServerPasvDataConnectionTestCase(FTPServerTestCase):
def _makeDataConnection(self, ignored=None):
# Establish a passive data connection (i.e. client connecting to
# server).
d = self.client.queueStringCommand('PASV')
def gotPASV(responseLines):
host, port = ftp.decodeHostPort(responseLines[-1][4:])
cc = protocol.ClientCreator(reactor, _BufferingProtocol)
return cc.connectTCP('127.0.0.1', port)
return d.addCallback(gotPASV)
def _download(self, command, chainDeferred=None):
if chainDeferred is None:
chainDeferred = defer.succeed(None)
chainDeferred.addCallback(self._makeDataConnection)
def queueCommand(downloader):
# wait for the command to return, and the download connection to be
# closed.
d1 = self.client.queueStringCommand(command)
d2 = downloader.d
return defer.gatherResults([d1, d2])
chainDeferred.addCallback(queueCommand)
def downloadDone((ignored, downloader)):
return downloader.buffer
return chainDeferred.addCallback(downloadDone)
def test_LISTEmpty(self):
"""
When listing empty folders, LIST returns an empty response.
"""
d = self._anonymousLogin()
# No files, so the file listing should be empty
self._download('LIST', chainDeferred=d)
def checkEmpty(result):
self.assertEqual('', result)
return d.addCallback(checkEmpty)
def test_LISTWithBinLsFlags(self):
"""
LIST ignores requests for folders with names like '-al' and lists
the content of the current folder.
"""
os.mkdir(os.path.join(self.directory, 'foo'))
os.mkdir(os.path.join(self.directory, 'bar'))
# Login
d = self._anonymousLogin()
self._download('LIST -aL', chainDeferred=d)
def checkDownload(download):
names = []
for line in download.splitlines():
names.append(line.split(' ')[-1])
self.assertEqual(2, len(names))
self.assertIn('foo', names)
self.assertIn('bar', names)
return d.addCallback(checkDownload)
def test_LISTWithContent(self):
"""
LIST returns all folder's members, each member listed on a separate
line and with name and other details.
"""
os.mkdir(os.path.join(self.directory, 'foo'))
os.mkdir(os.path.join(self.directory, 'bar'))
# Login
d = self._anonymousLogin()
# We expect 2 lines because there are two files.
self._download('LIST', chainDeferred=d)
def checkDownload(download):
self.assertEqual(2, len(download[:-2].split('\r\n')))
d.addCallback(checkDownload)
# Download a names-only listing.
self._download('NLST ', chainDeferred=d)
def checkDownload(download):
filenames = download[:-2].split('\r\n')
filenames.sort()
self.assertEqual(['bar', 'foo'], filenames)
d.addCallback(checkDownload)
# Download a listing of the 'foo' subdirectory. 'foo' has no files, so
# the file listing should be empty.
self._download('LIST foo', chainDeferred=d)
def checkDownload(download):
self.assertEqual('', download)
d.addCallback(checkDownload)
# Change the current working directory to 'foo'.
def chdir(ignored):
return self.client.queueStringCommand('CWD foo')
d.addCallback(chdir)
# Download a listing from within 'foo', and again it should be empty,
# because LIST uses the working directory by default.
self._download('LIST', chainDeferred=d)
def checkDownload(download):
self.assertEqual('', download)
return d.addCallback(checkDownload)
def testManyLargeDownloads(self):
# Login
d = self._anonymousLogin()
# Download a range of different size files
for size in range(100000, 110000, 500):
fObj = file(os.path.join(self.directory, '%d.txt' % (size,)), 'wb')
fObj.write('x' * size)
fObj.close()
self._download('RETR %d.txt' % (size,), chainDeferred=d)
def checkDownload(download, size=size):
self.assertEqual(size, len(download))
d.addCallback(checkDownload)
return d
def test_downloadFolder(self):
"""
When RETR is called for a folder, it will fail complaining that
the path is a folder.
"""
# Make a directory in the current working directory
self.dirPath.child('foo').createDirectory()
# Login
d = self._anonymousLogin()
d.addCallback(self._makeDataConnection)
def retrFolder(downloader):
downloader.transport.loseConnection()
deferred = self.client.queueStringCommand('RETR foo')
return deferred
d.addCallback(retrFolder)
def failOnSuccess(result):
raise AssertionError('Downloading a folder should not succeed.')
d.addCallback(failOnSuccess)
def checkError(failure):
failure.trap(ftp.CommandFailed)
self.assertEqual(
['550 foo: is a directory'], failure.value.message)
current_errors = self.flushLoggedErrors()
self.assertEqual(
0, len(current_errors),
'No errors should be logged while downloading a folder.')
d.addErrback(checkError)
return d
def test_NLSTEmpty(self):
"""
NLST with no argument returns the directory listing for the current
working directory.
"""
# Login
d = self._anonymousLogin()
# Touch a file in the current working directory
self.dirPath.child('test.txt').touch()
# Make a directory in the current working directory
self.dirPath.child('foo').createDirectory()
self._download('NLST ', chainDeferred=d)
def checkDownload(download):
filenames = download[:-2].split('\r\n')
filenames.sort()
self.assertEqual(['foo', 'test.txt'], filenames)
return d.addCallback(checkDownload)
def test_NLSTNonexistent(self):
"""
NLST on a non-existent file/directory returns nothing.
"""
# Login
d = self._anonymousLogin()
self._download('NLST nonexistent.txt', chainDeferred=d)
def checkDownload(download):
self.assertEqual('', download)
return d.addCallback(checkDownload)
def test_NLSTOnPathToFile(self):
"""
NLST on an existing file returns only the path to that file.
"""
# Login
d = self._anonymousLogin()
# Touch a file in the current working directory
self.dirPath.child('test.txt').touch()
self._download('NLST test.txt', chainDeferred=d)
def checkDownload(download):
filenames = download[:-2].split('\r\n')
self.assertEqual(['test.txt'], filenames)
return d.addCallback(checkDownload)
class FTPServerPortDataConnectionTestCase(FTPServerPasvDataConnectionTestCase):
def setUp(self):
self.dataPorts = []
return FTPServerPasvDataConnectionTestCase.setUp(self)
def _makeDataConnection(self, ignored=None):
# Establish an active data connection (i.e. server connecting to
# client).
deferred = defer.Deferred()
class DataFactory(protocol.ServerFactory):
protocol = _BufferingProtocol
def buildProtocol(self, addr):
p = protocol.ServerFactory.buildProtocol(self, addr)
reactor.callLater(0, deferred.callback, p)
return p
dataPort = reactor.listenTCP(0, DataFactory(), interface='127.0.0.1')
self.dataPorts.append(dataPort)
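# encodeHostPort is the inverse of decodeHostPort: it renders the host and
# port as the six comma-separated byte values expected by the PORT command.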
cmd = 'PORT ' + ftp.encodeHostPort('127.0.0.1', dataPort.getHost().port)
self.client.queueStringCommand(cmd)
return deferred
def tearDown(self):
l = [defer.maybeDeferred(port.stopListening) for port in self.dataPorts]
d = defer.maybeDeferred(
FTPServerPasvDataConnectionTestCase.tearDown, self)
l.append(d)
return defer.DeferredList(l, fireOnOneErrback=True)
def testPORTCannotConnect(self):
# Login
d = self._anonymousLogin()
# Listen on a port, and immediately stop listening as a way to find a
# port number that is definitely closed.
def loggedIn(ignored):
port = reactor.listenTCP(0, protocol.Factory(),
interface='127.0.0.1')
portNum = port.getHost().port
d = port.stopListening()
d.addCallback(lambda _: portNum)
return d
d.addCallback(loggedIn)
# Tell the server to connect to that port with a PORT command, and
# verify that it fails with the right error.
def gotPortNum(portNum):
return self.assertCommandFailed(
'PORT ' + ftp.encodeHostPort('127.0.0.1', portNum),
["425 Can't open data connection."])
return d.addCallback(gotPortNum)
def test_nlstGlobbing(self):
"""
When Unix shell globbing is used with NLST, only files matching the
pattern will be returned.
"""
self.dirPath.child('test.txt').touch()
self.dirPath.child('ceva.txt').touch()
self.dirPath.child('no.match').touch()
d = self._anonymousLogin()
self._download('NLST *.txt', chainDeferred=d)
def checkDownload(download):
filenames = download[:-2].split('\r\n')
filenames.sort()
self.assertEqual(['ceva.txt', 'test.txt'], filenames)
return d.addCallback(checkDownload)
class DTPFactoryTests(unittest.TestCase):
"""
Tests for L{ftp.DTPFactory}.
"""
def setUp(self):
"""
Create a fake protocol interpreter and a L{ftp.DTPFactory} instance to
test.
"""
self.reactor = task.Clock()
class ProtocolInterpreter(object):
dtpInstance = None
self.protocolInterpreter = ProtocolInterpreter()
self.factory = ftp.DTPFactory(
self.protocolInterpreter, None, self.reactor)
def test_setTimeout(self):
"""
L{ftp.DTPFactory.setTimeout} uses the reactor passed to its initializer
to set up a timed event to time out the DTP setup after the specified
number of seconds.
"""
# Make sure the factory's deferred fails with the right exception, and
# make it so we can tell exactly when it fires.
finished = []
d = self.assertFailure(self.factory.deferred, ftp.PortConnectionError)
d.addCallback(finished.append)
self.factory.setTimeout(6)
# Advance the clock almost to the timeout
self.reactor.advance(5)
# Nothing should have happened yet.
self.assertFalse(finished)
# Advance it to the configured timeout.
self.reactor.advance(1)
# Now the Deferred should have failed with TimeoutError.
self.assertTrue(finished)
# There should also be no calls left in the reactor.
self.assertFalse(self.reactor.calls)
def test_buildProtocolOnce(self):
"""
A L{ftp.DTPFactory} instance's C{buildProtocol} method can be used once
to create a L{ftp.DTP} instance.
"""
protocol = self.factory.buildProtocol(None)
self.assertIsInstance(protocol, ftp.DTP)
# A subsequent call returns None.
self.assertIdentical(self.factory.buildProtocol(None), None)
def test_timeoutAfterConnection(self):
"""
If a timeout has been set up using L{ftp.DTPFactory.setTimeout}, it is
cancelled by L{ftp.DTPFactory.buildProtocol}.
"""
self.factory.setTimeout(10)
protocol = self.factory.buildProtocol(None)
# Make sure the call is no longer active.
self.assertFalse(self.reactor.calls)
def test_connectionAfterTimeout(self):
"""
If L{ftp.DTPFactory.buildProtocol} is called after the timeout
specified by L{ftp.DTPFactory.setTimeout} has elapsed, C{None} is
returned.
"""
# Handle the error so it doesn't get logged.
d = self.assertFailure(self.factory.deferred, ftp.PortConnectionError)
# Set up the timeout and then cause it to elapse so the Deferred does
# fail.
self.factory.setTimeout(10)
self.reactor.advance(10)
# Try to get a protocol - we should not be able to.
self.assertIdentical(self.factory.buildProtocol(None), None)
# Make sure the Deferred is doing the right thing.
return d
def test_timeoutAfterConnectionFailed(self):
"""
L{ftp.DTPFactory.deferred} fails with L{PortConnectionError} when
L{ftp.DTPFactory.clientConnectionFailed} is called. If the timeout
specified with L{ftp.DTPFactory.setTimeout} expires after that, nothing
additional happens.
"""
finished = []
d = self.assertFailure(self.factory.deferred, ftp.PortConnectionError)
d.addCallback(finished.append)
self.factory.setTimeout(10)
self.assertFalse(finished)
self.factory.clientConnectionFailed(None, None)
self.assertTrue(finished)
self.reactor.advance(10)
return d
def test_connectionFailedAfterTimeout(self):
"""
If L{ftp.DTPFactory.clientConnectionFailed} is called after the timeout
specified by L{ftp.DTPFactory.setTimeout} has elapsed, nothing beyond
the normal timeout behavior happens.
"""
# Handle the error so it doesn't get logged.
d = self.assertFailure(self.factory.deferred, ftp.PortConnectionError)
# Set up the timeout and then cause it to elapse so the Deferred does
# fail.
self.factory.setTimeout(10)
self.reactor.advance(10)
# Now fail the connection attempt. This should do nothing. In
# particular, it should not raise an exception.
self.factory.clientConnectionFailed(None, defer.TimeoutError("foo"))
# Give the Deferred to trial so it can make sure it did what we
# expected.
return d
# -- Client Tests -----------------------------------------------------------
class PrintLines(protocol.Protocol):
"""Helper class used by FTPFileListingTests."""
def __init__(self, lines):
self._lines = lines
def connectionMade(self):
for line in self._lines:
self.transport.write(line + "\r\n")
self.transport.loseConnection()
class MyFTPFileListProtocol(ftp.FTPFileListProtocol):
def __init__(self):
self.other = []
ftp.FTPFileListProtocol.__init__(self)
def unknownLine(self, line):
self.other.append(line)
class FTPFileListingTests(unittest.TestCase):
def getFilesForLines(self, lines):
fileList = MyFTPFileListProtocol()
d = loopback.loopbackAsync(PrintLines(lines), fileList)
d.addCallback(lambda _: (fileList.files, fileList.other))
return d
def testOneLine(self):
# This example line taken from the docstring for FTPFileListProtocol
line = '-rw-r--r-- 1 root other 531 Jan 29 03:26 README'
def check(((file,), other)):
self.failIf(other, 'unexpected unparsable lines: %s' % repr(other))
self.failUnless(file['filetype'] == '-', 'misparsed fileitem')
self.failUnless(file['perms'] == 'rw-r--r--', 'misparsed perms')
self.failUnless(file['owner'] == 'root', 'misparsed fileitem')
self.failUnless(file['group'] == 'other', 'misparsed fileitem')
self.failUnless(file['size'] == 531, 'misparsed fileitem')
self.failUnless(file['date'] == 'Jan 29 03:26', 'misparsed fileitem')
self.failUnless(file['filename'] == 'README', 'misparsed fileitem')
self.failUnless(file['nlinks'] == 1, 'misparsed nlinks')
self.failIf(file['linktarget'], 'misparsed linktarget')
return self.getFilesForLines([line]).addCallback(check)
def testVariantLines(self):
line1 = 'drw-r--r-- 2 root other 531 Jan 9 2003 A'
line2 = 'lrw-r--r-- 1 root other 1 Jan 29 03:26 B -> A'
line3 = 'woohoo! '
def check(((file1, file2), (other,))):
self.failUnless(other == 'woohoo! \r', 'incorrect other line')
# file 1
self.failUnless(file1['filetype'] == 'd', 'misparsed fileitem')
self.failUnless(file1['perms'] == 'rw-r--r--', 'misparsed perms')
self.failUnless(file1['owner'] == 'root', 'misparsed owner')
self.failUnless(file1['group'] == 'other', 'misparsed group')
self.failUnless(file1['size'] == 531, 'misparsed size')
self.failUnless(file1['date'] == 'Jan 9 2003', 'misparsed date')
self.failUnless(file1['filename'] == 'A', 'misparsed filename')
self.failUnless(file1['nlinks'] == 2, 'misparsed nlinks')
self.failIf(file1['linktarget'], 'misparsed linktarget')
# file 2
self.failUnless(file2['filetype'] == 'l', 'misparsed fileitem')
self.failUnless(file2['perms'] == 'rw-r--r--', 'misparsed perms')
self.failUnless(file2['owner'] == 'root', 'misparsed owner')
self.failUnless(file2['group'] == 'other', 'misparsed group')
self.failUnless(file2['size'] == 1, 'misparsed size')
self.failUnless(file2['date'] == 'Jan 29 03:26', 'misparsed date')
self.failUnless(file2['filename'] == 'B', 'misparsed filename')
self.failUnless(file2['nlinks'] == 1, 'misparsed nlinks')
self.failUnless(file2['linktarget'] == 'A', 'misparsed linktarget')
return self.getFilesForLines([line1, line2, line3]).addCallback(check)
def testUnknownLine(self):
def check((files, others)):
self.failIf(files, 'unexpected file entries')
self.failUnless(others == ['ABC\r', 'not a file\r'],
'incorrect unparsable lines: %s' % repr(others))
return self.getFilesForLines(['ABC', 'not a file']).addCallback(check)
def test_filenameWithUnescapedSpace(self):
'''
Will parse filenames and linktargets containing unescaped
space characters.
'''
line1 = 'drw-r--r-- 2 root other 531 Jan 9 2003 A B'
line2 = (
'lrw-r--r-- 1 root other 1 Jan 29 03:26 '
'B A -> D C/A B'
)
def check((files, others)):
self.assertEqual([], others, 'unexpected others entries')
self.assertEqual(
'A B', files[0]['filename'], 'misparsed filename')
self.assertEqual(
'B A', files[1]['filename'], 'misparsed filename')
self.assertEqual(
'D C/A B', files[1]['linktarget'], 'misparsed linktarget')
return self.getFilesForLines([line1, line2]).addCallback(check)
def test_filenameWithEscapedSpace(self):
'''
Will parse filenames and linktargets containing escaped
space characters.
'''
line1 = 'drw-r--r-- 2 root other 531 Jan 9 2003 A\ B'
line2 = (
'lrw-r--r-- 1 root other 1 Jan 29 03:26 '
'B A -> D\ C/A B'
)
def check((files, others)):
self.assertEqual([], others, 'unexpected others entries')
self.assertEqual(
'A B', files[0]['filename'], 'misparsed filename')
self.assertEqual(
'B A', files[1]['filename'], 'misparsed filename')
self.assertEqual(
'D C/A B', files[1]['linktarget'], 'misparsed linktarget')
return self.getFilesForLines([line1, line2]).addCallback(check)
def testYear(self):
# This example derived from bug description in issue 514.
fileList = ftp.FTPFileListProtocol()
exampleLine = (
'-rw-r--r-- 1 root other 531 Jan 29 2003 README\n')
class PrintLine(protocol.Protocol):
def connectionMade(self):
self.transport.write(exampleLine)
self.transport.loseConnection()
def check(ignored):
file = fileList.files[0]
self.failUnless(file['size'] == 531, 'misparsed fileitem')
self.failUnless(file['date'] == 'Jan 29 2003', 'misparsed fileitem')
self.failUnless(file['filename'] == 'README', 'misparsed fileitem')
d = loopback.loopbackAsync(PrintLine(), fileList)
return d.addCallback(check)
class FTPClientTests(unittest.TestCase):
def testFailedRETR(self):
f = protocol.Factory()
f.noisy = 0
port = reactor.listenTCP(0, f, interface="127.0.0.1")
self.addCleanup(port.stopListening)
portNum = port.getHost().port
# This test data derived from a bug report by ranty on #twisted
responses = ['220 ready, dude (vsFTPd 1.0.0: beat me, break me)',
# USER anonymous
'331 Please specify the password.',
# PASS twisted@twistedmatrix.com
'230 Login successful. Have fun.',
# TYPE I
'200 Binary it is, then.',
# PASV
'227 Entering Passive Mode (127,0,0,1,%d,%d)' %
(portNum >> 8, portNum & 0xff),
# RETR /file/that/doesnt/exist
'550 Failed to open file.']
f.buildProtocol = lambda addr: PrintLines(responses)
client = ftp.FTPClient(passive=1)
cc = protocol.ClientCreator(reactor, ftp.FTPClient, passive=1)
d = cc.connectTCP('127.0.0.1', portNum)
def gotClient(client):
p = protocol.Protocol()
return client.retrieveFile('/file/that/doesnt/exist', p)
d.addCallback(gotClient)
return self.assertFailure(d, ftp.CommandFailed)
def test_errbacksUponDisconnect(self):
"""
Test that the FTP command errbacks when the connection is lost during
the operation.
"""
ftpClient = ftp.FTPClient()
tr = proto_helpers.StringTransportWithDisconnection()
ftpClient.makeConnection(tr)
tr.protocol = ftpClient
d = ftpClient.list('some path', Dummy())
m = []
def _eb(failure):
m.append(failure)
return None
d.addErrback(_eb)
from twisted.internet.main import CONNECTION_LOST
ftpClient.connectionLost(failure.Failure(CONNECTION_LOST))
self.failUnless(m, m)
return d
class FTPClientTestCase(unittest.TestCase):
"""
Test advanced FTP client commands.
"""
def setUp(self):
"""
Create a FTP client and connect it to fake transport.
"""
self.client = ftp.FTPClient()
self.transport = proto_helpers.StringTransportWithDisconnection()
self.client.makeConnection(self.transport)
self.transport.protocol = self.client
def tearDown(self):
"""
Deliver disconnection notification to the client so that it can
perform any cleanup which may be required.
"""
self.client.connectionLost(error.ConnectionLost())
def _testLogin(self):
"""
Test the login part.
"""
self.assertEqual(self.transport.value(), '')
self.client.lineReceived(
'331 Guest login ok, type your email address as password.')
self.assertEqual(self.transport.value(), 'USER anonymous\r\n')
self.transport.clear()
self.client.lineReceived(
'230 Anonymous login ok, access restrictions apply.')
self.assertEqual(self.transport.value(), 'TYPE I\r\n')
self.transport.clear()
self.client.lineReceived('200 Type set to I.')
def test_CDUP(self):
"""
Test the CDUP command.
L{ftp.FTPClient.cdup} should return a Deferred which fires with a
sequence of one element which is the string the server sent
indicating that the command was executed successfully.
(XXX - This is a bad API)
"""
def cbCdup(res):
self.assertEqual(res[0], '250 Requested File Action Completed OK')
self._testLogin()
d = self.client.cdup().addCallback(cbCdup)
self.assertEqual(self.transport.value(), 'CDUP\r\n')
self.transport.clear()
self.client.lineReceived('250 Requested File Action Completed OK')
return d
def test_failedCDUP(self):
"""
Test L{ftp.FTPClient.cdup}'s handling of a failed CDUP command.
When the CDUP command fails, the returned Deferred should errback
with L{ftp.CommandFailed}.
"""
self._testLogin()
d = self.client.cdup()
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'CDUP\r\n')
self.transport.clear()
self.client.lineReceived('550 ..: No such file or directory')
return d
def test_PWD(self):
"""
Test the PWD command.
L{ftp.FTPClient.pwd} should return a Deferred which fires with a
sequence of one element which is a string representing the current
working directory on the server.
(XXX - This is a bad API)
"""
def cbPwd(res):
self.assertEqual(ftp.parsePWDResponse(res[0]), "/bar/baz")
self._testLogin()
d = self.client.pwd().addCallback(cbPwd)
self.assertEqual(self.transport.value(), 'PWD\r\n')
self.client.lineReceived('257 "/bar/baz"')
return d
def test_failedPWD(self):
"""
Test a failure in PWD command.
When the PWD command fails, the returned Deferred should errback
with L{ftp.CommandFailed}.
"""
self._testLogin()
d = self.client.pwd()
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'PWD\r\n')
self.client.lineReceived('550 /bar/baz: No such file or directory')
return d
def test_CWD(self):
"""
Test the CWD command.
L{ftp.FTPClient.cwd} should return a Deferred which fires with a
sequence of one element which is the string the server sent
indicating that the command was executed successfully.
(XXX - This is a bad API)
"""
def cbCwd(res):
self.assertEqual(res[0], '250 Requested File Action Completed OK')
self._testLogin()
d = self.client.cwd("bar/foo").addCallback(cbCwd)
self.assertEqual(self.transport.value(), 'CWD bar/foo\r\n')
self.client.lineReceived('250 Requested File Action Completed OK')
return d
def test_failedCWD(self):
"""
Test a failure in the CWD command.
When the CWD command fails, the returned Deferred should errback
with L{ftp.CommandFailed}.
"""
self._testLogin()
d = self.client.cwd("bar/foo")
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'CWD bar/foo\r\n')
self.client.lineReceived('550 bar/foo: No such file or directory')
return d
def test_passiveRETR(self):
"""
Test the RETR command in passive mode: get a file and verify its
content.
L{ftp.FTPClient.retrieveFile} should return a Deferred which fires
with the protocol instance passed to it after the download has
completed.
(XXX - This API should be based on producers and consumers)
"""
def cbRetr(res, proto):
self.assertEqual(proto.buffer, 'x' * 1000)
def cbConnect(host, port, factory):
self.assertEqual(host, '127.0.0.1')
self.assertEqual(port, 12345)
proto = factory.buildProtocol((host, port))
proto.makeConnection(proto_helpers.StringTransport())
self.client.lineReceived(
'150 File status okay; about to open data connection.')
proto.dataReceived("x" * 1000)
proto.connectionLost(failure.Failure(error.ConnectionDone("")))
self.client.connectFactory = cbConnect
self._testLogin()
proto = _BufferingProtocol()
d = self.client.retrieveFile("spam", proto)
d.addCallback(cbRetr, proto)
self.assertEqual(self.transport.value(), 'PASV\r\n')
self.transport.clear()
self.client.lineReceived('227 Entering Passive Mode (%s).' %
(ftp.encodeHostPort('127.0.0.1', 12345),))
self.assertEqual(self.transport.value(), 'RETR spam\r\n')
self.transport.clear()
self.client.lineReceived('226 Transfer Complete.')
return d
def test_RETR(self):
"""
Test the RETR command in non-passive mode.
Like L{test_passiveRETR} but in the configuration where the server
establishes the data connection to the client, rather than the other
way around.
"""
self.client.passive = False
def generatePort(portCmd):
portCmd.text = 'PORT %s' % (ftp.encodeHostPort('127.0.0.1', 9876),)
portCmd.protocol.makeConnection(proto_helpers.StringTransport())
portCmd.protocol.dataReceived("x" * 1000)
portCmd.protocol.connectionLost(
failure.Failure(error.ConnectionDone("")))
def cbRetr(res, proto):
self.assertEqual(proto.buffer, 'x' * 1000)
self.client.generatePortCommand = generatePort
self._testLogin()
proto = _BufferingProtocol()
d = self.client.retrieveFile("spam", proto)
d.addCallback(cbRetr, proto)
self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
(ftp.encodeHostPort('127.0.0.1', 9876),))
self.transport.clear()
self.client.lineReceived('200 PORT OK')
self.assertEqual(self.transport.value(), 'RETR spam\r\n')
self.transport.clear()
self.client.lineReceived('226 Transfer Complete.')
return d
def test_failedRETR(self):
"""
Try to RETR a nonexistent file.
L{ftp.FTPClient.retrieveFile} should return a Deferred which
errbacks with L{ftp.CommandFailed} if the server indicates the file
cannot be transferred for some reason.
"""
def cbConnect(host, port, factory):
self.assertEqual(host, '127.0.0.1')
self.assertEqual(port, 12345)
proto = factory.buildProtocol((host, port))
proto.makeConnection(proto_helpers.StringTransport())
self.client.lineReceived(
'150 File status okay; about to open data connection.')
proto.connectionLost(failure.Failure(error.ConnectionDone("")))
self.client.connectFactory = cbConnect
self._testLogin()
proto = _BufferingProtocol()
d = self.client.retrieveFile("spam", proto)
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'PASV\r\n')
self.transport.clear()
self.client.lineReceived('227 Entering Passive Mode (%s).' %
(ftp.encodeHostPort('127.0.0.1', 12345),))
self.assertEqual(self.transport.value(), 'RETR spam\r\n')
self.transport.clear()
self.client.lineReceived('550 spam: No such file or directory')
return d
def test_lostRETR(self):
"""
Try a RETR, but disconnect during the transfer.
L{ftp.FTPClient.retrieveFile} should return a Deferred which
errbacks with L{ftp.ConnectionLost}.
"""
self.client.passive = False
l = []
def generatePort(portCmd):
portCmd.text = 'PORT %s' % (ftp.encodeHostPort('127.0.0.1', 9876),)
tr = proto_helpers.StringTransportWithDisconnection()
portCmd.protocol.makeConnection(tr)
tr.protocol = portCmd.protocol
portCmd.protocol.dataReceived("x" * 500)
l.append(tr)
self.client.generatePortCommand = generatePort
self._testLogin()
proto = _BufferingProtocol()
d = self.client.retrieveFile("spam", proto)
self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
(ftp.encodeHostPort('127.0.0.1', 9876),))
self.transport.clear()
self.client.lineReceived('200 PORT OK')
self.assertEqual(self.transport.value(), 'RETR spam\r\n')
self.assert_(l)
l[0].loseConnection()
self.transport.loseConnection()
self.assertFailure(d, ftp.ConnectionLost)
return d
def test_passiveSTOR(self):
"""
Test the STOR command: send a file and verify its content.
L{ftp.FTPClient.storeFile} should return a two-tuple of Deferreds.
The first of which should fire with a protocol instance when the
data connection has been established and is responsible for sending
the contents of the file. The second of which should fire when the
upload has completed, the data connection has been closed, and the
server has acknowledged receipt of the file.
(XXX - storeFile should take a producer as an argument, instead, and
only return a Deferred which fires when the upload has succeeded or
failed).
"""
tr = proto_helpers.StringTransport()
def cbStore(sender):
self.client.lineReceived(
'150 File status okay; about to open data connection.')
sender.transport.write("x" * 1000)
sender.finish()
sender.connectionLost(failure.Failure(error.ConnectionDone("")))
def cbFinish(ign):
self.assertEqual(tr.value(), "x" * 1000)
def cbConnect(host, port, factory):
self.assertEqual(host, '127.0.0.1')
self.assertEqual(port, 12345)
proto = factory.buildProtocol((host, port))
proto.makeConnection(tr)
self.client.connectFactory = cbConnect
self._testLogin()
d1, d2 = self.client.storeFile("spam")
d1.addCallback(cbStore)
d2.addCallback(cbFinish)
self.assertEqual(self.transport.value(), 'PASV\r\n')
self.transport.clear()
self.client.lineReceived('227 Entering Passive Mode (%s).' %
(ftp.encodeHostPort('127.0.0.1', 12345),))
self.assertEqual(self.transport.value(), 'STOR spam\r\n')
self.transport.clear()
self.client.lineReceived('226 Transfer Complete.')
return defer.gatherResults([d1, d2])
def test_failedSTOR(self):
"""
Test a failure in the STOR command.
If the server does not acknowledge successful receipt of the
uploaded file, the second Deferred returned by
L{ftp.FTPClient.storeFile} should errback with L{ftp.CommandFailed}.
"""
tr = proto_helpers.StringTransport()
def cbStore(sender):
self.client.lineReceived(
'150 File status okay; about to open data connection.')
sender.transport.write("x" * 1000)
sender.finish()
sender.connectionLost(failure.Failure(error.ConnectionDone("")))
def cbConnect(host, port, factory):
self.assertEqual(host, '127.0.0.1')
self.assertEqual(port, 12345)
proto = factory.buildProtocol((host, port))
proto.makeConnection(tr)
self.client.connectFactory = cbConnect
self._testLogin()
d1, d2 = self.client.storeFile("spam")
d1.addCallback(cbStore)
self.assertFailure(d2, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'PASV\r\n')
self.transport.clear()
self.client.lineReceived('227 Entering Passive Mode (%s).' %
(ftp.encodeHostPort('127.0.0.1', 12345),))
self.assertEqual(self.transport.value(), 'STOR spam\r\n')
self.transport.clear()
self.client.lineReceived(
'426 Transfer aborted. Data connection closed.')
return defer.gatherResults([d1, d2])
def test_STOR(self):
"""
Test the STOR command in non-passive mode.
Like L{test_passiveSTOR} but in the configuration where the server
establishes the data connection to the client, rather than the other
way around.
"""
tr = proto_helpers.StringTransport()
self.client.passive = False
def generatePort(portCmd):
portCmd.text = 'PORT %s' % ftp.encodeHostPort('127.0.0.1', 9876)
portCmd.protocol.makeConnection(tr)
def cbStore(sender):
self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
(ftp.encodeHostPort('127.0.0.1', 9876),))
self.transport.clear()
self.client.lineReceived('200 PORT OK')
self.assertEqual(self.transport.value(), 'STOR spam\r\n')
self.transport.clear()
self.client.lineReceived(
'150 File status okay; about to open data connection.')
sender.transport.write("x" * 1000)
sender.finish()
sender.connectionLost(failure.Failure(error.ConnectionDone("")))
self.client.lineReceived('226 Transfer Complete.')
def cbFinish(ign):
self.assertEqual(tr.value(), "x" * 1000)
self.client.generatePortCommand = generatePort
self._testLogin()
d1, d2 = self.client.storeFile("spam")
d1.addCallback(cbStore)
d2.addCallback(cbFinish)
return defer.gatherResults([d1, d2])
def test_passiveLIST(self):
"""
Test the LIST command.
L{ftp.FTPClient.list} should return a Deferred which fires with a
protocol instance which was passed to list after the command has
succeeded.
(XXX - This is a very unfortunate API; if my understanding is
correct, the results are always at least line-oriented, so allowing
a per-line parser function to be specified would make this simpler,
but a default implementation should really be provided which knows
how to deal with all the formats used in real servers, so
application developers never have to care about this insanity. It
would also be nice to either get back a Deferred of a list of
filenames or to be able to consume the files as they are received
(which the current API does allow, but in a somewhat inconvenient
fashion) -exarkun)
"""
def cbList(res, fileList):
fls = [f["filename"] for f in fileList.files]
expected = ["foo", "bar", "baz"]
expected.sort()
fls.sort()
self.assertEqual(fls, expected)
def cbConnect(host, port, factory):
self.assertEqual(host, '127.0.0.1')
self.assertEqual(port, 12345)
proto = factory.buildProtocol((host, port))
proto.makeConnection(proto_helpers.StringTransport())
self.client.lineReceived(
'150 File status okay; about to open data connection.')
sending = [
'-rw-r--r-- 0 spam egg 100 Oct 10 2006 foo\r\n',
'-rw-r--r-- 3 spam egg 100 Oct 10 2006 bar\r\n',
'-rw-r--r-- 4 spam egg 100 Oct 10 2006 baz\r\n',
]
for i in sending:
proto.dataReceived(i)
proto.connectionLost(failure.Failure(error.ConnectionDone("")))
self.client.connectFactory = cbConnect
self._testLogin()
fileList = ftp.FTPFileListProtocol()
d = self.client.list('foo/bar', fileList).addCallback(cbList, fileList)
self.assertEqual(self.transport.value(), 'PASV\r\n')
self.transport.clear()
self.client.lineReceived('227 Entering Passive Mode (%s).' %
(ftp.encodeHostPort('127.0.0.1', 12345),))
self.assertEqual(self.transport.value(), 'LIST foo/bar\r\n')
self.client.lineReceived('226 Transfer Complete.')
return d
def test_LIST(self):
"""
Test the LIST command in non-passive mode.
Like L{test_passiveLIST} but in the configuration where the server
establishes the data connection to the client, rather than the other
way around.
"""
self.client.passive = False
def generatePort(portCmd):
portCmd.text = 'PORT %s' % (ftp.encodeHostPort('127.0.0.1', 9876),)
portCmd.protocol.makeConnection(proto_helpers.StringTransport())
self.client.lineReceived(
'150 File status okay; about to open data connection.')
sending = [
'-rw-r--r-- 0 spam egg 100 Oct 10 2006 foo\r\n',
'-rw-r--r-- 3 spam egg 100 Oct 10 2006 bar\r\n',
'-rw-r--r-- 4 spam egg 100 Oct 10 2006 baz\r\n',
]
for i in sending:
portCmd.protocol.dataReceived(i)
portCmd.protocol.connectionLost(
failure.Failure(error.ConnectionDone("")))
def cbList(res, fileList):
fls = [f["filename"] for f in fileList.files]
expected = ["foo", "bar", "baz"]
expected.sort()
fls.sort()
self.assertEqual(fls, expected)
self.client.generatePortCommand = generatePort
self._testLogin()
fileList = ftp.FTPFileListProtocol()
d = self.client.list('foo/bar', fileList).addCallback(cbList, fileList)
self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
(ftp.encodeHostPort('127.0.0.1', 9876),))
self.transport.clear()
self.client.lineReceived('200 PORT OK')
self.assertEqual(self.transport.value(), 'LIST foo/bar\r\n')
self.transport.clear()
self.client.lineReceived('226 Transfer Complete.')
return d
def test_failedLIST(self):
"""
Test a failure in LIST command.
L{ftp.FTPClient.list} should return a Deferred which fails with
L{ftp.CommandFailed} if the server indicates that the requested path is
invalid for some reason.
"""
def cbConnect(host, port, factory):
self.assertEqual(host, '127.0.0.1')
self.assertEqual(port, 12345)
proto = factory.buildProtocol((host, port))
proto.makeConnection(proto_helpers.StringTransport())
self.client.lineReceived(
'150 File status okay; about to open data connection.')
proto.connectionLost(failure.Failure(error.ConnectionDone("")))
self.client.connectFactory = cbConnect
self._testLogin()
fileList = ftp.FTPFileListProtocol()
d = self.client.list('foo/bar', fileList)
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'PASV\r\n')
self.transport.clear()
self.client.lineReceived('227 Entering Passive Mode (%s).' %
(ftp.encodeHostPort('127.0.0.1', 12345),))
self.assertEqual(self.transport.value(), 'LIST foo/bar\r\n')
self.client.lineReceived('550 foo/bar: No such file or directory')
return d
def test_NLST(self):
"""
Test the NLST command in non-passive mode.
L{ftp.FTPClient.nlst} should return a Deferred which fires with a
list of filenames when the list command has completed.
"""
self.client.passive = False
def generatePort(portCmd):
portCmd.text = 'PORT %s' % (ftp.encodeHostPort('127.0.0.1', 9876),)
portCmd.protocol.makeConnection(proto_helpers.StringTransport())
self.client.lineReceived(
'150 File status okay; about to open data connection.')
portCmd.protocol.dataReceived('foo\r\n')
portCmd.protocol.dataReceived('bar\r\n')
portCmd.protocol.dataReceived('baz\r\n')
portCmd.protocol.connectionLost(
failure.Failure(error.ConnectionDone("")))
def cbList(res, proto):
fls = proto.buffer.splitlines()
expected = ["foo", "bar", "baz"]
expected.sort()
fls.sort()
self.assertEqual(fls, expected)
self.client.generatePortCommand = generatePort
self._testLogin()
lstproto = _BufferingProtocol()
d = self.client.nlst('foo/bar', lstproto).addCallback(cbList, lstproto)
self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
(ftp.encodeHostPort('127.0.0.1', 9876),))
self.transport.clear()
self.client.lineReceived('200 PORT OK')
self.assertEqual(self.transport.value(), 'NLST foo/bar\r\n')
self.client.lineReceived('226 Transfer Complete.')
return d
def test_passiveNLST(self):
"""
Test the NLST command.
Like L{test_NLST} but in the configuration where the client
establishes the data connection to the server, rather than the other
way around.
"""
def cbList(res, proto):
fls = proto.buffer.splitlines()
expected = ["foo", "bar", "baz"]
expected.sort()
fls.sort()
self.assertEqual(fls, expected)
def cbConnect(host, port, factory):
self.assertEqual(host, '127.0.0.1')
self.assertEqual(port, 12345)
proto = factory.buildProtocol((host, port))
proto.makeConnection(proto_helpers.StringTransport())
self.client.lineReceived(
'150 File status okay; about to open data connection.')
proto.dataReceived('foo\r\n')
proto.dataReceived('bar\r\n')
proto.dataReceived('baz\r\n')
proto.connectionLost(failure.Failure(error.ConnectionDone("")))
self.client.connectFactory = cbConnect
self._testLogin()
lstproto = _BufferingProtocol()
d = self.client.nlst('foo/bar', lstproto).addCallback(cbList, lstproto)
self.assertEqual(self.transport.value(), 'PASV\r\n')
self.transport.clear()
self.client.lineReceived('227 Entering Passive Mode (%s).' %
(ftp.encodeHostPort('127.0.0.1', 12345),))
self.assertEqual(self.transport.value(), 'NLST foo/bar\r\n')
self.client.lineReceived('226 Transfer Complete.')
return d
def test_failedNLST(self):
"""
Test a failure in NLST command.
L{ftp.FTPClient.nlst} should return a Deferred which fails with
L{ftp.CommandFailed} if the server indicates that the requested path is
invalid for some reason.
"""
tr = proto_helpers.StringTransport()
def cbConnect(host, port, factory):
self.assertEqual(host, '127.0.0.1')
self.assertEqual(port, 12345)
proto = factory.buildProtocol((host, port))
proto.makeConnection(tr)
self.client.lineReceived(
'150 File status okay; about to open data connection.')
proto.connectionLost(failure.Failure(error.ConnectionDone("")))
self.client.connectFactory = cbConnect
self._testLogin()
lstproto = _BufferingProtocol()
d = self.client.nlst('foo/bar', lstproto)
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'PASV\r\n')
self.transport.clear()
self.client.lineReceived('227 Entering Passive Mode (%s).' %
(ftp.encodeHostPort('127.0.0.1', 12345),))
self.assertEqual(self.transport.value(), 'NLST foo/bar\r\n')
self.client.lineReceived('550 foo/bar: No such file or directory')
return d
def test_changeDirectoryDeprecated(self):
"""
L{ftp.FTPClient.changeDirectory} is deprecated and the direct caller of
it is warned of this.
"""
self._testLogin()
d = self.assertWarns(
DeprecationWarning,
"FTPClient.changeDirectory is deprecated in Twisted 8.2 and "
"newer. Use FTPClient.cwd instead.",
__file__,
lambda: self.client.changeDirectory('.'))
# This is necessary to make the Deferred fire. The Deferred needs
# to fire so that tearDown doesn't cause it to errback and fail this
# or (more likely) a later test.
self.client.lineReceived('250 success')
return d
def test_changeDirectory(self):
"""
Test the changeDirectory method.
L{ftp.FTPClient.changeDirectory} should return a Deferred which fires
with True if the operation succeeded.
"""
def cbCd(res):
self.assertEqual(res, True)
self._testLogin()
d = self.client.changeDirectory("bar/foo").addCallback(cbCd)
self.assertEqual(self.transport.value(), 'CWD bar/foo\r\n')
self.client.lineReceived('250 Requested File Action Completed OK')
return d
test_changeDirectory.suppress = [_changeDirectorySuppression]
def test_failedChangeDirectory(self):
"""
Test a failure in the changeDirectory method.
The behaviour here is the same as a failed CWD.
"""
self._testLogin()
d = self.client.changeDirectory("bar/foo")
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'CWD bar/foo\r\n')
self.client.lineReceived('550 bar/foo: No such file or directory')
return d
test_failedChangeDirectory.suppress = [_changeDirectorySuppression]
def test_strangeFailedChangeDirectory(self):
"""
Test a strange failure in the changeDirectory method.
L{ftp.FTPClient.changeDirectory} is stricter than CWD as it checks
code 250 for success.
"""
self._testLogin()
d = self.client.changeDirectory("bar/foo")
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'CWD bar/foo\r\n')
self.client.lineReceived('252 I do what I want !')
return d
test_strangeFailedChangeDirectory.suppress = [_changeDirectorySuppression]
def test_renameFromTo(self):
"""
L{ftp.FTPClient.rename} issues I{RNTO} and I{RNFR} commands and returns
a L{Deferred} which fires when a file has successfully been renamed.
"""
self._testLogin()
d = self.client.rename("/spam", "/ham")
self.assertEqual(self.transport.value(), 'RNFR /spam\r\n')
self.transport.clear()
fromResponse = (
'350 Requested file action pending further information.\r\n')
self.client.lineReceived(fromResponse)
self.assertEqual(self.transport.value(), 'RNTO /ham\r\n')
toResponse = (
'250 Requested File Action Completed OK')
self.client.lineReceived(toResponse)
d.addCallback(self.assertEqual, ([fromResponse], [toResponse]))
return d
def test_renameFromToEscapesPaths(self):
"""
L{ftp.FTPClient.rename} issues I{RNTO} and I{RNFR} commands with paths
escaped according to U{http://cr.yp.to/ftp/filesystem.html}.
"""
self._testLogin()
fromFile = "/foo/ba\nr/baz"
toFile = "/qu\nux"
self.client.rename(fromFile, toFile)
self.client.lineReceived("350 ")
self.client.lineReceived("250 ")
self.assertEqual(
self.transport.value(),
"RNFR /foo/ba\x00r/baz\r\n"
"RNTO /qu\x00ux\r\n")
def test_renameFromToFailingOnFirstError(self):
"""
The L{Deferred} returned by L{ftp.FTPClient.rename} is errbacked with
L{CommandFailed} if the I{RNFR} command receives an error response code
(for example, because the file does not exist).
"""
self._testLogin()
d = self.client.rename("/spam", "/ham")
self.assertEqual(self.transport.value(), 'RNFR /spam\r\n')
self.transport.clear()
self.client.lineReceived('550 Requested file unavailable.\r\n')
# The RNTO should not execute since the RNFR failed.
self.assertEqual(self.transport.value(), '')
return self.assertFailure(d, ftp.CommandFailed)
def test_renameFromToFailingOnRenameTo(self):
"""
The L{Deferred} returned by L{ftp.FTPClient.rename} is errbacked with
L{CommandFailed} if the I{RNTO} command receives an error response code
(for example, because the destination directory does not exist).
"""
self._testLogin()
d = self.client.rename("/spam", "/ham")
self.assertEqual(self.transport.value(), 'RNFR /spam\r\n')
self.transport.clear()
self.client.lineReceived('350 Requested file action pending further information.\r\n')
self.assertEqual(self.transport.value(), 'RNTO /ham\r\n')
self.client.lineReceived('550 Requested file unavailable.\r\n')
return self.assertFailure(d, ftp.CommandFailed)
def test_makeDirectory(self):
"""
L{ftp.FTPClient.makeDirectory} issues a I{MKD} command and returns a
L{Deferred} which is called back with the server's response if the
directory is created.
"""
self._testLogin()
d = self.client.makeDirectory("/spam")
self.assertEqual(self.transport.value(), 'MKD /spam\r\n')
self.client.lineReceived('257 "/spam" created.')
return d.addCallback(self.assertEqual, ['257 "/spam" created.'])
def test_makeDirectoryPathEscape(self):
"""
L{ftp.FTPClient.makeDirectory} escapes the path name it sends according
to U{http://cr.yp.to/ftp/filesystem.html}.
"""
self._testLogin()
d = self.client.makeDirectory("/sp\nam")
self.assertEqual(self.transport.value(), 'MKD /sp\x00am\r\n')
# This is necessary to make the Deferred fire. The Deferred needs
# to fire so that tearDown doesn't cause it to errback and fail this
# or (more likely) a later test.
self.client.lineReceived('257 win')
return d
def test_failedMakeDirectory(self):
"""
L{ftp.FTPClient.makeDirectory} returns a L{Deferred} which is errbacked
with L{CommandFailed} if the server returns an error response code.
"""
self._testLogin()
d = self.client.makeDirectory("/spam")
self.assertEqual(self.transport.value(), 'MKD /spam\r\n')
self.client.lineReceived('550 PERMISSION DENIED')
return self.assertFailure(d, ftp.CommandFailed)
def test_getDirectory(self):
"""
Test the getDirectory method.
L{ftp.FTPClient.getDirectory} should return a Deferred which fires with
the current directory on the server. It wraps the I{PWD} command.
"""
def cbGet(res):
self.assertEqual(res, "/bar/baz")
self._testLogin()
d = self.client.getDirectory().addCallback(cbGet)
self.assertEqual(self.transport.value(), 'PWD\r\n')
self.client.lineReceived('257 "/bar/baz"')
return d
def test_failedGetDirectory(self):
"""
Test a failure in the getDirectory method.
The behaviour should be the same as PWD.
"""
self._testLogin()
d = self.client.getDirectory()
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'PWD\r\n')
self.client.lineReceived('550 /bar/baz: No such file or directory')
return d
def test_anotherFailedGetDirectory(self):
"""
Test a different failure in the getDirectory method.
The response must be quoted for it to be parsed; an unquoted response
results in an error.
"""
self._testLogin()
d = self.client.getDirectory()
self.assertFailure(d, ftp.CommandFailed)
self.assertEqual(self.transport.value(), 'PWD\r\n')
self.client.lineReceived('257 /bar/baz')
return d
def test_removeFile(self):
"""
L{ftp.FTPClient.removeFile} sends a I{DELE} command to the server for
the indicated file and returns a Deferred which fires after the server
sends a 250 response code.
"""
self._testLogin()
d = self.client.removeFile("/tmp/test")
self.assertEqual(self.transport.value(), 'DELE /tmp/test\r\n')
response = '250 Requested file action okay, completed.'
self.client.lineReceived(response)
return d.addCallback(self.assertEqual, [response])
def test_failedRemoveFile(self):
"""
If the server returns a response code other than 250 in response to a
I{DELE} sent by L{ftp.FTPClient.removeFile}, the L{Deferred} returned
by C{removeFile} is errbacked with a L{Failure} wrapping a
L{CommandFailed}.
"""
self._testLogin()
d = self.client.removeFile("/tmp/test")
self.assertEqual(self.transport.value(), 'DELE /tmp/test\r\n')
response = '501 Syntax error in parameters or arguments.'
self.client.lineReceived(response)
d = self.assertFailure(d, ftp.CommandFailed)
d.addCallback(lambda exc: self.assertEqual(exc.args, ([response],)))
return d
def test_unparsableRemoveFileResponse(self):
"""
If the server returns a response line which cannot be parsed, the
L{Deferred} returned by L{ftp.FTPClient.removeFile} is errbacked with a
L{BadResponse} containing the response.
"""
self._testLogin()
d = self.client.removeFile("/tmp/test")
response = '765 blah blah blah'
self.client.lineReceived(response)
d = self.assertFailure(d, ftp.BadResponse)
d.addCallback(lambda exc: self.assertEqual(exc.args, ([response],)))
return d
def test_multilineRemoveFileResponse(self):
"""
If the server returns multiple response lines, the L{Deferred} returned
by L{ftp.FTPClient.removeFile} is still fired with a true value if the
ultimate response code is 250.
"""
self._testLogin()
d = self.client.removeFile("/tmp/test")
response = ['250-perhaps a progress report',
'250 okay']
map(self.client.lineReceived, response)
return d.addCallback(self.assertTrue)
def test_removeDirectory(self):
"""
L{ftp.FTPClient.removeDirectory} sends a I{RMD} command to the server
for the indicated directory and returns a Deferred which fires after
the server sends a 250 response code.
"""
self._testLogin()
d = self.client.removeDirectory('/tmp/test')
self.assertEqual(self.transport.value(), 'RMD /tmp/test\r\n')
response = '250 Requested file action okay, completed.'
self.client.lineReceived(response)
return d.addCallback(self.assertEqual, [response])
def test_failedRemoveDirectory(self):
"""
If the server returns a response code other than 250 in response to a
I{RMD} sent by L{ftp.FTPClient.removeDirectory}, the L{Deferred}
returned by C{removeDirectory} is errbacked with a L{Failure} wrapping
a L{CommandFailed}.
"""
self._testLogin()
d = self.client.removeDirectory("/tmp/test")
self.assertEqual(self.transport.value(), 'RMD /tmp/test\r\n')
response = '501 Syntax error in parameters or arguments.'
self.client.lineReceived(response)
d = self.assertFailure(d, ftp.CommandFailed)
d.addCallback(lambda exc: self.assertEqual(exc.args, ([response],)))
return d
def test_unparsableRemoveDirectoryResponse(self):
"""
If the server returns a response line which cannot be parsed, the
L{Deferred} returned by L{ftp.FTPClient.removeDirectory} is errbacked
with a L{BadResponse} containing the response.
"""
self._testLogin()
d = self.client.removeDirectory("/tmp/test")
response = '765 blah blah blah'
self.client.lineReceived(response)
d = self.assertFailure(d, ftp.BadResponse)
d.addCallback(lambda exc: self.assertEqual(exc.args, ([response],)))
return d
def test_multilineRemoveDirectoryResponse(self):
"""
If the server returns multiple response lines, the L{Deferred} returned
by L{ftp.FTPClient.removeDirectory} is still fired with a true value
if the ultimate response code is 250.
"""
self._testLogin()
d = self.client.removeDirectory("/tmp/test")
response = ['250-perhaps a progress report',
'250 okay']
map(self.client.lineReceived, response)
return d.addCallback(self.assertTrue)
class FTPClientBasicTests(unittest.TestCase):
def testGreeting(self):
# The first response is captured as a greeting.
ftpClient = ftp.FTPClientBasic()
ftpClient.lineReceived('220 Imaginary FTP.')
self.assertEqual(['220 Imaginary FTP.'], ftpClient.greeting)
def testResponseWithNoMessage(self):
# Responses with no message are still valid, i.e. three digits followed
# by a space is a complete response.
ftpClient = ftp.FTPClientBasic()
ftpClient.lineReceived('220 ')
self.assertEqual(['220 '], ftpClient.greeting)
def testMultilineResponse(self):
ftpClient = ftp.FTPClientBasic()
ftpClient.transport = proto_helpers.StringTransport()
ftpClient.lineReceived('220 Imaginary FTP.')
# Queue (and send) a dummy command, and set up a callback to capture the
# result
deferred = ftpClient.queueStringCommand('BLAH')
result = []
deferred.addCallback(result.append)
deferred.addErrback(self.fail)
# Send the first line of a multiline response.
ftpClient.lineReceived('210-First line.')
self.assertEqual([], result)
# Send a second line, again prefixed with "nnn-".
ftpClient.lineReceived('123-Second line.')
self.assertEqual([], result)
# Send a plain line of text, no prefix.
ftpClient.lineReceived('Just some text.')
self.assertEqual([], result)
# Now send a short (less than 4 chars) line.
ftpClient.lineReceived('Hi')
self.assertEqual([], result)
# Now send an empty line.
ftpClient.lineReceived('')
self.assertEqual([], result)
# And a line with 3 digits in it, and nothing else.
ftpClient.lineReceived('321')
self.assertEqual([], result)
# Now finish it.
ftpClient.lineReceived('210 Done.')
self.assertEqual(
['210-First line.',
'123-Second line.',
'Just some text.',
'Hi',
'',
'321',
'210 Done.'], result[0])
def test_noPasswordGiven(self):
"""
Passing None as the password avoids sending the PASS command.
"""
# Create a client, and give it a greeting.
ftpClient = ftp.FTPClientBasic()
ftpClient.transport = proto_helpers.StringTransport()
ftpClient.lineReceived('220 Welcome to Imaginary FTP.')
# Queue a login with no password
ftpClient.queueLogin('bob', None)
self.assertEqual('USER bob\r\n', ftpClient.transport.value())
# Clear the test buffer, acknowledge the USER command.
ftpClient.transport.clear()
ftpClient.lineReceived('200 Hello bob.')
# The client shouldn't have sent anything more (i.e. it shouldn't have
# sent a PASS command).
self.assertEqual('', ftpClient.transport.value())
def test_noPasswordNeeded(self):
"""
Receiving a 230 response to USER prevents PASS from being sent.
"""
# Create a client, and give it a greeting.
ftpClient = ftp.FTPClientBasic()
ftpClient.transport = proto_helpers.StringTransport()
ftpClient.lineReceived('220 Welcome to Imaginary FTP.')
# Queue a login with no password
ftpClient.queueLogin('bob', 'secret')
self.assertEqual('USER bob\r\n', ftpClient.transport.value())
# Clear the test buffer, acknowledge the USER command with a 230
# response code.
ftpClient.transport.clear()
ftpClient.lineReceived('230 Hello bob. No password needed.')
# The client shouldn't have sent anything more (i.e. it shouldn't have
# sent a PASS command).
self.assertEqual('', ftpClient.transport.value())
class PathHandling(unittest.TestCase):
def testNormalizer(self):
for inp, outp in [('a', ['a']),
('/a', ['a']),
('/', []),
('a/b/c', ['a', 'b', 'c']),
('/a/b/c', ['a', 'b', 'c']),
('/a/', ['a']),
('a/', ['a'])]:
self.assertEqual(ftp.toSegments([], inp), outp)
for inp, outp in [('b', ['a', 'b']),
('b/', ['a', 'b']),
('/b', ['b']),
('/b/', ['b']),
('b/c', ['a', 'b', 'c']),
('b/c/', ['a', 'b', 'c']),
('/b/c', ['b', 'c']),
('/b/c/', ['b', 'c'])]:
self.assertEqual(ftp.toSegments(['a'], inp), outp)
for inp, outp in [('//', []),
('//a', ['a']),
('a//', ['a']),
('a//b', ['a', 'b'])]:
self.assertEqual(ftp.toSegments([], inp), outp)
for inp, outp in [('//', []),
('//b', ['b']),
('b//c', ['a', 'b', 'c'])]:
self.assertEqual(ftp.toSegments(['a'], inp), outp)
for inp, outp in [('..', []),
('../', []),
('a/..', ['x']),
('/a/..', []),
('/a/b/..', ['a']),
('/a/b/../', ['a']),
('/a/b/../c', ['a', 'c']),
('/a/b/../c/', ['a', 'c']),
('/a/b/../../c', ['c']),
('/a/b/../../c/', ['c']),
('/a/b/../../c/..', []),
('/a/b/../../c/../', [])]:
self.assertEqual(ftp.toSegments(['x'], inp), outp)
for inp in ['..', '../', 'a/../..', 'a/../../',
'/..', '/../', '/a/../..', '/a/../../',
'/a/b/../../..']:
self.assertRaises(ftp.InvalidPath, ftp.toSegments, [], inp)
for inp in ['../..', '../../', '../a/../..']:
self.assertRaises(ftp.InvalidPath, ftp.toSegments, ['x'], inp)
class IsGlobbingExpressionTests(unittest.TestCase):
"""
Tests for _isGlobbingExpression utility function.
"""
def test_isGlobbingExpressionEmptySegments(self):
"""
_isGlobbingExpression will return False for None, or empty
segments.
"""
self.assertFalse(ftp._isGlobbingExpression())
self.assertFalse(ftp._isGlobbingExpression([]))
self.assertFalse(ftp._isGlobbingExpression(None))
def test_isGlobbingExpressionNoGlob(self):
"""
_isGlobbingExpression will return False for plain segments.
Also, it only checks the last segment part (filename) and will not
check the path name.
"""
self.assertFalse(ftp._isGlobbingExpression(['ignore', 'expr']))
self.assertFalse(ftp._isGlobbingExpression(['*.txt', 'expr']))
def test_isGlobbingExpressionGlob(self):
"""
_isGlobbingExpression will return True for segments which contain
globbing characters in the last segment part (filename).
"""
self.assertTrue(ftp._isGlobbingExpression(['ignore', '*.txt']))
self.assertTrue(ftp._isGlobbingExpression(['ignore', '[a-b].txt']))
self.assertTrue(ftp._isGlobbingExpression(['ignore', 'fil?.txt']))
class BaseFTPRealmTests(unittest.TestCase):
"""
Tests for L{ftp.BaseFTPRealm}, a base class to help define L{IFTPShell}
realms with different user home directory policies.
"""
def test_interface(self):
"""
L{ftp.BaseFTPRealm} implements L{IRealm}.
"""
self.assertTrue(verifyClass(IRealm, ftp.BaseFTPRealm))
def test_getHomeDirectory(self):
"""
L{ftp.BaseFTPRealm} calls its C{getHomeDirectory} method with the
avatarId being requested to determine the home directory for that
avatar.
"""
result = filepath.FilePath(self.mktemp())
avatars = []
class TestRealm(ftp.BaseFTPRealm):
def getHomeDirectory(self, avatarId):
avatars.append(avatarId)
return result
realm = TestRealm(self.mktemp())
iface, avatar, logout = realm.requestAvatar(
"alice@example.com", None, ftp.IFTPShell)
self.assertIsInstance(avatar, ftp.FTPShell)
self.assertEqual(avatar.filesystemRoot, result)
def test_anonymous(self):
"""
L{ftp.BaseFTPRealm} returns an L{ftp.FTPAnonymousShell} instance for
anonymous avatar requests.
"""
anonymous = self.mktemp()
realm = ftp.BaseFTPRealm(anonymous)
iface, avatar, logout = realm.requestAvatar(
checkers.ANONYMOUS, None, ftp.IFTPShell)
self.assertIsInstance(avatar, ftp.FTPAnonymousShell)
self.assertEqual(avatar.filesystemRoot, filepath.FilePath(anonymous))
def test_notImplemented(self):
"""
L{ftp.BaseFTPRealm.getHomeDirectory} should be overridden by a subclass
and raises L{NotImplementedError} if it is not.
"""
realm = ftp.BaseFTPRealm(self.mktemp())
self.assertRaises(NotImplementedError, realm.getHomeDirectory, object())
class FTPRealmTestCase(unittest.TestCase):
"""
Tests for L{ftp.FTPRealm}.
"""
def test_getHomeDirectory(self):
"""
L{ftp.FTPRealm} accepts an extra directory to its initializer and treats
the avatarId passed to L{ftp.FTPRealm.getHomeDirectory} as a single path
segment to construct a child of that directory.
"""
base = '/path/to/home'
realm = ftp.FTPRealm(self.mktemp(), base)
home = realm.getHomeDirectory('alice@example.com')
self.assertEqual(
filepath.FilePath(base).child('alice@example.com'), home)
def test_defaultHomeDirectory(self):
"""
If no extra directory is passed to L{ftp.FTPRealm}, it uses C{"/home"}
as the base directory containing all user home directories.
"""
realm = ftp.FTPRealm(self.mktemp())
home = realm.getHomeDirectory('alice@example.com')
self.assertEqual(filepath.FilePath('/home/alice@example.com'), home)
class SystemFTPRealmTests(unittest.TestCase):
"""
Tests for L{ftp.SystemFTPRealm}.
"""
skip = nonPOSIXSkip
def test_getHomeDirectory(self):
"""
L{ftp.SystemFTPRealm.getHomeDirectory} treats the avatarId passed to it
as a username in the underlying platform and returns that account's home
directory.
"""
# Try to pick a username that will have a home directory.
user = getpass.getuser()
# Try to find their home directory in a different way than used by the
# implementation. Maybe this is silly and can only introduce spurious
# failures due to system-specific configurations.
import pwd
expected = pwd.getpwnam(user).pw_dir
realm = ftp.SystemFTPRealm(self.mktemp())
home = realm.getHomeDirectory(user)
self.assertEqual(home, filepath.FilePath(expected))
def test_noSuchUser(self):
"""
L{ftp.SystemFTPRealm.getHomeDirectory} raises L{UnauthorizedLogin} when
passed a username which has no corresponding home directory in the
system's accounts database.
"""
user = insecureRandom(4).encode('hex')
realm = ftp.SystemFTPRealm(self.mktemp())
self.assertRaises(UnauthorizedLogin, realm.getHomeDirectory, user)
class ErrnoToFailureTestCase(unittest.TestCase):
"""
Tests for L{ftp.errnoToFailure} errno checking.
"""
def test_notFound(self):
"""
C{errno.ENOENT} should be translated to L{ftp.FileNotFoundError}.
"""
d = ftp.errnoToFailure(errno.ENOENT, "foo")
return self.assertFailure(d, ftp.FileNotFoundError)
def test_permissionDenied(self):
"""
C{errno.EPERM} should be translated to L{ftp.PermissionDeniedError}.
"""
d = ftp.errnoToFailure(errno.EPERM, "foo")
return self.assertFailure(d, ftp.PermissionDeniedError)
def test_accessDenied(self):
"""
C{errno.EACCES} should be translated to L{ftp.PermissionDeniedError}.
"""
d = ftp.errnoToFailure(errno.EACCES, "foo")
return self.assertFailure(d, ftp.PermissionDeniedError)
def test_notDirectory(self):
"""
C{errno.ENOTDIR} should be translated to L{ftp.IsNotADirectoryError}.
"""
d = ftp.errnoToFailure(errno.ENOTDIR, "foo")
return self.assertFailure(d, ftp.IsNotADirectoryError)
def test_fileExists(self):
"""
C{errno.EEXIST} should be translated to L{ftp.FileExistsError}.
"""
d = ftp.errnoToFailure(errno.EEXIST, "foo")
return self.assertFailure(d, ftp.FileExistsError)
def test_isDirectory(self):
"""
C{errno.EISDIR} should be translated to L{ftp.IsADirectoryError}.
"""
d = ftp.errnoToFailure(errno.EISDIR, "foo")
return self.assertFailure(d, ftp.IsADirectoryError)
def test_passThrough(self):
"""
If an unknown errno is passed to L{ftp.errnoToFailure}, it should let
the originating exception pass through.
"""
try:
raise RuntimeError("bar")
except:
d = ftp.errnoToFailure(-1, "foo")
return self.assertFailure(d, RuntimeError)
class AnonymousFTPShellTestCase(unittest.TestCase):
"""
Test anonymous shell properties.
"""
def test_anonymousWrite(self):
"""
Check that L{ftp.FTPAnonymousShell} returns an error when trying to
open a file for writing.
"""
shell = ftp.FTPAnonymousShell('')
d = shell.openForWriting(('foo',))
self.assertFailure(d, ftp.PermissionDeniedError)
return d
class IFTPShellTestsMixin:
"""
Generic tests for the C{IFTPShell} interface.
"""
def directoryExists(self, path):
"""
Test if the directory exists at C{path}.
@param path: the relative path to check.
@type path: C{str}.
@return: C{True} if C{path} exists and is a directory, C{False} if
it's not the case
@rtype: C{bool}
"""
raise NotImplementedError()
def createDirectory(self, path):
"""
Create a directory in C{path}.
@param path: the relative path of the directory to create, with one
segment.
@type path: C{str}
"""
raise NotImplementedError()
def fileExists(self, path):
"""
Test if the file exists at C{path}.
@param path: the relative path to check.
@type path: C{str}.
@return: C{True} if C{path} exists and is a file, C{False} if it's not
the case.
@rtype: C{bool}
"""
raise NotImplementedError()
def createFile(self, path, fileContent=''):
"""
Create a file named C{path} with some content.
@param path: the relative path of the file to create, without
directory.
@type path: C{str}
@param fileContent: the content of the file.
@type fileContent: C{str}
"""
raise NotImplementedError()
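# (Concrete test cases mixing this class in supply the four helpers above;
# for a local-filesystem shell they would typically be thin wrappers around
# os.path.isdir, os.mkdir, os.path.isfile and open() relative to a scratch
# directory. This note is illustrative and not part of the interface.)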
def test_createDirectory(self):
"""
C{directoryExists} should report correctly about directory existence,
and C{createDirectory} should create a directory detectable by
C{directoryExists}.
"""
self.assertFalse(self.directoryExists('bar'))
self.createDirectory('bar')
self.assertTrue(self.directoryExists('bar'))
def test_createFile(self):
"""
C{fileExists} should report correctly about file existence, and
C{createFile} should create a file detectable by C{fileExists}.
"""
self.assertFalse(self.fileExists('file.txt'))
self.createFile('file.txt')
self.assertTrue(self.fileExists('file.txt'))
def test_makeDirectory(self):
"""
Create a directory and check that it ends up in the filesystem.
"""
d = self.shell.makeDirectory(('foo',))
def cb(result):
self.assertTrue(self.directoryExists('foo'))
return d.addCallback(cb)
def test_makeDirectoryError(self):
"""
Creating a directory that already exists should fail with a
C{ftp.FileExistsError}.
"""
self.createDirectory('foo')
d = self.shell.makeDirectory(('foo',))
return self.assertFailure(d, ftp.FileExistsError)
def test_removeDirectory(self):
"""
Try to remove a directory and check it's removed from the filesystem.
"""
self.createDirectory('bar')
d = self.shell.removeDirectory(('bar',))
def cb(result):
self.assertFalse(self.directoryExists('bar'))
return d.addCallback(cb)
def test_removeDirectoryOnFile(self):
"""
removeDirectory should not work on a file and should fail with a
C{ftp.IsNotADirectoryError}.
"""
self.createFile('file.txt')
d = self.shell.removeDirectory(('file.txt',))
return self.assertFailure(d, ftp.IsNotADirectoryError)
def test_removeNotExistingDirectory(self):
"""
Removing a directory that doesn't exist should fail with a
C{ftp.FileNotFoundError}.
"""
d = self.shell.removeDirectory(('bar',))
return self.assertFailure(d, ftp.FileNotFoundError)
def test_removeFile(self):
"""
Try to remove a file and check it's removed from the filesystem.
"""
self.createFile('file.txt')
d = self.shell.removeFile(('file.txt',))
def cb(res):
self.assertFalse(self.fileExists('file.txt'))
d.addCallback(cb)
return d
def test_removeFileOnDirectory(self):
"""
removeFile should not work on a directory.
"""
self.createDirectory('ned')
d = self.shell.removeFile(('ned',))
return self.assertFailure(d, ftp.IsADirectoryError)
def test_removeNotExistingFile(self):
"""
Try to remove a non-existent file, and check it fails with a
L{ftp.FileNotFoundError}.
"""
d = self.shell.removeFile(('foo',))
return self.assertFailure(d, ftp.FileNotFoundError)
def test_list(self):
"""
Check the output of the list method.
"""
self.createDirectory('ned')
self.createFile('file.txt')
d = self.shell.list(('.',))
def cb(l):
l.sort()
self.assertEqual(l,
[('file.txt', []), ('ned', [])])
return d.addCallback(cb)
def test_listWithStat(self):
"""
Check the output of the list method when specific stats are requested.
"""
self.createDirectory('ned')
self.createFile('file.txt')
d = self.shell.list(('.',), ('size', 'permissions',))
def cb(l):
l.sort()
self.assertEqual(len(l), 2)
self.assertEqual(l[0][0], 'file.txt')
self.assertEqual(l[1][0], 'ned')
# Size and permissions are reported differently between platforms
# so just check they are present
self.assertEqual(len(l[0][1]), 2)
self.assertEqual(len(l[1][1]), 2)
return d.addCallback(cb)
def test_listWithInvalidStat(self):
"""
Querying an invalid stat should result in an C{AttributeError}.
"""
self.createDirectory('ned')
d = self.shell.list(('.',), ('size', 'whateverstat',))
return self.assertFailure(d, AttributeError)
def test_listFile(self):
"""
Check the output of the list method on a file.
"""
self.createFile('file.txt')
d = self.shell.list(('file.txt',))
def cb(l):
l.sort()
self.assertEqual(l,
[('file.txt', [])])
return d.addCallback(cb)
def test_listNotExistingDirectory(self):
"""
list on a directory that doesn't exist should fail with a
L{ftp.FileNotFoundError}.
"""
d = self.shell.list(('foo',))
return self.assertFailure(d, ftp.FileNotFoundError)
def test_access(self):
"""
Try to access a resource.
"""
self.createDirectory('ned')
d = self.shell.access(('ned',))
return d
def test_accessNotFound(self):
"""
access should fail on a resource that doesn't exist.
"""
d = self.shell.access(('foo',))
return self.assertFailure(d, ftp.FileNotFoundError)
def test_openForReading(self):
"""
Check that openForReading returns an object providing C{ftp.IReadFile}.
"""
self.createFile('file.txt')
d = self.shell.openForReading(('file.txt',))
def cb(res):
self.assertTrue(ftp.IReadFile.providedBy(res))
d.addCallback(cb)
return d
def test_openForReadingNotFound(self):
"""
openForReading should fail with a C{ftp.FileNotFoundError} on a file
that doesn't exist.
"""
d = self.shell.openForReading(('ned',))
return self.assertFailure(d, ftp.FileNotFoundError)
def test_openForReadingOnDirectory(self):
"""
openForReading should not work on a directory.
"""
self.createDirectory('ned')
d = self.shell.openForReading(('ned',))
return self.assertFailure(d, ftp.IsADirectoryError)
def test_openForWriting(self):
"""
Check that openForWriting returns an object providing C{ftp.IWriteFile}.
"""
d = self.shell.openForWriting(('foo',))
def cb1(res):
self.assertTrue(ftp.IWriteFile.providedBy(res))
return res.receive().addCallback(cb2)
def cb2(res):
self.assertTrue(IConsumer.providedBy(res))
d.addCallback(cb1)
return d
def test_openForWritingExistingDirectory(self):
"""
openForWriting should not be able to open a directory that already
exists.
"""
self.createDirectory('ned')
d = self.shell.openForWriting(('ned',))
return self.assertFailure(d, ftp.IsADirectoryError)
def test_openForWritingInNotExistingDirectory(self):
"""
openForWriting should fail with a L{ftp.FileNotFoundError} if you specify
a file in a directory that doesn't exist.
"""
self.createDirectory('ned')
d = self.shell.openForWriting(('ned', 'idonotexist', 'foo'))
return self.assertFailure(d, ftp.FileNotFoundError)
def test_statFile(self):
"""
Check the output of the stat method on a file.
"""
fileContent = 'wobble\n'
self.createFile('file.txt', fileContent)
d = self.shell.stat(('file.txt',), ('size', 'directory'))
def cb(res):
self.assertEqual(res[0], len(fileContent))
self.assertFalse(res[1])
d.addCallback(cb)
return d
def test_statDirectory(self):
"""
Check the output of the stat method on a directory.
"""
self.createDirectory('ned')
| d = self.shell.stat(('ned',), ('size', 'directory')) | 8,415 | lcc_e | python | null | 84cc071c6bef98207a5ba6686d302d89c377a6f4b67e1ad2 |
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
DESCRIPTION = """Beaker Wizard is a tool which can transform that
"create all the necessary files with correct names, values, and paths"
boring phase of every test creation into one-line joy. For power
users there is a lot of inspiration in the man page. For quick start
just ``cd`` to your test package directory and simply run
``beaker-wizard``.
"""
__doc__ = """
beaker-wizard: Tool to ease the creation of a new Beaker task
=============================================================
.. program:: beaker-wizard
Synopsis
--------
| :program:`beaker-wizard` [*options*] <testname> <bug>
The *testname* argument should be specified as::
[[[NAMESPACE/]PACKAGE/]TYPE/][PATH/]NAME
which can be shortened as you need::
TESTNAME
TYPE/TESTNAME
TYPE/PATH/TESTNAME
PACKAGE/TYPE/NAME
PACKAGE/TYPE/PATH/NAME
NAMESPACE/PACKAGE/TYPE/NAME
NAMESPACE/PACKAGE/TYPE/PATH/NAME
| :program:`beaker-wizard` Makefile
This form will run the Wizard in the Makefile edit mode which allows you to
quickly and simply update metadata of an already existing test while trying to
keep the rest of the Makefile untouched.
Description
-----------
%(DESCRIPTION)s
The beaker-wizard was designed to be flexible: it is intended not only for
beginning Beaker users who will welcome questions with hints but also for
experienced test writers who can make use of the extensive command-line
options to push their new-test-creating productivity to the limits.
For basic usage help, see Options_ below or run ``beaker-wizard -h``.
For advanced features and expert usage examples, read on.
Highlights
~~~~~~~~~~
* provide reasonable defaults wherever possible
* flexible confirmation (``--every``, ``--common``, ``--yes``)
* predefined skeletons (beaker, beakerlib, simple, multihost, library, parametrized, empty)
* saved user preferences (defaults, user skeletons, licenses)
* Bugzilla integration (fetch bug info, reproducers, suggest name, description)
* Makefile edit mode (quick adding of bugs, limiting archs or releases...)
* automated adding created files to the git repository
Skeletons
~~~~~~~~~
Another interesting feature is that you can save your own skeletons into
the preferences file, so that you can automatically populate the new
test scripts with your favourite structure.
All of the test related metadata gathered by the Wizard can be expanded
inside the skeletons using XML tags. For example: use ``<package/>`` for
expanding into the test package name or ``<test/>`` for the full test name.
The following metadata variables are available:
* test namespace package type path testname description
* bugs reproducers requires architectures releases version time
* priority license confidential destructive
* skeleton author email
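For illustration only, a custom skeleton entry saved in the preferences file
could look roughly like this (the name, requires value and content below are
placeholders, not a shipped skeleton)::
<skeleton name="mytemplate" requires="gdb">
# runtest.sh body for <test/>, which exercises the <package/> package
</skeleton>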
Options
-------
-h, --help show this help message and exit
-V, --version display version info and quit
Basic metadata:
-d DESCRIPTION short description
-a ARCHS architectures [All]
-r RELEASES releases [All]
-o PACKAGES run for packages [wizard]
-q PACKAGES required packages [wizard]
-t TIME test time [5m]
Extra metadata:
-z VERSION test version [1.0]
-p PRIORITY priority [Normal]
-l LICENSE license [GPLv2+]
-i INTERNAL confidential [No]
-u UGLY destructive [No]
Author info:
-n NAME your name [Petr Splichal]
-m MAIL your email address [psplicha@redhat.com]
Test creation specifics:
-s SKELETON skeleton to use [beakerlib]
-j PREFIX join the bug prefix to the testname [Yes]
-f, --force force without review and overwrite existing files
-w, --write write preferences to ~/.beaker_client/wizard
-b, --bugzilla contact bugzilla to get bug details
-g, --git add created files to the git repository
Confirmation and verbosity:
-v, --verbose display detailed info about every action
-e, --every prompt for each and every available option
-c, --common confirm only commonly used options [Default]
-y, --yes yes, I'm sure, no questions, just do it!
Examples
--------
Some brief examples::
beaker-wizard overload-performance 379791
regression test with specified bug and name
-> /CoreOS/perl/Regression/bz379791-overload-performance
beaker-wizard buffer-overflow 2008-1071 -a i386
security test with specified CVE and name, i386 arch only
-> /CoreOS/perl/Security/CVE-2008-1071-buffer-overflow
beaker-wizard Sanity/options -y -a?
sanity test with given name, ask just for architecture
-> /CoreOS/perl/Sanity/options
beaker-wizard Sanity/server/smoke
add an optional path under test type directory
-> /CoreOS/perl/Sanity/server/smoke
beaker-wizard -by 1234
contact bugzilla for details, no questions, just review
-> /CoreOS/installer/Regression/bz1234-Swap-partition-Installer
beaker-wizard -byf 2007-0455
security test, no questions, no review, overwrite existing files
-> /CoreOS/gd/Security/CVE-2007-0455-gd-buffer-overrun
All of the previous examples assume you're in the package tests
directory (e.g. ``cd git/tests/perl``). All the necessary directories and
files are created under this location.
Bugzilla integration
~~~~~~~~~~~~~~~~~~~~
The following example creates a regression test for bug #227655.
Option ``-b`` is used to contact Bugzilla to automatically fetch bug
details and ``-y`` to skip unnecessary questions.
::
# beaker-wizard -by 227655
Contacting bugzilla...
Fetching details for bz227655
Examining attachments for possible reproducers
Adding test.pl (simple test using Net::Config)
Adding libnet.cfg (libnet.cfg test config file)
Ready to create the test, please review
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/CoreOS/perl/Regression/bz227655-libnet-cfg-in-wrong-directory
Namespace : CoreOS
Package : perl
Test type : Regression
Relative path : None
Test name : bz227655-libnet-cfg-in-wrong-directory
Description : Test for bz227655 (libnet.cfg in wrong directory)
Bug or CVE numbers : bz227655
Reproducers to fetch : test.pl, libnet.cfg
Required packages : None
Architectures : All
Releases : All
Version : 1.0
Time : 5m
Priority : Normal
License : GPLv2+
Confidential : No
Destructive : No
Skeleton : beakerlib
Author : Petr Splichal
Email : psplicha@redhat.com
[Everything OK?]
Directory Regression/bz227655-libnet-cfg-in-wrong-directory created
File Regression/bz227655-libnet-cfg-in-wrong-directory/PURPOSE written
File Regression/bz227655-libnet-cfg-in-wrong-directory/runtest.sh written
File Regression/bz227655-libnet-cfg-in-wrong-directory/Makefile written
Attachment test.pl downloaded
Attachment libnet.cfg downloaded
Command line
~~~~~~~~~~~~
The extensive command line syntax can come in handy for example
when creating a bunch of sanity tests for a component. Let's
create a test skeleton for each of wget's feature areas::
# cd git/tests/wget
# for test in download recursion rules authentication; do
> beaker-wizard -yf $test -t 10m -q httpd,vsftpd \\
> -d "Sanity test for $test options"
> done
...
/CoreOS/wget/Sanity/authentication
Namespace : CoreOS
Package : wget
Test type : Sanity
Relative path : None
Test name : authentication
Description : Sanity test for authentication options
Bug or CVE numbers : None
Reproducers to fetch : None
Required packages : httpd, vsftpd
Architectures : All
Releases : All
Version : 1.0
Time : 10m
Priority : Normal
License : GPLv2+
Confidential : No
Destructive : No
Skeleton : beakerlib
Author : Petr Splichal
Email : psplicha@redhat.com
Directory Sanity/authentication created
File Sanity/authentication/PURPOSE written
File Sanity/authentication/runtest.sh written
File Sanity/authentication/Makefile written
# tree
.
`-- Sanity
|-- authentication
| |-- Makefile
| |-- PURPOSE
| `-- runtest.sh
|-- download
| |-- Makefile
| |-- PURPOSE
| `-- runtest.sh
|-- recursion
| |-- Makefile
| |-- PURPOSE
| `-- runtest.sh
`-- rules
|-- Makefile
|-- PURPOSE
`-- runtest.sh
Notes
-----
If you provide an option with a "?" you will be given a list of
available options and a prompt to type your choice in.
For working Bugzilla integration you need the ``python-bugzilla`` package installed on your system.
If you are trying to access a bug with restricted access, log
in to Bugzilla first with the following command::
bugzilla login
You will be asked for an email and password, and after successfully logging in a
``~/.bugzillacookies`` file will be created, which will then be used
in all subsequent Bugzilla queries. Logout can be performed with
``rm ~/.bugzillacookies`` ;-)
Files
-----
All commonly used preferences can be saved into ``~/.beaker_client/wizard``.
Use the "write" command to save current settings when reviewing gathered
test data, or edit the file with your favourite editor.
All options in the config file are self-explanatory. For confirm level choose
one of: nothing, common or everything.
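For orientation, the file uses the same XML structure as the built-in template,
for example (the values shown are placeholders)::
<author>
<name>Your Name</name>
<email>your@email.com</email>
<confirm>common</confirm>
<skeleton>beakerlib</skeleton>
</author>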
Library tasks
-------------
The "library" skeleton can be used to create a "library task". It allows you to bundle
together common functionality which may be required across multiple
tasks. To learn more, see `the BeakerLib documentation for library
tasks <https://fedorahosted.org/beakerlib/wiki/libraries>`__.
Bugs
----
If you encounter an issue or have an idea for enhancement, please `file a new bug`_.
See also `open bugs`_.
.. _file a new bug: https://bugzilla.redhat.com/enter_bug.cgi?product=Beaker&component=command+line&short_desc=beaker-wizard:+&status_whiteboard=BeakerWizard&assigned_to=psplicha@redhat.com
.. _open bugs: https://bugzilla.redhat.com/buglist.cgi?product=Beaker&bug_status=__open__&short_desc=beaker-wizard&short_desc_type=allwordssubstr
See also
--------
* `Beaker documentation <http://beaker-project.org/help.html>`_
* `BeakerLib <https://fedorahosted.org/beakerlib>`_
""" % globals()
from optparse import OptionParser, OptionGroup, IndentedHelpFormatter, SUPPRESS_HELP
from xml.dom.minidom import parse, parseString
from datetime import date
from time import sleep
import subprocess
import textwrap
import pwd
import sys
import re
import os
# Version
WizardVersion = "2.3.0"
# Regular expressions
RegExpPackage = re.compile("^(?![._+-])[.a-zA-Z0-9_+-]+(?<![._-])$")
RegExpRhtsRequires = re.compile("^(?![._+-])[.a-zA-Z0-9_+-/()]+(?<![._-])$")
RegExpPath = re.compile("^(?![/-])[a-zA-Z0-9/_-]+(?<![/-])$")
RegExpTestName = re.compile("^(?!-)[a-zA-Z0-9-_]+(?<!-)$")
RegExpBug = re.compile("^\d+$")
RegExpBugLong = re.compile("^bz\d+$")
RegExpBugPrefix = re.compile("^bz")
RegExpCVE = re.compile("^\d{4}-\d{4}$")
RegExpCVELong = re.compile("^CVE-\d{4}-\d{4}$")
RegExpCVEPrefix = re.compile("^CVE-")
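# (e.g. bugs match "379791" or "bz379791", CVEs match "2008-1071" or "CVE-2008-1071")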
RegExpAuthor = re.compile("^[a-zA-Z]+\.?( [a-zA-Z]+\.?){1,2}$")
RegExpEmail = re.compile("^[a-z._-]+@[a-z.-]+$")
RegExpYes = re.compile("Everything OK|y|ye|jo|ju|ja|ano|da", re.I)
RegExpReproducer = re.compile("repr|test|expl|poc|demo", re.I)
RegExpScript = re.compile("\.(sh|py|pl)$")
RegExpMetadata = re.compile("(\$\(METADATA\):\s+Makefile.*)$", re.S)
RegExpTest = re.compile("TEST=(\S+)", re.S)
RegExpVersion = re.compile("TESTVERSION=([\d.]+)", re.S)
# Suggested test types (these used to be enforced)
SuggestedTestTypes = """Regression Performance Stress Certification
Security Durations Interoperability Standardscompliance
Customeracceptance Releasecriterium Crasher Tier1 Tier2
Alpha KernelTier1 KernelTier2 Multihost MultihostDriver
Install FedoraTier1 FedoraTier2 KernelRTTier1
KernelReporting Sanity Library""".split()
# Guesses
GuessAuthorLogin = pwd.getpwuid(os.getuid())[0]
GuessAuthorDomain = re.sub("^.*\.([^.]+\.[^.]+)$", "\\1", os.uname()[1])
GuessAuthorEmail = "%s@%s" % (GuessAuthorLogin, GuessAuthorDomain)
GuessAuthorName = pwd.getpwuid(os.getuid())[4]
# Make sure guesses are valid values
if not RegExpEmail.match(GuessAuthorEmail):
GuessAuthorEmail = "your@email.com"
if not RegExpAuthor.match(GuessAuthorName):
GuessAuthorName = "Your Name"
# Commands
GitCommand="git add".split()
# Constants
MaxLengthSuggestedDesc = 50
MaxLengthTestName = 50
ReviewWidth = 22
MakefileLineWidth = 17
VimDictionary = "# vim: dict+=/usr/share/beakerlib/dictionary.vim cpt=.,w,b,u,t,i,k"
BugzillaUrl = 'https://bugzilla.redhat.com/show_bug.cgi?id='
BugzillaXmlrpc = 'https://bugzilla.redhat.com/xmlrpc.cgi'
PreferencesDir = os.getenv('HOME') + "/.beaker_client"
PreferencesFile = PreferencesDir + "/wizard"
PreferencesTemplate = """<?xml version="1.0" ?>
<wizard>
<author>
<name>%s</name>
<email>%s</email>
<confirm>common</confirm>
<skeleton>beakerlib</skeleton>
</author>
<test>
<time>5m</time>
<type>Sanity</type>
<prefix>Yes</prefix>
<namespace>CoreOS</namespace>
<priority>Normal</priority>
<license>GPLv2+</license>
<confidential>No</confidential>
<destructive>No</destructive>
</test>
<licenses>
<license name="GPLvX">
This is GPLvX license text.
</license>
<license name="GPLvY">
This is GPLvY license text.
</license>
<license name="GPLvZ">
This is GPLvZ license text.
</license>
</licenses>
<skeletons>
<skeleton name="skel1" requires="gdb" rhtsrequires="library(perl/lib1) library(scl/lib2)">
This is skeleton 1 example.
</skeleton>
<skeleton name="skel2">
This is skeleton 2 example.
</skeleton>
<skeleton name="skel3">
This is skeleton 3 example.
</skeleton>
</skeletons>
</wizard>
""" % (GuessAuthorName, GuessAuthorEmail)
def wrapText(text):
""" Wrap text to fit the default width """
text = re.compile("\s+").sub(" ", text)
return "\n".join(textwrap.wrap(text))
def dedentText(text, count = 12):
""" Remove leading spaces from the beginning of lines """
return re.compile("\n" + " " * count).sub("\n", text)
def indentText(text, count = 12):
""" Insert leading spaces to the beginning of lines """
return re.compile("\n").sub("\n" + " " * count, text)
def shortenText(text, max = 50):
""" Shorten long texts into something more usable """
# if shorter, nothing to do
if not text or len(text) <= max:
return text
# cut the text
text = text[0:max+1]
# remove last non complete word
text = re.sub(" [^ ]*$", "", text)
return text
def shellEscaped(text):
"""
Returns the text escaped for inclusion inside a shell double-quoted string.
"""
return text.replace('\\', '\\\\')\
.replace('"', r'\"')\
.replace('$', r'\$')\
.replace('`', r'\`')\
.replace('!', r'\!')
def unique(seq):
""" Remove duplicates from the supplied sequence """
dictionary = {}
for i in seq:
dictionary[i] = 1
return dictionary.keys()
def hr(width = 70):
""" Return simple ascii horizontal rule """
if width < 2: return ""
return "# " + (width - 2) * "~"
def comment(text, width = 70, comment = "#", top = True, bottom = True, padding = 3):
""" Create a nicely formatted comment """
result = ""
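# For example, comment("Hello", width=20) produces a block like:
#   # ~~~~~~~~~~~~~~~~~~
#   #
#   #   Hello
#   #
#   # ~~~~~~~~~~~~~~~~~~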
# top hrule & padding
if width and top: result += hr(width) + "\n"
result += int(padding/3) * (comment + "\n")
# prepend lines with the comment char and padding
result += re.compile("^(?!#)", re.M).sub(comment + padding * " ", text)
# bottom padding & hrule
result += int(padding/3) * ("\n" + comment)
if width and bottom: result += "\n" + hr(width)
# remove any trailing spaces
result = re.compile("\s+$", re.M).sub("", result)
return result
def dashifyText(text, allowExtraChars = ""):
""" Replace all special chars with dashes """
if not text: return text
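# e.g. "libnet.cfg in wrong directory" becomes "libnet-cfg-in-wrong-directory"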
# remove the rubbish from the start & end
text = re.sub("^[^a-zA-Z0-9]*", "", text)
text = re.sub("[^a-zA-Z0-9]*$", "", text)
# replace all special chars with dashes
text = re.sub("[^a-zA-Z0-9%s]+" % allowExtraChars, "-", text)
return text
def createNode(node, text):
""" Create a child text node """
# find document root
root = node
while root.nodeType != root.DOCUMENT_NODE:
root = root.parentNode
# append child text node
node.appendChild(root.createTextNode(text))
return node
def getNode(node):
""" Return node value """
try: value = node.firstChild.nodeValue
except: return None
else: return value
def setNode(node, value):
""" Set node value (create a child if necessary) """
try: node.firstChild.nodeValue = value
except: createNode(node, value)
return value
def findNode(parent, tag, name = None):
""" Find a child node with specified tag (and name) """
try:
for child in parent.getElementsByTagName(tag):
if name is None or child.getAttribute("name") == name:
return child
except:
return None
def findNodeNames(node, tag):
""" Return list of all name values of specified tags """
list = []
for child in node.getElementsByTagName(tag):
if child.hasAttribute("name"):
list.append(child.getAttribute("name"))
return list
def parentDir():
""" Get parent directory name for package name suggestion """
dir = re.split("/", os.getcwd())[-1]
if dir == "": return "kernel"
# remove the -tests suffix if present
# (useful if writing tests in the package/package-tests directory)
dir = re.sub("-tests$", "", dir)
return dir
def addToGit(path):
""" Add a file or a directory to Git """
try:
process = subprocess.Popen(GitCommand + [path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,)
out, err = process.communicate()
if process.wait():
print "Sorry, failed to add %s to git :-(" % path
print out, err
sys.exit(1)
except OSError:
print("Unable to run %s, is %s installed?"
% (" ".join(GitCommand), GitCommand[0]))
sys.exit(1)
def removeEmbargo(summary):
return summary.replace('EMBARGOED ', '')
class Preferences:
""" Test's author preferences """
def __init__(self, load_user_prefs=True):
""" Set (in future get) user preferences / defaults """
self.template = parseString(PreferencesTemplate)
self.firstRun = False
if load_user_prefs:
self.load()
else:
self.xml = self.template
self.parse()
# XXX (ncoghlan): all of these exec invocations should be replaced with
# appropriate usage of setattr and getattr. However, beaker-wizard needs
# decent test coverage before embarking on that kind of refactoring...
def parse(self):
""" Parse values from the xml file """
# parse list nodes
for node in "author test licenses skeletons".split():
exec("self.%s = findNode(self.xml, '%s')" % (node, node))
# parse single value nodes for author
for node in "name email confirm skeleton".split():
exec("self.%s = findNode(self.author, '%s')" % (node, node))
# if the node cannot be found get the default from template
if not eval("self." + node):
print "Could not find <%s> in preferences, using default" % node
exec("self.%s = findNode(self.template, '%s').cloneNode(True)"
% (node, node))
exec("self.author.appendChild(self.%s)" % node)
# parse single value nodes for test
for node in "type namespace time priority confidential destructive " \
"prefix license".split():
exec("self.%s = findNode(self.test, '%s')" % (node, node))
# if the node cannot be found get the default from template
if not eval("self." + node):
print "Could not find <%s> in preferences, using default" % node
exec("self.%s = findNode(self.template, '%s').cloneNode(True)" % (node, node))
exec("self.test.appendChild(self.%s)" % node)
def load(self):
""" Load user preferences (or set to defaults) """
preferences_file = os.environ.get("BEAKER_WIZARD_CONF", PreferencesFile)
try:
self.xml = parse(preferences_file)
except:
if os.path.exists(preferences_file):
print "I'm sorry, the preferences file seems broken.\n" \
"Did you do something ugly to %s?" % preferences_file
sleep(3)
else:
self.firstRun = True
self.xml = self.template
self.parse()
else:
try:
self.parse()
except:
print "Failed to parse %s, falling back to defaults." % preferences_file
sleep(3)
self.xml = self.template
self.parse()
def update(self, author, email, confirm, type, namespace, \
time, priority, confidential, destructive, prefix, license, skeleton):
""" Update preferences with current settings """
setNode(self.name, author)
setNode(self.email, email)
setNode(self.confirm, confirm)
setNode(self.type, type)
setNode(self.namespace, namespace)
setNode(self.time, time)
setNode(self.priority, priority)
setNode(self.confidential, confidential)
setNode(self.destructive, destructive)
setNode(self.prefix, prefix)
setNode(self.license, license)
setNode(self.skeleton, skeleton)
def save(self):
""" Save user preferences """
# try to create directory
try:
os.makedirs(PreferencesDir)
except OSError, e:
if e.errno == 17:
pass
else:
print "Cannot create preferences directory %s :-(" % PreferencesDir
return
# try to write the file
try:
file = open(PreferencesFile, "w")
except:
print "Cannot write to %s" % PreferencesFile
else:
file.write((self.xml.toxml() + "\n").encode("utf-8"))
file.close()
print "Preferences saved to %s" % PreferencesFile
sleep(1)
def getAuthor(self): return getNode(self.name)
def getEmail(self): return getNode(self.email)
def getConfirm(self): return getNode(self.confirm)
def getType(self): return getNode(self.type)
def getPackage(self): return parentDir()
def getNamespace(self): return getNode(self.namespace)
def getTime(self): return getNode(self.time)
def getPriority(self): return getNode(self.priority)
def getConfidential(self): return getNode(self.confidential)
def getDestructive(self): return getNode(self.destructive)
def getPrefix(self): return getNode(self.prefix)
def getVersion(self): return "1.0"
def getLicense(self): return getNode(self.license)
def getSkeleton(self): return getNode(self.skeleton)
def getLicenseContent(self, license):
content = findNode(self.licenses, "license", license)
if content:
return re.sub("\n\s+$", "", content.firstChild.nodeValue)
else:
return None
class Help:
""" Help texts """
def __init__(self, options = None):
if options:
# display expert usage page only
if options.expert():
print self.expert();
sys.exit(0)
# show version info
elif options.ver():
print self.version();
sys.exit(0)
def usage(self):
return "beaker-wizard [options] [TESTNAME] [BUG/CVE...] or beaker-wizard Makefile"
def version(self):
return "beaker-wizard %s" % WizardVersion
def description(self):
return DESCRIPTION
def expert(self):
os.execv('/usr/bin/man', ['man', 'beaker-wizard'])
sys.exit(1)
class Makefile:
"""
Parse values from an existing Makefile to set the initial values
Used in the Makefile edit mode.
"""
def __init__(self, options):
# try to read the original Makefile
self.path = options.arg[0]
try:
# open and read the whole content into self.text
print "Reading the Makefile..."
file = open(self.path)
self.text = "".join(file.readlines())
file.close()
# substitute the old style $TEST sub-variables if present
for var in "TOPLEVEL_NAMESPACE PACKAGE_NAME RELATIVE_PATH".split():
m = re.search("%s=(\S+)" % var, self.text)
if m: self.text = re.sub("\$\(%s\)" % var, m.group(1), self.text)
# locate the metadata section
print "Inspecting the metadata section..."
m = RegExpMetadata.search(self.text)
self.metadata = m.group(1)
# parse the $TEST and $TESTVERSION
print "Checking for the full test name and version..."
m = RegExpTest.search(self.text)
options.arg = [m.group(1)]
m = RegExpVersion.search(self.text)
options.opt.version = m.group(1)
except:
print "Failed to parse the original Makefile"
sys.exit(6)
# disable test name prefixing and set confirm to nothing
options.opt.prefix = "No"
options.opt.confirm = "nothing"
# initialize non-existent options.opt.* vars
options.opt.bug = options.opt.owner = options.opt.runfor = None
# unknown will be used to store unrecognized metadata fields
self.unknown = ""
# map long fields to short versions
map = {
"description" : "desc",
"architectures" : "archs",
"testtime" : "time"
}
# parse info from metadata line by line
print "Parsing the individual metadata..."
for line in self.metadata.split("\n"):
m = re.search("echo\s+[\"'](\w+):\s*(.*)[\"']", line)
# skip non-@echo lines
if not m: continue
# read the key & value pair
try: key = map[m.group(1).lower()]
except: key = m.group(1).lower()
# get the value, unescape escaped double quotes
value = re.sub("\\\\\"", "\"", m.group(2))
# skip fields known to contain variables
if key in ("name", "testversion", "path"): continue
# save known fields into options
for data in "owner desc type archs releases time priority license " \
"confidential destructive bug requires runfor".split():
if data == key:
# if multiple choice, extend the array
if key in "archs bug releases requires runfor".split():
try: exec("options.opt.%s.append(value)" % key)
except: exec("options.opt.%s = [value]" % key)
# otherwise just set the value
else:
exec("options.opt.%s = value" % key)
break
# save unrecognized fields to be able to restore them back
else:
self.unknown += "\n" + line
# parse name & email
m = re.search("(.*)\s+<(.*)>", options.opt.owner)
if m:
options.opt.author = m.group(1)
options.opt.email = m.group(2)
# add bug list to arg
if options.opt.bug:
options.arg.extend(options.opt.bug)
# success
print "Makefile successfully parsed."
def save(self, test, version, content):
# possibly update the $TEST and $TESTVERSION
self.text = RegExpTest.sub("TEST=" + test, self.text)
self.text = RegExpVersion.sub("TESTVERSION=" + version, self.text)
# substitute the new metadata
m = RegExpMetadata.search(content)
self.text = RegExpMetadata.sub(m.group(1), self.text)
# add unknown metadata fields we were not able to parse at init
self.text = re.sub("\n\n\trhts-lint",
self.unknown + "\n\n\trhts-lint", self.text)
# let's write it
try:
file = open(self.path, "w")
file.write(self.text.encode("utf-8"))
file.close()
except:
print "Cannot write to %s" % self.path
sys.exit(3)
else:
print "Makefile successfully written"
class Options:
"""
Class maintaining user preferences and options provided on command line
self.opt ... options parsed from command line
self.pref ... user preferences / defaults
"""
def __init__(self, argv=None, load_user_prefs=True):
if argv is None:
argv = sys.argv
self.pref = Preferences(load_user_prefs)
formatter = IndentedHelpFormatter(max_help_position=40)
#formatter._long_opt_fmt = "%s"
# parse options
parser = OptionParser(Help().usage(), formatter=formatter)
parser.set_description(Help().description())
# examples and help
parser.add_option("-x", "--expert",
dest="expert",
action="store_true",
help=SUPPRESS_HELP)
parser.add_option("-V", "--version",
dest="ver",
action="store_true",
help="display version info and quit")
# author
groupAuthor = OptionGroup(parser, "Author info")
groupAuthor.add_option("-n",
dest="author",
metavar="NAME",
help="your name [%s]" % self.pref.getAuthor())
groupAuthor.add_option("-m",
dest="email",
metavar="MAIL",
help="your email address [%s]" % self.pref.getEmail())
# create
groupCreate = OptionGroup(parser, "Test creation specifics")
groupCreate.add_option("-s",
dest="skeleton",
help="skeleton to use [%s]" % self.pref.getSkeleton())
groupCreate.add_option("-j",
dest="prefix",
metavar="PREFIX",
help="join the bug prefix to the testname [%s]"
% self.pref.getPrefix())
groupCreate.add_option("-f", "--force",
dest="force",
action="store_true",
help="force without review and overwrite existing files")
groupCreate.add_option("-w", "--write",
dest="write",
action="store_true",
help="write preferences to ~/.beaker_client/wizard")
groupCreate.add_option("-b", "--bugzilla",
dest="bugzilla",
action="store_true",
help="contact bugzilla to get bug details")
groupCreate.add_option("-g", "--git",
dest="git",
action="store_true",
help="add created files to the git repository")
groupCreate.add_option("-C", "--current-directory",
dest="use_current_dir",
action="store_true",
default=False,
help="create test in current directory")
# setup default to correctly display in help
defaultEverything = defaultCommon = defaultNothing = ""
if self.pref.getConfirm() == "everything":
defaultEverything = " [Default]"
elif self.pref.getConfirm() == "common":
defaultCommon = " [Default]"
elif self.pref.getConfirm() == "nothing":
defaultNothing = " [Default]"
# confirm
groupConfirm = OptionGroup(parser, "Confirmation and verbosity")
groupConfirm.add_option("-v", "--verbose",
dest="verbose",
action="store_true",
help="display detailed info about every action")
groupConfirm.add_option("-e", "--every",
dest="confirm",
action="store_const",
const="everything",
help="prompt for each and every available option" + defaultEverything)
groupConfirm.add_option("-c", "--common",
dest="confirm",
action="store_const",
const="common",
help="confirm only commonly used options" + defaultCommon)
groupConfirm.add_option("-y", "--yes",
dest="confirm",
action="store_const",
const="nothing",
help="yes, I'm sure, no questions, just do it!" + defaultNothing)
# test metadata
groupMeta = OptionGroup(parser, "Basic metadata")
groupMeta.add_option("-d",
dest="desc",
metavar="DESCRIPTION",
help="short description")
groupMeta.add_option("-a",
dest="archs",
action="append",
help="architectures [All]")
groupMeta.add_option("-r",
dest="releases",
action="append",
help="releases [All]")
groupMeta.add_option("-o",
dest="runfor",
action="append",
metavar="PACKAGES",
help="run for packages [%s]" % self.pref.getPackage())
groupMeta.add_option("-q",
dest="requires",
action="append",
metavar="PACKAGES",
help="required packages [%s]" % self.pref.getPackage())
groupMeta.add_option("-Q",
dest="rhtsrequires",
action="append",
metavar="TEST",
help="required RHTS tests or libraries")
groupMeta.add_option("-t",
dest="time",
help="test time [%s]" % self.pref.getTime())
# test metadata
groupExtra = OptionGroup(parser, "Extra metadata")
groupExtra.add_option("-z",
dest="version",
help="test version [%s]" % self.pref.getVersion())
groupExtra.add_option("-p",
dest="priority",
help="priority [%s]" % self.pref.getPriority())
groupExtra.add_option("-l",
dest="license",
help="license [%s]" % self.pref.getLicense())
groupExtra.add_option("-i",
dest="confidential",
metavar="INTERNAL",
help="confidential [%s]" % self.pref.getConfidential())
groupExtra.add_option("-u",
dest="destructive",
metavar="UGLY",
help="destructive [%s]" % self.pref.getDestructive())
# put it together
parser.add_option_group(groupMeta)
parser.add_option_group(groupExtra)
parser.add_option_group(groupAuthor)
parser.add_option_group(groupCreate)
parser.add_option_group(groupConfirm)
# convert all args to unicode
uniarg = []
for arg in argv[1:]:
uniarg.append(unicode(arg, "utf-8"))
# and parse it!
(self.opt, self.arg) = parser.parse_args(uniarg)
# parse namespace/package/type/path/test
self.opt.namespace = None
self.opt.package = None
self.opt.type = None
self.opt.path = None
self.opt.name = None
self.opt.bugs = []
self.makefile = False
if self.arg:
# if we're run in the Makefile-edit mode, parse it to get the values
if re.match(".*Makefile$", self.arg[0]):
self.makefile = Makefile(self)
# the first arg looks like bug/CVE -> we take all args as bugs/CVE's
if RegExpBug.match(self.arg[0]) or RegExpBugLong.match(self.arg[0]) or \
RegExpCVE.match(self.arg[0]) or RegExpCVELong.match(self.arg[0]):
self.opt.bugs = self.arg[:]
# otherwise we expect bug/CVE as second and following
else:
self.opt.bugs = self.arg[1:]
# parsing namespace/package/type/path/testname
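# accepted forms of the test argument (components separated by "/"):
#   namespace/package/type[/path...]/testname
#   package/type[/path...]/testname   (second component must be a suggested test type)
#   type[/path...]/testname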
self.testinfo = self.arg[0]
path_components = os.path.normpath(self.testinfo.rstrip('/')).split('/')
if len(path_components) >= 1:
self.opt.name = path_components.pop(-1)
if len(path_components) >= 3 and re.match(Namespace().match() + '$', path_components[0]):
self.opt.namespace = path_components.pop(0)
self.opt.package = path_components.pop(0)
self.opt.type = path_components.pop(0)
elif len(path_components) >= 2 and path_components[1] in SuggestedTestTypes:
self.opt.package = path_components.pop(0)
self.opt.type = path_components.pop(0)
elif len(path_components) >= 1:
self.opt.type = path_components.pop(0)
if path_components:
self.opt.path = '/'.join(path_components)
# try to connect to bugzilla
self.bugzilla = None
if self.opt.bugzilla:
try:
from bugzilla import Bugzilla
except ImportError:
print "Sorry, the bugzilla interface is not available right now, try:\n" \
" yum install python-bugzilla\n" \
"Use 'bugzilla login' command if you wish to access restricted bugs."
sys.exit(8)
else:
try:
print "Contacting bugzilla..."
self.bugzilla = Bugzilla(url=BugzillaXmlrpc)
except Exception:
print "Cannot connect to bugzilla, check your net connection."
sys.exit(9)
# command-line-only option interface
def expert(self): return self.opt.expert
def ver(self): return self.opt.ver
def force(self): return self.opt.force
def write(self): return self.opt.write
def verbose(self): return self.pref.firstRun or self.opt.verbose
def confirm(self): return self.opt.confirm or self.pref.getConfirm()
# return both specified and default values for the rest of options
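# (each returns a pair: [value given on the command line, preferred default])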
def author(self): return [ self.opt.author, self.pref.getAuthor() ]
def email(self): return [ self.opt.email, self.pref.getEmail() ]
def skeleton(self): return [ self.opt.skeleton, self.pref.getSkeleton() ]
def archs(self): return [ self.opt.archs, [] ]
def releases(self): return [ self.opt.releases, ['-RHEL4', '-RHELClient5', '-RHELServer5'] ]
def runfor(self): return [ self.opt.runfor, [self.pref.getPackage()] ]
def requires(self): return [ self.opt.requires, [self.pref.getPackage()] ]
def rhtsrequires(self): return [ self.opt.rhtsrequires, [] ]
def time(self): return [ self.opt.time, self.pref.getTime() ]
def priority(self): return [ self.opt.priority, self.pref.getPriority() ]
def confidential(self): return [ self.opt.confidential, self.pref.getConfidential() ]
def destructive(self): return [ self.opt.destructive, self.pref.getDestructive() ]
def prefix(self): return [ self.opt.prefix, self.pref.getPrefix() ]
def license(self): return [ self.opt.license, self.pref.getLicense() ]
def version(self): return [ self.opt.version, self.pref.getVersion() ]
def desc(self): return [ self.opt.desc, "What the test does" ]
def description(self): return [ self.opt.description, "" ]
def namespace(self): return [ self.opt.namespace, self.pref.getNamespace() ]
def package(self): return [ self.opt.package, self.pref.getPackage() ]
def type(self): return [ self.opt.type, self.pref.getType() ]
def path(self): return [ self.opt.path, "" ]
def name(self): return [ self.opt.name, "a-few-descriptive-words" ]
def bugs(self): return [ self.opt.bugs, [] ]
class Inquisitor:
"""
Father of all Inquisitors
Well, he is not quite a real Inquisitor, as he is very
friendly and accepts any answer you give him.
"""
def __init__(self, options = None, suggest = None):
# set options & initialize
self.options = options
self.suggest = suggest
self.common = True
self.error = 0
self.init()
if not self.options: return
# finally ask for confirmation or valid value
if self.confirm or not self.valid():
self.ask()
def init(self):
""" Initialize basic stuff """
self.name = "Answer"
self.question = "What is the answer to life, the universe and everything"
self.description = None
self.default()
def default(self, optpref=None):
""" Initialize default option data """
# nothing to do when options not supplied
if not optpref: return
# initialize opt (from command line) & pref (from user preferences)
(self.opt, self.pref) = optpref
# set confirm flag
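# (common options are asked for unless the confirm level is "nothing";
# uncommon ones only when the level is "everything")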
self.confirm = self.common and self.options.confirm() != "nothing" \
or not self.common and self.options.confirm() == "everything"
# now set the data!
# commandline option overrides both preferences & suggestion
if self.opt:
self.data = self.opt
self.confirm = False
# use suggestion if available (disabled in makefile edit mode)
elif self.suggest and not self.options.makefile:
self.data = self.suggest
# otherwise use the default from user preferences
else:
self.data = self.pref
# reset the user preference if it's not a valid value
# (to prevent suggestions like: x is not valid what about x?)
if not self.valid():
self.pref = "something else"
def defaultify(self):
""" Set data to default/preferred value """
self.data = self.pref
def normalize(self):
""" Remove trailing and double spaces """
if not self.data: return
self.data = re.sub("^\s*", "", self.data)
self.data = re.sub("\s*$", "", self.data)
self.data = re.sub("\s+", " ", self.data)
def read(self):
""" Read an answer from user """
try:
answer = unicode(sys.stdin.readline().strip(), "utf-8")
except KeyboardInterrupt:
print "\nOk, finishing for now. See you later ;-)"
sys.exit(4)
# if just enter pressed, we leave self.data as it is (confirmed)
if answer != "":
# append the data if the answer starts with a "+"
m = re.search("^\+\s*(.*)", answer)
if m and type(self.data) is list:
self.data.append(m.group(1))
else:
self.data = answer
self.normalize()
def heading(self):
""" Display nice heading with question """
print "\n" + self.question + "\n" + 77 * "~"
def value(self):
""" Return current value """
return self.data
def show(self, data = None):
""" Return current value nicely formatted (redefined in children)"""
if not data: data = self.data
if data == "": return "None"
return data
def singleName(self):
""" Return the name in lowercase singular (for error reporting) """
return re.sub("s$", "", self.name.lower())
def matchName(self, text):
""" Return true if the text matches inquisitor's name """
# remove any special characters from the search string
text = re.sub("[^\w\s]", "", text)
return re.search(text, self.name, re.I)
def describe(self):
if self.description is not None:
print wrapText(self.description)
def format(self, data = None):
""" Display in a nicely indented style """
print self.name.rjust(ReviewWidth), ":", (data or self.show())
def formatMakefileLine(self, name = None, value = None):
""" Format testinfo line for Makefile inclusion """
if not (self.value() or value): return ""
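# produces a Makefile line like: @echo "Priority:        Normal" >> $(METADATA)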
return '\n @echo "%s%s" >> $(METADATA)' % (
((name or self.name) + ":").ljust(MakefileLineWidth),
shellEscaped(value or self.value()))
def valid(self):
""" Return true when provided value is a valid answer """
return self.data not in ["?", ""]
def suggestion(self):
""" Provide user with a suggestion or detailed description """
# if current data is valid, offer it as a suggestion
if self.valid():
if self.options.verbose(): self.describe()
return "%s?" % self.show()
# otherwise suggest the default value
else:
bad = self.data
self.defaultify()
# regular suggestion (no question mark for help)
if bad is None or "".join(bad) != "?":
self.error += 1
if self.error > 1 or self.options.verbose(): self.describe()
return "%s is not a valid %s, what about %s?" \
% (self.show(bad), self.singleName(), self.show(self.pref))
# we got question mark ---> display description to help
else:
self.describe()
return "%s?" % self.show()
def ask(self, force = False, suggest = None):
""" Ask for valid value """
if force: self.confirm = True
if suggest: self.data = suggest
self.heading()
# keep asking until we get sane answer
while self.confirm or not self.valid():
sys.stdout.write("[%s] " % self.suggestion().encode("utf-8"))
self.read()
self.confirm = False
def edit(self, suggest = None):
""" Edit = force to ask again
returns true if changes were made """
before = self.data
self.ask(force = True, suggest = suggest)
return self.data != before
class SingleChoice(Inquisitor):
""" This Inquisitor accepts just one value from the given list """
def init(self):
self.name = "SingleChoice"
self.question = "Give a valid answer from the list"
self.description = "Supply a single value from the list above."
self.list = ["list", "of", "valid", "values"]
self.default()
def propose(self):
""" Try to find nearest match in the list"""
if self.data == "?": return
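# e.g. for Priority, typing "med" proposes "Medium"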
for item in self.list:
if re.search(self.data, item, re.I):
self.pref = item
return
def valid(self):
if self.data in self.list:
return True
else:
self.propose()
return False
def heading(self):
Inquisitor.heading(self)
if self.list: print wrapText("Possible values: " + ", ".join(self.list))
class YesNo(SingleChoice):
""" Inquisitor expecting only two obvious answers """
def init(self):
self.name = "Yes or No"
self.question = "Are you sure?"
self.description = "All you need to say is simply 'Yes,' or 'No'; \
anything beyond this comes from the evil one."
self.list = ["Yes", "No"]
self.default()
def normalize(self):
""" Recognize yes/no abbreviations """
if not self.data: return
# (re.I cannot be passed as the fourth positional argument of re.sub,
# that would be the count, so match both cases explicitly)
self.data = re.sub("^[yY].*$", "Yes", self.data)
self.data = re.sub("^[nN].*$", "No", self.data)
def formatMakefileLine(self, name = None, value = None):
""" Format testinfo line for Makefile inclusion """
# testinfo requires lowercase yes/no
return Inquisitor.formatMakefileLine(self,
name = name, value = self.data.lower())
def valid(self):
self.normalize()
return SingleChoice.valid(self)
class MultipleChoice(SingleChoice):
""" This Inquisitor accepts more values but only from the given list """
def init(self):
self.name = "MultipleChoice"
self.question = "Give one or more values from the list"
self.description = "You can supply multiple values separated by space or comma\n"\
"but they all must be from the list above."
self.list = ["list", "of", "valid", "values"]
self.emptyListMeaning = "None"
self.sort = True
self.default()
def default(self, optpref):
# initialize opt & pref
(self.opt, self.pref) = optpref
# set confirm flag
self.confirm = self.common and self.options.confirm() != "nothing" \
or not self.common and self.options.confirm() == "everything"
# first initialize data as an empty list
self.data = []
# append possible suggestion to the data (disabled in makefile edit mode)
if self.suggest and not self.options.makefile:
self.data.append(self.suggest)
# add items obtained from the command line
if self.opt:
self.data.extend(self.opt)
self.confirm = False
# default preferences used only if still no data obtained
if not self.data:
self.data.extend(self.pref)
self.listify()
def defaultify(self):
self.data = self.pref[:]
self.listify()
def listify(self):
# make sure data is list
if type(self.data) is not list:
# special value "none" means an empty list
if self.data.lower() == "none":
self.data = []
return
# depending on emptyListMeaning, "all" can mean either
elif self.data.lower() == "all":
# no restrictions (releases, archs)
if self.emptyListMeaning == "All":
self.data = []
# all items (reproducers)
else:
self.data = self.list[:]
return
# otherwise just listify
else:
self.data = [ self.data ]
# expand comma/space separated items
result = []
for item in self.data:
# strip trailing separators
item = re.sub('[ ,]*$', '', item)
# split on spaces and commas
result.extend(re.split('[ ,]+', item))
self.data = result
# let's make data unique and sorted
if self.sort:
self.data = unique(self.data)
self.data.sort()
def normalize(self):
""" Parse input into a list """
self.listify()
def showItem(self, item):
return item
def formatMakefileLine(self, name = None, value = None):
""" Format testinfo line for Makefile inclusion """
# for multiple choice we produce values joined by spaces
return Inquisitor.formatMakefileLine(self,
name = name, value = " ".join(self.data))
def show(self, data = None):
if data is None: data = self.data
if not data: return self.emptyListMeaning
return ", ".join(map(self.showItem, data))
def propose(self):
""" Try to find nearest matches in the list"""
if self.data[0] == "?": return
result = []
try:
for item in self.list:
if re.search(self.data[0], item, re.I):
result.append(item)
except re.error:
pass
if result:
self.pref = result[:]
def validItem(self, item):
return item in self.list
def valid(self):
for item in self.data:
if not self.validItem(item):
self.data = [item]
self.propose()
return False
return True
# TODO: Make the licensing organisation configurable
LICENSE_ORGANISATION = "Red Hat, Inc"
GPLv2_ONLY_LICENSE = ("""Copyright (c) %s %s.
This copyrighted material is made available to anyone wishing
to use, modify, copy, or redistribute it subject to the terms
and conditions of the GNU General Public License version 2.
This program is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA."""
% (date.today().year, LICENSE_ORGANISATION))
GPLv2_OR_LATER_LICENSE = ("""Copyright (c) %s %s.
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/."""
% (date.today().year, LICENSE_ORGANISATION))
GPLv3_OR_LATER_LICENSE = ("""Copyright (c) %s %s.
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/."""
% (date.today().year, LICENSE_ORGANISATION))
PROPRIETARY_LICENSE_TEMPLATE = ("""Copyright (c) %s %s. All rights reserved.
%%s"""
% (date.today().year, LICENSE_ORGANISATION))
DEFINED_LICENSES = {
# Annoyingly, the bare "GPLv2" and "GPLv3" options differ in whether or not
# they include the "or later" clause. Unfortunately, changing it now could
# result in GPLv3 tests intended to be GPLv3+ getting mislabeled.
"GPLv2" : GPLv2_ONLY_LICENSE,
"GPLv3" : GPLv3_OR_LATER_LICENSE,
# The GPLvX+ variants consistently use the "or later" phrasing
"GPLv2+" : GPLv2_OR_LATER_LICENSE,
"GPLv3+" : GPLv3_OR_LATER_LICENSE,
"other" : PROPRIETARY_LICENSE_TEMPLATE,
}
class License(Inquisitor):
""" License to be included in test files """
def init(self):
self.name = "License"
self.question = "What license should be used?"
self.description = "Just supply a license: GPLv2+, GPLv3+, ..."
self.common = False
self.default(self.options.license())
self.licenses = DEFINED_LICENSES
def get(self):
""" Return license corresponding to user choice """
if self.data != "other" and self.data in self.licenses.keys():
return dedentText(self.licenses[self.data])
else:
license = self.options.pref.getLicenseContent(self.data)
if license: # user defined license from preferences
return dedentText(self.licenses["other"] % (
license,), count = 12)
else: # anything else
return dedentText(self.licenses["other"] % (
"PROVIDE YOUR LICENSE TEXT HERE.",))
class Time(Inquisitor):
""" Time for test to run """
def init(self):
self.name = "Time"
self.question = "Time for test to run"
self.description = """The time must be in format [1-99][m|h|d] for 1-99
minutes/hours/days (e.g. 3m, 2h, 1d)"""
self.default(self.options.time())
def valid(self):
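# accept 1-99 minutes/hours/days ("3m", "2h", "1d"); reject "0m" or "100h"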
m = re.match("^(\d{1,2})[mhd]$", self.data)
return m is not None and int(m.group(1)) > 0
class Version(Inquisitor):
""" Version of the test """
def init(self):
self.name = "Version"
self.question = "Version of the test"
self.description = "Must be in the format x.y"
self.common = False
self.default(self.options.version())
def valid(self):
return re.match("^\d+\.\d+$", self.data)
class Priority(SingleChoice):
""" Test priority """
def init(self):
self.name = "Priority"
self.question = "Priority"
self.description = "Test priority for scheduling purposes"
self.common = False
self.list = "Low Medium Normal High Manual".split()
self.default(self.options.priority())
class Confidential(YesNo):
""" Confidentiality flag """
def init(self):
self.name = "Confidential"
self.question = "Confidential"
self.description = "Should the test be kept internal?"
self.common = False
self.list = ["Yes", "No"]
self.default(self.options.confidential())
def singleName(self):
return "confidentiality flag"
class Destructive(YesNo):
""" Destructivity flag """
def init(self):
self.name = "Destructive"
self.question = "Destructive"
self.description = "Is it such an ugly test that it can break the system?"
self.common = False
self.list = ["Yes", "No"]
self.default(self.options.destructive())
def singleName(self):
return "destructivity flag"
class Prefix(YesNo):
""" Bug number prefix """
def init(self):
self.name = "Prefix the test name"
self.question = "Add the bug number to the test name?"
self.description = "Should we prefix the test name with the bug/CVE number?"
self.common = False
self.list = ["Yes", "No"]
self.default(self.options.prefix())
def singleName(self):
return "prefix choice"
class Releases(MultipleChoice):
""" List of releases the test should run on """
def init(self):
self.name = "Releases"
self.question = "Releases (choose one or more or \"all\")"
self.description = """One or more values separated with space or comma
or "all" for no limitation. You can also use a minus sign to exclude
a specific release (-RHEL4)"""
self.list = "RHEL2.1 RHEL3 RHEL4 RHELServer5 RHELClient5".split()
self.list += ["RHEL{0}".format(id) for id in range(6, 9)]
self.list += "FC4 FC5 FC6".split()
self.list += ["F{0}".format(release) for release in range(7, 28)]
self.sort = True
self.common = False
self.emptyListMeaning = "All"
self.default(self.options.releases())
def validItem(self, item):
item = re.sub("^-","", item)
return item in self.list
class Architectures(MultipleChoice):
""" List of architectures the test should run on """
def init(self):
self.name = "Architectures"
self.question = "Architectures (choose one or more or \"all\")"
self.description = "You can supply multiple values separated by space or comma\n"\
"but they all must be from the list of possible values above."
self.list = "i386 x86_64 ia64 ppc ppc64 ppc64le s390 s390x aarch64".split()
self.sort = True
self.common = False
self.emptyListMeaning = "All"
self.default(self.options.archs())
class Namespace(SingleChoice):
""" Namespace"""
def init(self):
self.name = "Namespace"
self.question = "Namespace"
self.description = "Provide a root namespace for the test."
self.list = """distribution installation kernel desktop tools CoreOS
cluster rhn examples performance ISV virt""".split()
if self.options: self.default(self.options.namespace())
def match(self):
""" Return regular expression matching valid data """
return "(" + "|".join(self.list) + ")"
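# e.g. "(distribution|installation|kernel|...)"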
class Package(Inquisitor):
""" Package for which the test is written """
def init(self):
self.name = "Package"
self.question = "What package is this test for?"
self.description = "Supply a package name (without version or release number)"
self.common = False
self.default(self.options.package())
def valid(self):
return RegExpPackage.match(self.data)
class Type(Inquisitor):
""" Test type """
def init(self):
self.name = "Test type"
self.question = "What is the type of test?"
self.description = "Specify the type of the test. Hints above."
self.proposed = 0
self.proposedname = ""
self.list = SuggestedTestTypes
self.dirs = [o for o in os.listdir('.') if os.path.isdir(os.path.join('.', o)) and not o.startswith('.')]
if self.options: self.default(self.options.type())
def heading(self):
Inquisitor.heading(self)
print wrapText("Recommended values: " + ", ".join(sorted(self.dirs)))
print wrapText("Possible values: " + ", ".join(self.list))
def propose(self):
""" Try to find nearest match in the list"""
self.proposed = 1
self.proposedname = self.data
self.description = "Type '%s' does not exist. Confirm creating a new type." % self.proposedname
self.describe()
for item in self.list:
if re.search(self.data, item, re.I):
self.pref = item
return
def suggestSkeleton(self):
""" For multihost tests and library suggest proper skeleton """
if self.data == "Multihost":
return "multihost"
elif self.data == "Library":
return "library"
def valid(self):
if self.data in self.list or self.data in self.dirs or (self.proposed == 1 and self.proposedname == self.data):
return True
else:
self.propose()
return False
class Path(Inquisitor):
""" Relative path to test """
def init(self):
self.name = "Relative path"
self.question = "Relative path under test type"
self.description = """Path can be used to organize tests
for complex packages, e.g. 'server' part in
/CoreOS/mysql/Regression/server/bz123456-some-test.
(You can also use dir/subdir for deeper nesting.
Use "none" for no path.)"""
self.common = False
self.default(self.options.path())
def valid(self):
return (self.data is None or self.data == ""
or RegExpPath.match(self.data))
def normalize(self):
""" Replace none keyword with real empty path """
Inquisitor.normalize(self)
if self.data and re.match('none', self.data, re.I):
self.data = None
def value(self):
if self.data:
return "/" + self.data
else:
return ""
class Bugs(MultipleChoice):
""" List of bugs/CVE's related to the test """
def init(self):
self.name = "Bug or CVE numbers"
self.question = "Bugs or CVE's related to the test"
self.description = """Supply one or more bug or CVE numbers
(e.g. 123456 or 2009-7890). Use the '+' sign to add
the bugs instead of replacing the current list."""
self.list = []
self.sort = False
self.emptyListMeaning = "None"
self.bug = None
self.default(self.options.bugs())
self.reproducers = Reproducers(self.options)
def validItem(self, item):
return RegExpBug.match(item) \
or RegExpCVE.match(item)
def valid(self):
# let's remove possible (otherwise harmless) bug/CVE prefixes
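# e.g. "bz123456" -> "123456", "CVE-2009-7890" -> "2009-7890"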
for i in range(len(self.data)):
self.data[i] = re.sub(RegExpBugPrefix, "", self.data[i])
self.data[i] = re.sub(RegExpCVEPrefix, "", self.data[i])
# and do the real validation
return MultipleChoice.valid(self)
def showItem(self, item):
if RegExpBug.match(item):
return "BZ#" + item
elif RegExpCVE.match(item):
return "CVE-" + item
else:
return item
def formatMakefileLine(self, name = None, value = None):
""" Format testinfo line for Makefile inclusion """
bugs = []
# filter bugs only (CVE's are not valid for testinfo.desc)
for item in self.data:
if RegExpBug.match(item):
bugs.append(item)
if not bugs: return ""
return Inquisitor.formatMakefileLine(self, name = "Bug", value = " ".join(bugs))
def getFirstBug(self):
""" Return first bug/CVE if there is some """
if self.data: return self.showItem(self.data[0])
def fetchBugDetails(self):
""" Fetch details of the first bug from Bugzilla """
if self.options.bugzilla and self.data:
# use CVE prefix when searching for CVE's in bugzilla
if RegExpCVE.match(self.data[0]):
bugid = "CVE-" + self.data[0]
else:
bugid = self.data[0]
# contact bugzilla and try to fetch the details
try:
print "Fetching details for", self.showItem(self.data[0])
self.bug = self.options.bugzilla.getbug(bugid,
include_fields=['id', 'alias', 'component', 'summary',
'attachments'])
except Exception, e:
if re.search('not authorized to access', str(e)):
print "Sorry, %s has restricted access.\n"\
"Use the 'bugzilla login' command to set up cookies, "\
"then try again." % self.showItem(self.data[0])
else:
print "Sorry, could not get details for %s\n%s" % (bugid, e)
sleep(3)
return
# successfully fetched
else:
# for CVE's add the bug id to the list of bugs
if RegExpCVE.match(self.data[0]):
self.data.append(str(self.bug.id))
# else investigate for possible CVE alias
elif self.bug.alias and RegExpCVELong.match(self.bug.alias[0]):
cve = re.sub("CVE-", "", self.bug.alias[0])
self.data[:0] = [cve]
# and search attachments for possible reproducers
if self.bug:
self.reproducers.find(self.bug)
return True
def getSummary(self):
""" Return short summary fetched from bugzilla """
if self.bug:
return re.sub("CVE-\d{4}-\d{4}\s*", "", removeEmbargo(self.bug.summary))
def getComponent(self):
""" Return bug component fetched from bugzilla """
if self.bug:
component = self.bug.component
# Use the first component if component list given
if isinstance(component, list):
component = component[0]
# Ignore generic CVE component "vulnerability"
if component != 'vulnerability':
return component
def getLink(self):
""" Return URL of the first bug """
if self.data:
if RegExpCVE.match(self.data[0]):
return "%sCVE-%s" % (BugzillaUrl, self.data[0])
else:
return BugzillaUrl + self.data[0]
def suggestType(self):
""" Guess test type according to first bug/CVE """
if self.data:
if RegExpBug.match(self.data[0]):
return "Regression"
elif RegExpCVE.match(self.data[0]):
return "Security"
def suggestConfidential(self):
""" If the first bug is a CVE, suggest as confidential """
if self.data and RegExpCVE.match(self.data[0]):
return "Yes"
else:
return None
def suggestTestName(self):
""" Suggest testname from bugzilla summary """
return dashifyText(shortenText(self.getSummary(), MaxLengthTestName))
def suggestDescription(self):
""" Suggest short description from bugzilla summary """
if self.getSummary():
return "Test for %s (%s)" % (
self.getFirstBug(),
shortenText(re.sub(":", "", self.getSummary()),
max=MaxLengthSuggestedDesc))
def formatBugDetails(self):
""" Put details fetched from Bugzilla into nice format for PURPOSE file """
if not self.bug:
return ""
else:
return "Bug summary: %s\nBugzilla link: %s\n" % (
self.getSummary(), self.getLink())
class Name(Inquisitor):
""" Test name """
def init(self):
self.name = "Test name"
self.question = "Test name"
self.description = """Use few, well chosen words describing
what the test does. Special chars will be automatically
converted to dashes."""
self.default(self.options.name())
self.data = dashifyText(self.data, allowExtraChars="_")
self.bugs = Bugs(self.options)
self.bugs.fetchBugDetails()
# suggest test name (except when supplied on command line)
if self.bugs.suggestTestName() and not self.opt:
self.data = self.bugs.suggestTestName()
self.prefix = Prefix(self.options)
def normalize(self):
""" Add auto-dashify function for name editing """
if not self.data == "?":
# when editing the test name --- dashify, but allow
# using underscore if the user really wants it
self.data = dashifyText(self.data, allowExtraChars="_")
def valid(self):
return self.data is not None and RegExpTestName.match(self.data)
def value(self):
""" Return test name (including bug/CVE number) """
bug = self.bugs.getFirstBug()
if bug and self.prefix.value() == "Yes":
return bug.replace('BZ#','bz') + "-" + self.data
else:
return self.data
def format(self, data = None):
""" When formatting let's display with bug/CVE numbers """
Inquisitor.format(self, self.value())
class Reproducers(MultipleChoice):
""" Possible reproducers from Bugzilla """
def init(self):
self.name = "Reproducers to fetch"
self.question = "Which Bugzilla attachments do you wish to download?"
self.description = """Wizard can download Bugzilla attachments for you.
It suggests those which look like reproducers, but you can pick
the right attachments manually as well."""
self.bug = None
self.list = []
self.sort = True
self.emptyListMeaning = "None"
self.common = False
self.default([[], []])
self.confirm = False
def singleName(self):
return "reproducer"
def find(self, bug):
""" Get the list of all attachments (except patches and obsoletes) """
if not bug or not bug.attachments:
return False
# remember the bug & empty the lists
self.bug = bug
self.list = []
self.pref = []
self.data = []
# Provide "None" as a possible choice for attachment download
self.list.append("None")
print "Examining attachments for possible reproducers"
for attachment in self.bug.attachments:
# skip obsolete and patch attachments
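# (attachment fields may appear under either naming scheme:
#  is_patch/ispatch, file_name/filename, is_obsolete/isobsolete)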
is_patch = attachment.get("is_patch", attachment.get("ispatch"))
filename = attachment.get("file_name", attachment.get("filename"))
is_obsolete = attachment.get(
"is_obsolete", attachment.get("isobsolete"))
if is_patch == 0 and is_obsolete == 0:
self.list.append(filename)
# add to suggested attachments if it looks like a reproducer
if RegExpReproducer.search(attachment['description']) or \
RegExpReproducer.search(filename):
self.data.append(filename)
self.pref.append(filename)
print "Adding",
else:
print "Skipping",
print "%s (%s)" % (filename, attachment['description'])
sleep(1)
def download(self, path):
""" Download selected reproducers """
if not self.bug:
return False
for attachment in self.bug.attachments:
attachment_filename = attachment.get(
"file_name", attachment.get("filename"))
is_obsolete = attachment.get(
"is_obsolete", attachment.get("isobsolete"))
if attachment_filename in self.data and is_obsolete == 0:
print "Attachment", attachment_filename,
try:
dirfiles = os.listdir(path)
filename = path + "/" + attachment_filename
remote = self.options.bugzilla.openattachment(
attachment['id'])
# rename the attachment if it has the same name as one
# of the files in the current directory
if attachment_filename in dirfiles:
print "- file already exists in {0}/".format(path)
new_name = ""
while new_name == "":
print "Choose a new filename for the attachment: ",
new_name = unicode(
sys.stdin.readline().strip(), "utf-8")
filename = path + "/" + new_name
local = open(filename, 'w')
local.write(remote.read())
remote.close()
local.close()
# optionally add to the git repository
if self.options.opt.git:
addToGit(filename)
addedToGit = ", added to git"
else:
addedToGit = ""
except:
print "download failed"
print "python-bugzilla-0.5 or higher required"
sys.exit(5)
else:
print "downloaded" + addedToGit
class RunFor(MultipleChoice):
""" List of packages which this test should be run for """
def init(self):
self.name = "Run for packages"
self.question = "Run for packages"
self.description = """Provide a list of packages which this test should
be run for. It's a good idea to add dependent packages here."""
self.list = []
self.sort = True
self.emptyListMeaning = "None"
self.common = False
self.default(self.options.runfor())
def validItem(self, item):
return RegExpPackage.match(item)
class Requires(MultipleChoice):
""" List of packages which should be installed on test system """
def init(self):
self.name = "Required packages"
self.question = "Requires: packages which test depends on"
self.description = """Just write a list of package names
which should be automatically installed on the test system."""
self.list = []
self.sort = True
self.emptyListMeaning = "None"
self.common = False
self.default(self.options.requires())
def validItem(self, item):
return RegExpPackage.match(item)
class RhtsRequires(MultipleChoice):
""" List of other RHTS tests or libraries which this test requires """
def init(self):
self.name = "Required RHTS tests/libraries"
self.question = "RhtsRequires: other tests or libraries required by " \
"this one, e.g. test(/mytests/common) or library(mytestlib)"
self.description = """Write a list of RPM dependencies which should be
installed by the package manager. Other tasks provide test(/task/name)
and libraries provide library(name)."""
self.list = []
self.sort = True
self.emptyListMeaning = "None"
self.common = False
self.default(self.options.rhtsrequires())
def validItem(self, item):
return RegExpRhtsRequires.match(item)
class Skeleton(SingleChoice):
""" Skeleton to be used for creating the runtest.sh """
def init(self):
self.name = "Skeleton"
self.question = "Skeleton to be used for creating the runtest.sh"
self.description = """There are several runtest.sh skeletons available:
beaker (general Beaker template),
beakerlib (BeakerLib structure),
simple (creates separate script with test logic),
empty (populates runtest.sh just with header and license) and
"skelX" (custom skeletons saved in user preferences)."""
self.skeletons = parseString("""
<skeletons>
<skeleton name="beakerlib">
# Include Beaker environment
. /usr/bin/rhts-environment.sh || exit 1
. /usr/share/beakerlib/beakerlib.sh || exit 1
PACKAGE="<package/>"
rlJournalStart
rlPhaseStartSetup
rlAssertRpm $PACKAGE
rlRun "TmpDir=\$(mktemp -d)" 0 "Creating tmp directory"
rlRun "pushd $TmpDir"
rlPhaseEnd
rlPhaseStartTest
rlRun "touch foo" 0 "Creating the foo test file"
rlAssertExists "foo"
rlRun "ls -l foo" 0 "Listing the foo test file"
rlPhaseEnd
rlPhaseStartCleanup
rlRun "popd"
rlRun "rm -r $TmpDir" 0 "Removing tmp directory"
rlPhaseEnd
rlJournalPrintText
rlJournalEnd
</skeleton>
<skeleton name="conditional">
# Include Beaker environment
. /usr/bin/rhts-environment.sh || exit 1
. /usr/share/beakerlib/beakerlib.sh || exit 1
PACKAGE="<package/>"
rlJournalStart
rlPhaseStartSetup
rlAssertRpm $PACKAGE || rlDie "$PACKAGE not installed"
rlRun "TmpDir=\$(mktemp -d)" 0 "Creating tmp directory"
rlRun "pushd $TmpDir"
rlPhaseEnd
rlGetTestState && { rlPhaseStartTest
rlRun "touch foo" 0 "Creating the foo test file"
rlAssertExists "foo"
rlRun "ls -l foo" 0 "Listing the foo test file"
rlPhaseEnd; }
rlPhaseStartCleanup
rlRun "popd"
rlRun "rm -r $TmpDir" 0 "Removing tmp directory"
rlPhaseEnd
rlJournalPrintText
rlJournalEnd
</skeleton>
<skeleton name="beaker">
# Include Beaker environment
. /usr/bin/rhts-environment.sh || exit 1
PACKAGE="<package/>"
set -x
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
score=0
rpm -q $PACKAGE || ((score++))
TmpDir=$(mktemp -d) || ((score++))
pushd $TmpDir || ((score++))
((score == 0)) && result=PASS || result=FAIL
echo "Setup finished, result: $result, score: $score"
report_result $TEST/setup $result $score
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Test
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
score=0
touch foo || ((score++))
[ -e foo ] || ((score++))
ls -l foo || ((score++))
((score == 0)) && result=PASS || result=FAIL
echo "Testing finished, result: $result, score: $score"
report_result $TEST/testing $result $score
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Cleanup
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
score=0
popd || ((score++))
rm -r "$TmpDir" || ((score++))
((score == 0)) && result=PASS || result=FAIL
echo "Cleanup finished, result: $result, score: $score"
report_result $TEST/cleanup $result $score
</skeleton>
<skeleton name="multihost">
# Include Beaker environment
. /usr/bin/rhts-environment.sh || exit 1
. /usr/share/beakerlib/beakerlib.sh || exit 1
PACKAGE="<package/>"
# set client & server manually if debugging
# SERVERS="server.example.com"
# CLIENTS="client.example.com"
Server() {
rlPhaseStartTest Server
# server setup goes here
rlRun "rhts-sync-set -s READY" 0 "Server ready"
rlRun "rhts-sync-block -s DONE $CLIENTS" 0 "Waiting for the client"
rlPhaseEnd
}
Client() {
rlPhaseStartTest Client
rlRun "rhts-sync-block -s READY $SERVERS" 0 "Waiting for the server"
# client action goes here
rlRun "rhts-sync-set -s DONE" 0 "Client done"
rlPhaseEnd
}
rlJournalStart
rlPhaseStartSetup
rlAssertRpm $PACKAGE
rlLog "Server: $SERVERS"
rlLog "Client: $CLIENTS"
rlRun "TmpDir=\$(mktemp -d)" 0 "Creating tmp directory"
rlRun "pushd $TmpDir"
rlPhaseEnd
if echo $SERVERS | grep -q $HOSTNAME ; then
Server
elif echo $CLIENTS | grep -q $HOSTNAME ; then
Client
else
rlReport "Stray" "FAIL"
fi
rlPhaseStartCleanup
rlRun "popd"
rlRun "rm -r $TmpDir" 0 "Removing tmp directory"
rlPhaseEnd
rlJournalPrintText
rlJournalEnd
</skeleton>
<skeleton name="simple">
rhts-run-simple-test $TEST ./test
</skeleton>
<skeleton name="empty">
</skeleton>
<skeleton name="library">
# Include Beaker environment
. /usr/bin/rhts-environment.sh || exit 1
. /usr/share/beakerlib/beakerlib.sh || exit 1
PACKAGE="<package/>"
PHASE=${PHASE:-Test}
rlJournalStart
rlPhaseStartSetup
rlRun "rlImport <package/>/<testname/>"
rlRun "TmpDir=\$(mktemp -d)" 0 "Creating tmp directory"
rlRun "pushd $TmpDir"
rlPhaseEnd
# Create file
if [[ "$PHASE" =~ "Create" ]]; then
rlPhaseStartTest "Create"
fileCreate
rlPhaseEnd
fi
# Self test
if [[ "$PHASE" =~ "Test" ]]; then
rlPhaseStartTest "Test default name"
fileCreate
rlAssertExists "$fileFILENAME"
rlPhaseEnd
rlPhaseStartTest "Test filename in parameter"
fileCreate "parameter-file"
rlAssertExists "parameter-file"
rlPhaseEnd
rlPhaseStartTest "Test filename in variable"
FILENAME="variable-file" fileCreate
rlAssertExists "variable-file"
rlPhaseEnd
fi
rlPhaseStartCleanup
rlRun "popd"
rlRun "rm -r $TmpDir" 0 "Removing tmp directory"
rlPhaseEnd
rlJournalPrintText
rlJournalEnd
</skeleton>
<skeleton name="parametrized">
# Include Beaker environment
. /usr/bin/rhts-environment.sh || exit 1
. /usr/share/beakerlib/beakerlib.sh || exit 1
# Packages to be tested
PACKAGES=${PACKAGES:-<runfor />}
# Other required packages
REQUIRES=${REQUIRES:-<requires />}
rlJournalStart
rlPhaseStartSetup
rlAssertRpm --all
rlRun "TmpDir=\$(mktemp -d)" 0 "Creating tmp directory"
rlRun "pushd $TmpDir"
rlPhaseEnd
rlPhaseStartTest
rlRun "touch foo" 0 "Creating the foo test file"
rlAssertExists "foo"
rlRun "ls -l foo" 0 "Listing the foo test file"
rlPhaseEnd
rlPhaseStartCleanup
rlRun "popd"
rlRun "rm -r $TmpDir" 0 "Removing tmp directory"
rlPhaseEnd
rlJournalPrintText
rlJournalEnd
</skeleton>
</skeletons>
""")
self.makefile = """
export TEST=%s
export TESTVERSION=%s
BUILT_FILES=
FILES=$(METADATA) %s
.PHONY: all install download clean
run: $(FILES) build
./runtest.sh
build: $(BUILT_FILES)%s
clean:
rm -f *~ $(BUILT_FILES)
include /usr/share/rhts/lib/rhts-make.include
$(METADATA): Makefile
@echo "Owner: %s" > $(METADATA)
@echo "Name: $(TEST)" >> $(METADATA)
@echo "TestVersion: $(TESTVERSION)" >> $(METADATA)
@echo "Path: $(TEST_DIR)" >> $(METADATA)%s
rhts-lint $(METADATA)
"""
# skeleton for lib.sh file when creating library
self.library = """
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# library-prefix = %s
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
true <<'=cut'
=pod
=head1 NAME
%s/%s - %s
=head1 DESCRIPTION
This is a trivial example of a BeakerLib library. Its main goal
is to provide a minimal template which can be used as a skeleton
when creating a new library. It implements function fileCreate().
Please note that all library functions must begin with the same
prefix which is defined at the beginning of the library.
=cut
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Variables
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
true <<'=cut'
=pod
=head1 VARIABLES
Below is the list of global variables. When writing a new library,
please make sure that all global variables start with the library
prefix to prevent collisions with other libraries.
=over
=item fileFILENAME
Default file name to be used when none is provided ('foo').
=back
=cut
fileFILENAME="foo"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Functions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
true <<'=cut'
=pod
=head1 FUNCTIONS
=head2 fileCreate
Create a new file, name it accordingly and make sure (assert) that
the file is successfully created.
fileCreate [filename]
=over
=item filename
Name for the newly created file. Optionally the filename can be
provided in the FILENAME environment variable. When no file name
is given, 'foo' is used by default.
=back
Returns 0 when the file is successfully created, non-zero otherwise.
=cut
fileCreate() {
local filename
filename=${1:-$FILENAME}
filename=${filename:-$fileFILENAME}
rlRun "touch '$filename'" 0 "Creating file '$filename'"
rlAssertExists "$filename"
return $?
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Execution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
true <<'=cut'
=pod
=head1 EXECUTION
This library supports direct execution. When run as a task, phases
provided in the PHASE environment variable will be executed.
Supported phases are:
=over
=item Create
Create a new empty file. Use FILENAME to provide the desired file
name. By default 'foo' is created in the current directory.
=item Test
Run the self test suite.
=back
=cut
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Verification
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This is a verification callback which will be called by
# rlImport after sourcing the library to make sure everything is
# all right. It makes sense to perform a basic sanity test and
# check that all required packages are installed. The function
# should return 0 only when the library is ready to serve.
fileLibraryLoaded() {
if rpm=$(rpm -q coreutils); then
rlLogDebug "Library coreutils/file running with $rpm"
return 0
else
rlLogError "Package coreutils not installed"
return 1
fi
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Authors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
true <<'=cut'
=pod
=head1 AUTHORS
=over
=item *
%s
=back
=cut
"""
self.list = []
self.list.extend(findNodeNames(self.skeletons, "skeleton"))
self.list.extend(findNodeNames(self.options.pref.skeletons, "skeleton"))
self.common = False
self.default(self.options.skeleton())
self.requires = None
self.rhtsrequires = None
def replaceVariables(self, xml, test = None):
""" Replace all <variable> tags with their respective values """
skeleton = ""
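# e.g. <test/> expands to the full test path, <runfor/> to the space-separated run-for packages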
for child in xml.childNodes:
# regular text node -> just copy
if child.nodeType == child.TEXT_NODE:
skeleton += child.nodeValue
# xml tag -> try to expand value of test.tag.show()
elif child.nodeType == child.ELEMENT_NODE:
try:
name = child.tagName
# some variables need a special treatment
if name == "test":
value = test.fullPath()
elif name == "bugs":
value = test.testname.bugs.show()
elif name == "reproducers":
value = test.testname.bugs.reproducers.show()
elif name == "runfor":
value = ' '.join(test.runfor.data)
| elif name == "requires": | 9,136 | lcc_e | python | null | 056351f8553a75ef1f3616070faa31f09fdb5d4a1fc276d2 |