#!/usr/bin/env python
# Source: stanlyxiang/incubator-hawq (refs/heads/master),
#         tools/bin/pythonSrc/pychecker-0.8.18/pychecker/CodeChecks.py
# Copyright (c) 2001-2006, MetaSlash Inc. All rights reserved.
# Portions Copyright (c) 2005, Google, Inc. All rights reserved.
"""
Find warnings in byte code from Python source files.
"""
import string
import types
from pychecker import msgs
from pychecker import utils
from pychecker import Warning
from pychecker import OP
from pychecker import Stack
from pychecker import python
__pychecker__ = 'no-argsused'
def cfg() :
return utils.cfg()
def getFunctionArgErr(func_name, argCount, minArgs, maxArgs):
err = None
    if maxArgs is None:
if argCount < minArgs :
err = msgs.INVALID_ARG_COUNT2 % (func_name, argCount, minArgs)
elif argCount < minArgs or argCount > maxArgs:
if minArgs == maxArgs:
err = msgs.INVALID_ARG_COUNT1 % (func_name, argCount, minArgs)
else:
err = msgs.INVALID_ARG_COUNT3 % (func_name, argCount, minArgs, maxArgs)
return err
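# Illustrative outcomes of the checks above (hypothetical calls; the message
# constants live in pychecker.msgs):
#   getFunctionArgErr('f', 1, 2, None) -> INVALID_ARG_COUNT2 (needs at least 2)
#   getFunctionArgErr('f', 3, 2, 2)    -> INVALID_ARG_COUNT1 (needs exactly 2)
#   getFunctionArgErr('f', 3, 1, 2)    -> INVALID_ARG_COUNT3 (needs 1 to 2)
#   getFunctionArgErr('f', 2, 1, 3)    -> None (count is in range)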
def _checkFunctionArgCount(code, func_name, argCount, minArgs, maxArgs,
objectReference = 0) :
# there is an implied argument for object creation and self.xxx()
if objectReference :
minArgs = minArgs - 1
if maxArgs is not None :
maxArgs = maxArgs - 1
err = getFunctionArgErr(func_name, argCount, minArgs, maxArgs)
if err :
code.addWarning(err)
def _checkFunctionArgs(code, func, objectReference, argCount, kwArgs,
check_arg_count = 1) :
func_name = func.function.func_code.co_name
if kwArgs :
args_len = func.function.func_code.co_argcount
arg_names = func.function.func_code.co_varnames[argCount:args_len]
if argCount < args_len and kwArgs[0] in arg_names:
if cfg().namedArgs :
code.addWarning(msgs.FUNC_USES_NAMED_ARGS % func_name)
# convert the named args into regular params, and really check
while argCount < args_len and kwArgs and kwArgs[0] in arg_names:
argCount = argCount + 1
kwArgs = kwArgs[1:]
_checkFunctionArgs(code, func, objectReference, argCount, kwArgs,
check_arg_count)
return
if not func.supportsKW :
code.addWarning(msgs.FUNC_DOESNT_SUPPORT_KW % func_name)
if check_arg_count :
_checkFunctionArgCount(code, func_name, argCount,
func.minArgs, func.maxArgs, objectReference)
def _getReferenceFromModule(module, identifier) :
func = module.functions.get(identifier, None)
if func is not None :
return func, None, 0
create = 0
c = module.classes.get(identifier, None)
if c is not None :
func = c.methods.get(utils.INIT, None)
create = 1
return func, c, create
def _getFunction(module, stackValue) :
    'Return (function, class, method_flag) from the stack value'
identifier = stackValue.data
if type(identifier) == types.StringType :
return _getReferenceFromModule(module, identifier)
# find the module this references
i, maxLen = 0, len(identifier)
while i < maxLen :
id = utils.safestr(identifier[i])
if module.classes.has_key(id) or module.functions.has_key(id) :
break
refModule = module.modules.get(id, None)
if refModule is not None :
module = refModule
else :
return None, None, 0
i = i + 1
    # if we got to the end, there are only modules; nothing we can do
    # we also can't handle more than 2 remaining items
if i >= maxLen or (i+2) < maxLen :
return None, None, 0
if (i+1) == maxLen :
return _getReferenceFromModule(module, identifier[-1])
# we can't handle self.x.y
if (i+2) == maxLen and identifier[0] == cfg().methodArgName:
return None, None, 0
c = module.classes.get(identifier[-2], None)
if c is None :
return None, None, 0
return c.methods.get(identifier[-1], None), c, 0
def _validateKwArgs(code, info, func_name, kwArgs):
if len(info) < 4:
code.addWarning(msgs.FUNC_DOESNT_SUPPORT_KW % func_name)
elif not info[3]:
return
try:
# info could be from a builtin method which means that
# info[3] is not a list.
dummy = info[3][0]
except IndexError:
return
for arg in kwArgs:
if arg not in info[3]:
code.addWarning(msgs.FUNC_DOESNT_SUPPORT_KW_ARG % (func_name, arg))
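# Assumed layout of the info tuples validated above (e.g. entries of
# python.GLOBAL_FUNC_INFO, inferred from usage here and in _checkBuiltin
# below, not from a spec):
#   info ~ (return_type, min_args, max_args, [keyword_arg_names])
# Entries shorter than 4 elements describe callables with no keyword support.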
def _checkBuiltin(code, loadValue, argCount, kwArgs, check_arg_count = 1) :
returnValue = Stack.makeFuncReturnValue(loadValue, argCount)
func_name = loadValue.data
if loadValue.type == Stack.TYPE_GLOBAL :
info = python.GLOBAL_FUNC_INFO.get(func_name, None)
if info is not None :
if func_name == 'input' and cfg().usesInput:
code.addWarning(msgs.USES_INPUT)
if cfg().constAttr and \
((func_name == 'setattr' and argCount >= 2) or
(func_name == 'getattr' and argCount == 2)):
arg2 = code.stack[-argCount + 1]
if arg2.const:
# lambda with setattr and const is a common way of setting
# attributes, so allow it
if code.func.function.func_name != '<lambda>':
code.addWarning(msgs.USES_CONST_ATTR % func_name)
if kwArgs:
_validateKwArgs(code, info, func_name, kwArgs)
elif check_arg_count :
_checkFunctionArgCount(code, func_name, argCount,
info[1], info[2])
returnValue = Stack.Item(returnValue.data, info[0])
returnValue.setStringType(info[0])
elif type(func_name) == types.TupleType and len(func_name) <= 2 :
objType = code.typeMap.get(utils.safestr(func_name[0]), [])
if types.ListType in objType :
try :
if func_name[1] == 'append' and argCount > 1 :
code.addWarning(msgs.LIST_APPEND_ARGS % func_name[0])
check_arg_count = 0
except AttributeError :
# FIXME: why do we need to catch AttributeError???
pass
if len(objType) == 1 :
# if it's a builtin, check method
builtinType = python.BUILTIN_METHODS.get(objType[0])
if builtinType is not None :
methodInfo = builtinType.get(func_name[1])
# set func properly
if kwArgs :
_validateKwArgs(code, methodInfo, func_name[1], kwArgs)
elif methodInfo :
returnValue = Stack.Item(func_name[1], methodInfo[0])
returnValue.setStringType(methodInfo[0])
if check_arg_count and methodInfo is not None :
_checkFunctionArgCount(code, func_name[1], argCount,
methodInfo[1], methodInfo[2])
return returnValue
_IMMUTABLE_LIST_METHODS = ('count', 'index',)
_IMMUTABLE_DICT_METHODS = ('copy', 'get', 'has_key',
'items', 'keys', 'values',
'iteritems', 'iterkeys', 'itervalues')
def _checkModifyDefaultArg(code, objectName, methodName=None) :
try :
value = code.func.defaultValue(objectName)
objectType = type(value)
if objectType in python.MUTABLE_TYPES :
if objectType == types.DictType and \
methodName in _IMMUTABLE_DICT_METHODS :
return
if objectType == types.ListType and \
methodName in _IMMUTABLE_LIST_METHODS :
return
code.addWarning(msgs.MODIFYING_DEFAULT_ARG % objectName)
except ValueError :
pass
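# Illustrative sketch of the pattern flagged above: a mutable default is
# shared across calls, so mutating it leaks state between invocations.
#
#   def bad(item, cache=[]):     # would trigger MODIFYING_DEFAULT_ARG
#       cache.append(item)
#       return cache
#
#   def ok(item, cache=[]):
#       return cache.count(item) # read-only method, no warning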
def _isexception(object) :
    # FIXME: I have no idea why this function is necessary
    # it seems that issubclass() should work, but it doesn't always
if hasattr(object, 'type'):
if object.type == types.TupleType:
# if we have a tuple, we can't check the contents (not enough info)
## for item in object.value:
## if not _isexception(item):
## return 0
return 1
try:
# try/except is necessary for globals like NotImplemented
if issubclass(object, Exception) :
return 1
# Python 2.5 added a BaseException to the hierarchy. That's
# really what we need to check if it exists.
if utils.pythonVersion() >= utils.PYTHON_2_5:
if issubclass(object, BaseException):
return 1
except TypeError:
return 0
for c in object.__bases__ :
if utils.startswith(utils.safestr(c), 'exceptions.') :
return 1
if len(c.__bases__) > 0 and _isexception(c) :
return 1
return 0
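# Illustrative behaviour: _isexception(ValueError) returns 1 since ValueError
# derives from Exception, while _isexception(NotImplemented) returns 0 because
# issubclass() raises TypeError for non-classes, which is caught above.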
def _checkStringFind(code, loadValue):
if len(loadValue.data) == 2 and loadValue.data[1] == 'find':
try:
if types.StringType in code.typeMap.get(loadValue.data[0], []):
op = code.nextOpInfo()[0]
if OP.IS_CONDITIONAL_JUMP(op) or OP.IS_NOT(op):
code.addWarning(msgs.BAD_STRING_FIND)
except TypeError:
# we don't care if loadValue.data[0] is not hashable
pass
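# Illustrative sketch of the bug flagged above (BAD_STRING_FIND):
#
#   if s.find('x'):       # wrong: find() returns -1 (truthy) when absent
#       ...
#   if s.find('x') >= 0:  # correct containment test
#       ...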
def _checkAbstract(refClass, code, name):
name_list = refClass.isAbstract()
if name_list:
name_list.sort()
names = string.join(name_list, ", ")
code.addWarning(msgs.METHODS_NEED_OVERRIDE % (names, name))
_SEQUENCE_TYPES = (types.TupleType, types.ListType, types.StringType)
try: _SEQUENCE_TYPES = _SEQUENCE_TYPES + (types.UnicodeType,)
except AttributeError: pass
# FIXME: this is not complete. errors will be caught only sometimes,
# depending on the order the functions/methods are processed
# in the dict. Need to be able to run through all functions
# twice, but because the code sucks, this is not possible.
def _checkReturnValueUse(code, func):
if func.returnValues is None:
return
err = None
opInfo = code.nextOpInfo()
if func.returnsNoValue():
# make sure we really know how to check for all the return types
for rv in func.returnValues:
if rv[1].type in _UNCHECKABLE_STACK_TYPES:
return
if not OP.POP_TOP(opInfo[0]):
err = msgs.USING_NONE_RETURN_VALUE % utils.safestr(func)
elif OP.UNPACK_SEQUENCE(opInfo[0]):
# verify unpacking into proper # of vars
varCount = opInfo[1]
stackRV = func.returnValues[0][1]
returnType = stackRV.getType({})
funcCount = stackRV.length
if returnType in _SEQUENCE_TYPES:
if varCount != funcCount and funcCount > 0:
err = msgs.WRONG_UNPACK_FUNCTION % (utils.safestr(func), funcCount, varCount)
elif returnType not in _UNCHECKABLE_STACK_TYPES:
err = msgs.UNPACK_NON_SEQUENCE % (utils.safestr(func), _getTypeStr(returnType))
if err:
code.addWarning(err)
def _handleFunctionCall(codeSource, code, argCount, indexOffset = 0,
check_arg_count = 1) :
    'Check for warnings on a function call and update the simulated stack'
if not code.stack :
return
kwArgCount = argCount >> utils.VAR_ARGS_BITS
argCount = argCount & utils.MAX_ARGS_MASK
# function call on stack is before the args, and keyword args
funcIndex = argCount + 2 * kwArgCount + 1 + indexOffset
if funcIndex > len(code.stack) :
funcIndex = 0
# to find on stack, we have to look backwards from top of stack (end)
funcIndex = -funcIndex
# store the keyword names/keys to check if using named arguments
kwArgs = []
if kwArgCount > 0 :
# loop backwards by 2 (keyword, value) in stack to find keyword args
for i in range(-2 - indexOffset, (-2 * kwArgCount - 1), -2) :
kwArgs.append(code.stack[i].data)
kwArgs.reverse()
loadValue = code.stack[funcIndex]
funcName = loadValue.getName()
returnValue = Stack.makeFuncReturnValue(loadValue, argCount)
if loadValue.isMethodCall(codeSource.classObject, cfg().methodArgName):
methodName = loadValue.data[1]
try :
m = codeSource.classObject.methods[methodName]
            if m is not None :
objRef = not m.isStaticMethod()
_checkFunctionArgs(code, m, objRef, argCount, kwArgs,
check_arg_count)
except KeyError :
sattr = codeSource.classObject.statics.get(methodName)
if sattr is not None :
funcName = sattr.getName()
if sattr is None and cfg().callingAttribute :
code.addWarning(msgs.INVALID_METHOD % methodName)
elif loadValue.type in (Stack.TYPE_ATTRIBUTE, Stack.TYPE_GLOBAL) and \
type(loadValue.data) in (types.StringType, types.TupleType) :
# apply(func, (args)), can't check # of args, so just return func
if loadValue.data == 'apply' :
loadValue = code.stack[funcIndex+1]
funcName = loadValue.getName()
else :
if cfg().modifyDefaultValue and \
type(loadValue.data) == types.TupleType :
_checkModifyDefaultArg(code, loadValue.data[0],
loadValue.data[1])
func, refClass, method = _getFunction(codeSource.module, loadValue)
            if func is None and type(loadValue.data) == types.TupleType and \
               len(loadValue.data) == 2 :
# looks like we are making a method call
data = loadValue.data
if type(data[0]) == types.StringType :
# do we know the type of the local variable?
varType = code.typeMap.get(data[0])
if varType is not None and len(varType) == 1 :
if hasattr(varType[0], 'methods') :
# it's a class & we know the type, get the method
func = varType[0].methods.get(data[1])
if func is not None :
method = 1
if cfg().abstractClasses and refClass and method:
_checkAbstract(refClass, code, funcName)
if cfg().stringFind:
_checkStringFind(code, loadValue)
            if func is not None :
if refClass and func.isClassMethod():
argCount = argCount + 1
_checkFunctionArgs(code, func, method, argCount, kwArgs,
check_arg_count)
# if this isn't a c'tor, we should check
if not (refClass and method) and cfg().checkReturnValues:
_checkReturnValueUse(code, func)
if refClass :
if method :
# c'tor, return the class as the type
returnValue = Stack.Item(loadValue, refClass)
elif func.isClassMethod():
# FIXME: do anything here?
pass
elif argCount > 0 and cfg().methodArgName and \
not func.isStaticMethod() and \
code.stack[funcIndex].type == Stack.TYPE_ATTRIBUTE and \
code.stack[funcIndex+1].data != cfg().methodArgName:
e = msgs.SELF_NOT_FIRST_ARG % (cfg().methodArgName, '')
code.addWarning(e)
elif refClass and method :
returnValue = Stack.Item(loadValue, refClass)
if (argCount > 0 or len(kwArgs) > 0) and \
not refClass.ignoreAttrs and \
not refClass.methods.has_key(utils.INIT) and \
not _isexception(refClass.classObject) :
code.addWarning(msgs.NO_CTOR_ARGS)
else :
returnValue = _checkBuiltin(code, loadValue, argCount, kwArgs,
check_arg_count)
if returnValue.type is types.NoneType and \
not OP.POP_TOP(code.nextOpInfo()[0]) :
name = utils.safestr(loadValue.data)
if type(loadValue.data) == types.TupleType :
name = string.join(loadValue.data, '.')
# lambda with setattr is a common way of setting
# attributes, so allow it
if name != 'setattr' \
or code.func.function.func_name != '<lambda>':
code.addWarning(msgs.USING_NONE_RETURN_VALUE % name)
code.stack = code.stack[:funcIndex] + [ returnValue ]
code.functionsCalled[funcName] = loadValue
def _classHasAttribute(c, attr) :
return (c.methods.has_key(attr) or c.members.has_key(attr) or
hasattr(c.classObject, attr))
def _checkClassAttribute(attr, c, code) :
if _classHasAttribute(c, attr) :
try :
del c.memberRefs[attr]
except KeyError :
pass
elif cfg().classAttrExists :
if attr not in cfg().missingAttrs:
code.addWarning(msgs.INVALID_CLASS_ATTR % attr)
def _checkModuleAttribute(attr, module, code, ref) :
try:
if attr not in module.modules[ref].attributes and \
not utils.endswith(ref, '.' + attr) :
code.addWarning(msgs.INVALID_MODULE_ATTR % attr)
except (KeyError, TypeError):
# if ref isn't found, or ref isn't even hashable, we don't care
# we may not know, or ref could be something funky [e for e].method()
pass
try:
_checkClassAttribute(attr, module.classes[ref], code)
except (KeyError, TypeError):
# if ref isn't found, or ref isn't even hashable, we don't care
# we may not know, or ref could be something funky [e for e].method()
pass
def _getGlobalName(name, func) :
# get the right name of global refs (for from XXX import YYY)
opModule = func.function.func_globals.get(name)
try :
if opModule and isinstance(opModule, types.ModuleType) :
name = opModule.__name__
except :
# we have to do this in case the class raises an access exception
# due to overriding __special__() methods
pass
return name
def _checkNoEffect(code, ignoreStmtWithNoEffect=0):
if (not ignoreStmtWithNoEffect and
OP.POP_TOP(code.nextOpInfo()[0]) and cfg().noEffect):
code.addWarning(msgs.POSSIBLE_STMT_WITH_NO_EFFECT)
def _makeConstant(code, index, factoryFunction) :
"Build a constant on the stack ((), [], or {})"
if index > 0 :
code.stack[-index:] = [ factoryFunction(code.stack[-index:]) ]
_checkNoEffect(code)
else :
code.pushStack(factoryFunction())
def _hasGlobal(operand, module, func, main) :
return (func.function.func_globals.has_key(operand) or
main or module.moduleLineNums.has_key(operand) or
__builtins__.has_key(operand))
def _checkGlobal(operand, module, func, code, err, main = 0) :
if not _hasGlobal(operand, module, func, main) :
code.addWarning(err % operand)
if not cfg().reportAllGlobals :
func.function.func_globals[operand] = operand
def _handleComparison(stack, operand) :
num_ops = 2
if operand == 'exception match':
num_ops = 1
si = min(len(stack), num_ops)
compareValues = stack[-si:]
for _ in range(si, 2) :
compareValues.append(Stack.Item(None, None))
stack[-si:] = [ Stack.makeComparison(compareValues, operand) ]
return compareValues
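# Illustrative effect on the simulated stack: with [..., a, b] and operand
# '==', the top two items collapse into one comparison item representing
# (a == b); for 'exception match' only one operand is consumed, and any
# missing operand is padded with an empty Stack.Item.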
def _handleImport(code, operand, module, main, fromName) :
# FIXME: this function should be refactored/cleaned up
key = operand
tmpOperand = tmpFromName = operand
if fromName is not None :
tmpOperand = tmpFromName = fromName
key = (fromName, operand)
if cfg().deprecated:
try:
undeprecated = python.DEPRECATED_MODULES[tmpFromName]
except KeyError:
pass
else:
msg = msgs.USING_DEPRECATED_MODULE % tmpFromName
if undeprecated:
msg.data = msg.data + msgs.USE_INSTEAD % undeprecated
code.addWarning(msg)
if cfg().reimportSelf and tmpOperand == module.module.__name__ :
code.addWarning(msgs.IMPORT_SELF % tmpOperand)
modline1 = module.moduleLineNums.get(tmpOperand, None)
modline2 = module.moduleLineNums.get((tmpFromName, '*'), None)
key2 = (tmpFromName,)
if fromName is not None and operand != '*' :
key2 = (tmpFromName, operand)
modline3 = module.moduleLineNums.get(key2, None)
if modline1 is not None or modline2 is not None or modline3 is not None :
err = None
if fromName is None :
if modline1 is not None :
err = msgs.MODULE_IMPORTED_AGAIN % operand
elif cfg().mixImport :
err = msgs.MIX_IMPORT_AND_FROM_IMPORT % tmpFromName
else :
if modline3 is not None and operand != '*' :
err = 'from %s import %s' % (tmpFromName, operand)
err = msgs.MODULE_MEMBER_IMPORTED_AGAIN % err
elif modline1 is not None :
if cfg().mixImport and code.getLineNum() != modline1[1] :
err = msgs.MIX_IMPORT_AND_FROM_IMPORT % tmpFromName
else :
err = msgs.MODULE_MEMBER_ALSO_STAR_IMPORTED % fromName
# filter out warnings when files are different (ie, from X import ...)
if err is not None and cfg().moduleImportErrors :
bytes = module.main_code
if bytes is None or \
bytes.function.func_code.co_filename == code.func_code.co_filename :
code.addWarning(err)
if main :
fileline = (code.func_code.co_filename, code.getLineNum())
module.moduleLineNums[key] = fileline
if fromName is not None :
module.moduleLineNums[(fromName,)] = fileline
def _handleImportFrom(code, operand, module, main) :
fromName = code.stack[-1].data
if utils.pythonVersion() < utils.PYTHON_2_0 and \
OP.POP_TOP(code.nextOpInfo()[0]):
code.popNextOp()
code.pushStack(Stack.Item(operand, types.ModuleType))
_handleImport(code, operand, module, main, fromName)
# http://www.python.org/doc/current/lib/typesseq-strings.html
_FORMAT_CONVERTERS = 'diouxXeEfFgGcrs'
# NOTE: l, L and h are legal length modifiers, but Python ignores them, so we warn
_FORMAT_FLAGS = '*#- +.' + string.digits
def _getFormatInfo(format, code) :
vars = []
# first get rid of all the instances of %% in the string, they don't count
format = string.replace(format, "%%", "")
sections = string.split(format, '%')
percentFormatCount = formatCount = string.count(format, '%')
mappingFormatCount = 0
# skip the first item in the list, it's always empty
for section in sections[1:] :
orig_section = section
if not section:
w = msgs.INVALID_FORMAT % orig_section
w.data = w.data + ' (end of format string)'
code.addWarning(w)
continue
# handle dictionary formats
if section[0] == '(' :
mappingFormatCount = mappingFormatCount + 1
varname = string.split(section, ')')
if varname[1] == '' :
code.addWarning(msgs.INVALID_FORMAT % section)
vars.append(varname[0][1:])
section = varname[1]
if not section :
# no format data to check
continue
# FIXME: we ought to just define a regular expression to check
# formatRE = '[ #+-]*([0-9]*|*)(|.(|*|[0-9]*)[diouxXeEfFgGcrs].*'
stars = 0
for i in range(0, len(section)) :
if section[i] in _FORMAT_CONVERTERS :
break
if section[i] in _FORMAT_FLAGS :
if section[i] == '*' :
stars = stars + 1
if mappingFormatCount > 0 :
code.addWarning(msgs.USING_STAR_IN_FORMAT_MAPPING % section)
if stars > 2 :
code.addWarning(msgs.TOO_MANY_STARS_IN_FORMAT)
formatCount = formatCount + stars
if section[i] not in _FORMAT_CONVERTERS :
code.addWarning(msgs.INVALID_FORMAT % orig_section)
if mappingFormatCount > 0 and mappingFormatCount != percentFormatCount :
code.addWarning(msgs.CANT_MIX_MAPPING_IN_FORMATS)
return formatCount, vars
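# Illustrative result (a sketch, not run against real checker state):
#   _getFormatInfo('%(name)s is %d years', code)
# would return (2, ['name']) and emit CANT_MIX_MAPPING_IN_FORMATS, since only
# one of the two '%' conversions uses a dictionary key.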
def _getConstant(code, module, data) :
data = utils.safestr(data.data)
format = code.constants.get(data)
if format is not None :
return format
format = module.variables.get(data)
if format is not None and format.value is not None :
return format.value
return None
_UNCHECKABLE_FORMAT_STACK_TYPES = \
(Stack.TYPE_UNKNOWN, Stack.TYPE_FUNC_RETURN, Stack.TYPE_ATTRIBUTE,
Stack.TYPE_GLOBAL, Stack.TYPE_EXCEPT)
_UNCHECKABLE_STACK_TYPES = _UNCHECKABLE_FORMAT_STACK_TYPES + (types.NoneType,)
def _getFormatString(code, codeSource) :
if len(code.stack) <= 1 :
return ''
format = code.stack[-2]
if format.type != types.StringType or not format.const :
format = _getConstant(code, codeSource.module, format)
if format is None or type(format) != types.StringType :
return ''
return format
return format.data
def _getFormatWarnings(code, codeSource) :
format = _getFormatString(code, codeSource)
if not format :
return
args = 0
count, vars = _getFormatInfo(format, code)
topOfStack = code.stack[-1]
if topOfStack.isLocals() :
for varname in vars :
if not code.unusedLocals.has_key(varname) :
code.addWarning(msgs.NO_LOCAL_VAR % varname)
else :
code.unusedLocals[varname] = None
else :
stackItemType = topOfStack.getType(code.typeMap)
if ((stackItemType == types.DictType and len(vars) > 0) or
codeSource.func.isParam(topOfStack.data) or
stackItemType in _UNCHECKABLE_FORMAT_STACK_TYPES) :
return
if topOfStack.type == types.TupleType :
args = topOfStack.length
elif stackItemType == types.TupleType :
args = len(code.constants.get(topOfStack.data, (0,)))
else :
args = 1
if args and count != args :
code.addWarning(msgs.INVALID_FORMAT_COUNT % (count, args))
def _checkAttributeType(code, stackValue, attr) :
if not cfg().checkObjectAttrs :
return
varTypes = code.typeMap.get(utils.safestr(stackValue.data), None)
if not varTypes :
return
# the value may have been converted on stack (`v`)
other_types = []
if stackValue.type not in varTypes :
other_types = [stackValue.type]
for varType in varTypes + other_types :
# ignore built-in types that have no attributes
if python.METHODLESS_OBJECTS.has_key(varType) :
continue
attrs = python.BUILTIN_ATTRS.get(varType, None)
if attrs is not None :
if attr in attrs :
return
continue
if hasattr(varType, 'ignoreAttrs') :
if varType.ignoreAttrs or _classHasAttribute(varType, attr) :
return
elif not hasattr(varType, 'attributes') or attr in varType.attributes :
return
code.addWarning(msgs.OBJECT_HAS_NO_ATTR % (stackValue.data, attr))
def _getTypeStr(t):
returnStr = utils.safestr(t)
strs = string.split(returnStr, "'")
try:
if len(strs) == 3:
returnStr = strs[-2]
except IndexError:
pass
return returnStr
def _getLineNum(co, instr_index):
co_lnotab = co.co_lnotab
lineno = co.co_firstlineno
addr = 0
for lnotab_index in range(0, len(co_lnotab), 2):
addr = addr + ord(co_lnotab[lnotab_index])
if addr > instr_index:
return lineno
lineno = lineno + ord(co_lnotab[lnotab_index+1])
return lineno
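# co_lnotab stores (bytecode-offset delta, line-number delta) byte pairs.
# Illustrative decoding, assuming co_lnotab == '\x06\x01\x08\x02' and
# co_firstlineno == 10: instruction indices 0-5 map to line 10, 6-13 map to
# line 11, and anything past the table maps to line 13.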
class Code :
'Hold all the code state information necessary to find warnings'
def __init__(self) :
self.bytes = None
self.func = None
self.func_code = None
self.index = 0
self.indexList = []
self.extended_arg = 0
self.lastLineNum = 0
self.maxCode = 0
self.has_except = 0
self.try_finally_first = 0
self.starts_and_ends_with_finally = 0
self.returnValues = []
self.raiseValues = []
self.stack = []
self.unpackCount = 0
self.loops = 0
self.branches = {}
self.warnings = []
self.globalRefs = {}
self.unusedLocals = {}
self.deletedLocals = {}
self.functionsCalled = {}
self.typeMap = {}
self.constants = {}
self.codeObjects = {}
def init(self, func) :
self.func = func
self.func_code, self.bytes, self.index, self.maxCode, self.extended_arg = \
OP.initFuncCode(func.function)
self.lastLineNum = self.func_code.co_firstlineno
self.returnValues = []
# initialize the arguments to unused
for arg in func.arguments() :
self.unusedLocals[arg] = 0
self.typeMap[arg] = [ Stack.TYPE_UNKNOWN ]
def getLineNum(self):
line = self.lastLineNum
        # if we don't have linenum info, calc it from co_lnotab & index
if line == self.func_code.co_firstlineno:
# FIXME: this could be optimized, if we kept last line info
line = _getLineNum(self.func_code, self.index - 1)
return line
def getWarning(self, err, line = None) :
if line is None :
line = self.getLineNum()
return Warning.Warning(self.func_code, line, err)
def addWarning(self, err, line = None) :
w = err
if not isinstance(w, Warning.Warning):
w = self.getWarning(err, line)
self.warnings.append(w)
def popNextOp(self) :
self.indexList.append(self.index)
info = OP.getInfo(self.bytes, self.index, self.extended_arg)
op, oparg, self.index, self.extended_arg = info
if op < OP.HAVE_ARGUMENT :
utils.debug(" %d %s" % (self.indexList[-1], OP.name[op]))
operand = None
else :
operand = OP.getOperand(op, self.func_code, oparg)
self.label = label = OP.getLabel(op, oparg, self.index)
utils.debug(" %d %s" % (self.indexList[-1], OP.name[op]), oparg, operand)
        if label is not None :
self.addBranch(label)
return op, oparg, operand
def nextOpInfo(self, offset = 0) :
try :
return OP.getInfo(self.bytes, self.index + offset, 0)[0:3]
except IndexError :
return -1, 0, -1
def getFirstOp(self) :
# find the first real op, maybe we should not check if params are used
i = extended_arg = 0
while i < self.maxCode :
op, oparg, i, extended_arg = OP.getInfo(self.bytes, i, extended_arg)
if not OP.LINE_NUM(op) :
if not (OP.LOAD_CONST(op) or OP.LOAD_GLOBAL(op)) :
return op
raise RuntimeError('Could not find first opcode in function')
def pushStack(self, item, ignoreStmtWithNoEffect=0):
self.stack.append(item)
_checkNoEffect(self, ignoreStmtWithNoEffect)
def popStack(self) :
if self.stack :
del self.stack[-1]
def popStackItems(self, count) :
stackLen = len(self.stack)
if stackLen > 0 :
count = min(count, stackLen)
del self.stack[-count:]
def unpack(self) :
if self.unpackCount :
self.unpackCount = self.unpackCount - 1
else :
self.popStack()
def __getStringStackType(self, data) :
try :
return data.getType({})
except AttributeError :
return Stack.TYPE_UNKNOWN
def __getStackType(self) :
if not self.stack :
return Stack.TYPE_UNKNOWN
if not self.unpackCount :
return self.__getStringStackType(self.stack[-1])
data = self.stack[-1].data
if type(data) == types.TupleType :
try :
return self.__getStringStackType(data[len(data)-self.unpackCount])
except IndexError :
# happens when unpacking a var for which we don't know the size
pass
return Stack.TYPE_UNKNOWN
def setType(self, name) :
valueList = self.typeMap.get(name, [])
newType = self.__getStackType()
# longs are being merged with ints, assume they are the same
# comparisons are really ints anyways
if newType in (types.LongType, Stack.TYPE_COMPARISON):
newType = types.IntType
if newType not in valueList :
valueList.append(newType)
# need to ignore various types (Unknown, Func return values, etc)
# also ignore None, don't care if they use it and a real type
if valueList and newType not in _UNCHECKABLE_STACK_TYPES and \
cfg().inconsistentTypes:
oldTypes = []
# only add types to the value list that are "interesting"
for typeToAdd in valueList:
if typeToAdd not in _UNCHECKABLE_STACK_TYPES and \
typeToAdd != newType:
oldTypes.append(_getTypeStr(typeToAdd))
# do we have any "interesting" old types? if so, warn
if oldTypes:
self.addWarning(msgs.INCONSISTENT_TYPE % \
(name, oldTypes, _getTypeStr(newType)))
self.typeMap[name] = valueList
def addReturn(self) :
if len(self.stack) > 0 :
value = (self.getLineNum(), self.stack[-1], self.nextOpInfo()[2])
self.returnValues.append(value)
self.popStack()
def addRaise(self) :
self.raiseValues.append((self.getLineNum(), None, self.nextOpInfo()[2]))
def addBranch(self, label) :
if label is not None :
self.branches[label] = self.branches.get(label, 0) + 1
def removeBranch(self, label) :
branch = self.branches.get(label, None)
if branch is not None :
if branch == 1 :
del self.branches[label]
else :
self.branches[label] = branch - 1
def remove_unreachable_code(self, label) :
if len(self.indexList) >= 2 :
index = self.indexList[-2]
if index >= 0 and OP.POP_BLOCK(ord(self.bytes[index])) :
index = self.indexList[-3]
if index >= 0 :
op = ord(self.bytes[index])
if OP.RETURN_VALUE(op) or OP.RAISE_VARARGS(op) or \
OP.END_FINALLY(ord(self.bytes[label-1])) :
self.removeBranch(label)
def updateCheckerArgs(self, operand) :
rc = utils.shouldUpdateArgs(operand)
if rc :
utils.updateCheckerArgs(self.stack[-1].data, self.func_code,
self.getLineNum(), self.warnings)
return rc
def updateModuleLineNums(self, module, operand) :
filelist = (self.func_code.co_filename, self.getLineNum())
module.moduleLineNums[operand] = filelist
class CodeSource :
'Holds source information about a code block (module, class, func, etc)'
def __init__(self, module, func, c, main, in_class, code) :
self.module = module
self.func = func
self.classObject = c
self.main = main
self.in_class = in_class
self.code = code
self.calling_code = []
def _checkException(code, name) :
if code.stack and code.stack[-1].type == Stack.TYPE_EXCEPT :
if __builtins__.has_key(name) :
code.addWarning(msgs.SET_EXCEPT_TO_BUILTIN % name)
def _checkAssign(code, name):
if name in _BAD_ASSIGN_NAMES:
code.addWarning(msgs.SHOULDNT_ASSIGN_BUILTIN % name)
else:
cap = string.capitalize(name)
if cap in _BAD_ASSIGN_NAMES:
code.addWarning(msgs.SHOULDNT_ASSIGN_NAME % (name, cap))
def _checkVariableOperationOnItself(code, lname, msg):
if code.stack and code.stack[-1].getName() == lname:
code.addWarning(msg % lname)
def _checkFutureKeywords(code, varname) :
kw = python.FUTURE_KEYWORDS.get(varname)
if kw is not None :
code.addWarning(msgs.USING_KEYWORD % (varname, kw))
def _STORE_NAME(oparg, operand, codeSource, code) :
if not code.updateCheckerArgs(operand) :
_checkFutureKeywords(code, operand)
module = codeSource.module
if not codeSource.in_class :
_checkShadowBuiltin(code, operand)
if not codeSource.calling_code :
_checkGlobal(operand, module, codeSource.func, code,
msgs.GLOBAL_DEFINED_NOT_DECLARED, codeSource.main)
else :
if code.stack :
codeSource.classObject.statics[operand] = code.stack[-1]
codeSource.classObject.lineNums[operand] = code.getLineNum()
var = module.variables.get(operand)
if var is not None and code.stack and code.stack[-1].const :
var.value = code.stack[-1].data
if code.unpackCount :
code.unpackCount = code.unpackCount - 1
else:
_checkAssign(code, operand)
_checkException(code, operand)
code.popStack()
if not module.moduleLineNums.has_key(operand) and codeSource.main :
code.updateModuleLineNums(module, operand)
_STORE_GLOBAL = _STORE_NAME
def _checkLoadGlobal(codeSource, code, varname) :
_checkFutureKeywords(code, varname)
should_check = 1
if code.func_code.co_name == utils.LAMBDA :
# this could really be a local reference, check first
if not codeSource.main and codeSource.calling_code:
func = getattr(codeSource.calling_code[-1], 'function', None)
if func is not None and varname in func.func_code.co_varnames :
_handleLoadLocal(code, codeSource, varname)
should_check = 0
if should_check :
# if a global var starts w/__ and the global is referenced in a class
# we have to strip off the _class-name, to get the original name
if codeSource.classObject and \
utils.startswith(varname, '_' + codeSource.classObject.name + '__'):
varname = varname[len(codeSource.classObject.name)+1:]
# make sure we remember each global ref to check for unused
code.globalRefs[_getGlobalName(varname, codeSource.func)] = varname
if not codeSource.in_class :
_checkGlobal(varname, codeSource.module, codeSource.func,
code, msgs.INVALID_GLOBAL)
def _LOAD_NAME(oparg, operand, codeSource, code) :
_checkLoadGlobal(codeSource, code, operand)
# if there was from XXX import *, _* names aren't imported
if codeSource.module.modules.has_key(operand) and \
hasattr(codeSource.module.module, operand) :
operand = getattr(codeSource.module.module, operand).__name__
opType, const = Stack.TYPE_GLOBAL, 0
if operand == 'None' :
opType, const = types.NoneType, 0
elif operand == 'Ellipsis' :
opType, const = types.EllipsisType, 1
code.pushStack(Stack.Item(operand, opType, const))
_LOAD_GLOBAL = _LOAD_NAME
def _LOAD_DEREF(oparg, operand, codeSource, code) :
if type(oparg) == types.IntType :
func_code = code.func_code
try:
argname = func_code.co_cellvars[oparg]
except IndexError:
argname = func_code.co_freevars[oparg - len(func_code.co_cellvars)]
code.pushStack(Stack.Item(argname, types.StringType))
if code.func_code.co_name != utils.LAMBDA :
code.unusedLocals[argname] = None
else :
_LOAD_GLOBAL(oparg, operand, codeSource, code)
_LOAD_CLOSURE = _LOAD_DEREF
def _DELETE_NAME(oparg, operand, codeSource, code) :
_checkLoadGlobal(codeSource, code, operand)
# FIXME: handle deleting global multiple times
_DELETE_GLOBAL = _DELETE_NAME
def _make_const(value):
if type(value) == types.TupleType:
return Stack.makeTuple(map(_make_const, value))
return Stack.Item(value, type(value), 1)
def _LOAD_CONST(oparg, operand, codeSource, code) :
code.pushStack(_make_const(operand))
if type(operand) == types.CodeType :
name = operand.co_name
obj = code.codeObjects.get(name, None)
if name == utils.LAMBDA :
# use a unique key, so we can have multiple lambdas
code.codeObjects[code.index] = operand
elif obj is None :
code.codeObjects[name] = operand
elif cfg().redefiningFunction :
code.addWarning(msgs.REDEFINING_ATTR % (name, obj.co_firstlineno))
def _checkLocalShadow(code, module, varname) :
if module.variables.has_key(varname) and cfg().shadows :
line = module.moduleLineNums.get(varname, ('<unknown>', 0))
w = code.getWarning(msgs.LOCAL_SHADOWS_GLOBAL % (varname, line[1]))
if line[0] != w.file:
w.err = '%s in file %s' % (w.err, line[0])
code.addWarning(w)
def _checkShadowBuiltin(code, varname) :
if __builtins__.has_key(varname) and varname[0] != '_' and \
cfg().shadowBuiltins:
code.addWarning(msgs.VARIABLE_SHADOWS_BUILTIN % varname)
def _checkLoadLocal(code, codeSource, varname, deletedWarn, usedBeforeSetWarn) :
_checkFutureKeywords(code, varname)
deletedLine = code.deletedLocals.get(varname)
if deletedLine :
code.addWarning(deletedWarn % (varname, deletedLine))
elif not code.unusedLocals.has_key(varname) and \
not codeSource.func.isParam(varname) :
code.addWarning(usedBeforeSetWarn % varname)
code.unusedLocals[varname] = None
_checkLocalShadow(code, codeSource.module, varname)
def _handleLoadLocal(code, codeSource, varname) :
_checkLoadLocal(code, codeSource, varname,
msgs.LOCAL_DELETED, msgs.VAR_USED_BEFORE_SET)
def _LOAD_FAST(oparg, operand, codeSource, code) :
code.pushStack(Stack.Item(operand, type(operand)))
_handleLoadLocal(code, codeSource, operand)
def _STORE_FAST(oparg, operand, codeSource, code) :
if not code.updateCheckerArgs(operand) :
_checkFutureKeywords(code, operand)
if code.stack and code.stack[-1].type == types.StringType and \
not code.stack[-1].const:
_checkVariableOperationOnItself(code, operand,
msgs.SET_VAR_TO_ITSELF)
code.setType(operand)
if not code.unpackCount and code.stack and \
(code.stack[-1].const or code.stack[-1].type == types.TupleType) :
if code.constants.has_key(operand) :
del code.constants[operand]
else :
code.constants[operand] = code.stack[-1].data
_checkLocalShadow(code, codeSource.module, operand)
_checkShadowBuiltin(code, operand)
_checkAssign(code, operand)
_checkException(code, operand)
if code.deletedLocals.has_key(operand) :
del code.deletedLocals[operand]
if not code.unusedLocals.has_key(operand) :
errLine = code.getLineNum()
if code.unpackCount and not cfg().unusedLocalTuple :
errLine = -errLine
code.unusedLocals[operand] = errLine
code.unpack()
def _DELETE_FAST(oparg, operand, codeSource, code) :
_checkLoadLocal(code, codeSource, operand,
msgs.LOCAL_ALREADY_DELETED, msgs.VAR_DELETED_BEFORE_SET)
code.deletedLocals[operand] = code.getLineNum()
def _checkAttribute(top, operand, codeSource, code) :
    if top.data == cfg().methodArgName and codeSource.classObject is not None :
_checkClassAttribute(operand, codeSource.classObject, code)
elif type(top.type) == types.StringType or top.type == types.ModuleType :
_checkModuleAttribute(operand, codeSource.module, code, top.data)
else :
_checkAttributeType(code, top, operand)
def _checkExcessiveReferences(code, top, extraAttr = None) :
if cfg().maxReferences <= 0 :
return
try :
data = top.data
if extraAttr is not None :
data = data + (extraAttr,)
maxReferences = cfg().maxReferences
if data[0] == cfg().methodArgName:
maxReferences = maxReferences + 1
if len(data) > maxReferences :
name = string.join(top.data, '.')
code.addWarning(msgs.TOO_MANY_REFERENCES % (maxReferences, name))
except TypeError :
pass
def _checkDeprecated(code, identifierTuple):
# check deprecated module.function
try:
name = string.join(identifierTuple, '.')
undeprecated = python.DEPRECATED_ATTRS[name]
except (KeyError, TypeError):
pass
else:
msg = msgs.USING_DEPRECATED_ATTR % name
if undeprecated:
msg.data = msg.data + msgs.USE_INSTEAD % undeprecated
code.addWarning(msg)
def _LOAD_ATTR(oparg, operand, codeSource, code) :
if len(code.stack) > 0 :
top = code.stack[-1]
_checkAttribute(top, operand, codeSource, code)
top.addAttribute(operand)
if len(top.data) == 2:
if cfg().deprecated:
_checkDeprecated(code, top.data)
try:
insecure = python.SECURITY_FUNCS.get(top.data[0])
except TypeError:
pass
else:
if insecure and insecure.has_key(operand):
func = string.join(top.data, '.')
code.addWarning(msgs.USING_INSECURE_FUNC % func)
nextOp = code.nextOpInfo()[0]
if not OP.LOAD_ATTR(nextOp) :
if OP.POP_TOP(nextOp) and cfg().noEffect:
code.addWarning(msgs.POSSIBLE_STMT_WITH_NO_EFFECT)
else :
_checkExcessiveReferences(code, top)
def _ok_to_set_attr(classObject, basename, attr) :
    return (cfg().onlyCheckInitForMembers and classObject is not None and
basename == cfg().methodArgName and
not _classHasAttribute(classObject, attr))
def _STORE_ATTR(oparg, operand, codeSource, code) :
if code.stack :
top = code.stack.pop()
top_name = '%s.%s' % (top.getName(), operand)
try:
# FIXME: this is a hack to handle code like:
# a.a = [x for x in range(2) if x > 1]
previous = code.stack[-1]
except IndexError:
previous = None
if top.type in (types.StringType, Stack.TYPE_ATTRIBUTE) and \
previous and previous.type == Stack.TYPE_ATTRIBUTE:
_checkVariableOperationOnItself(code, top_name,
msgs.SET_VAR_TO_ITSELF)
_checkExcessiveReferences(code, top, operand)
if _ok_to_set_attr(codeSource.classObject, top.data, operand) :
code.addWarning(msgs.INVALID_SET_CLASS_ATTR % operand)
code.unpack()
def _DELETE_ATTR(oparg, operand, codeSource, code) :
if len(code.stack) > 0 :
_checkAttribute(code.stack[-1], operand, codeSource, code)
def _getExceptionInfo(codeSource, item):
# FIXME: probably ought to try to handle raise module.Error
if item.type is types.StringType and item.const == 1:
return item.data, 1
e = None
if item.type is Stack.TYPE_GLOBAL:
try:
e = eval(item.data)
except NameError:
pass
if not e:
try:
c = codeSource.module.classes.get(item.data)
except TypeError: # item.data may not be hashable (e.g., list)
return e, 0
if c is not None:
e = c.classObject
else:
v = codeSource.module.variables.get(item.data)
if v is not None:
return v, (v.type == types.StringType)
return e, 0
_UNCHECKABLE_CATCH_TYPES = (Stack.TYPE_UNKNOWN, Stack.TYPE_ATTRIBUTE)
def _checkCatchException(codeSource, code, item):
if not cfg().badExceptions:
return
if item.data is None or item.type in _UNCHECKABLE_CATCH_TYPES:
return
e, is_str = _getExceptionInfo(codeSource, item)
if is_str:
code.addWarning(msgs.CATCH_STR_EXCEPTION % item.data)
elif e is not None and not _isexception(e):
code.addWarning(msgs.CATCH_BAD_EXCEPTION % item.data)
def _handleExceptionChecks(codeSource, code, checks):
for item in checks:
if item is not None:
if item.type is not types.TupleType:
_checkCatchException(codeSource, code, item)
else:
for ti in item.data:
if isinstance(ti, Stack.Item):
_checkCatchException(codeSource, code, ti)
_BOOL_NAMES = ('True', 'False')
_BAD_ASSIGN_NAMES = _BOOL_NAMES + ('None',)
def _checkBoolean(code, checks):
for item in checks:
try:
data = string.capitalize(item.data)
if item.type is Stack.TYPE_GLOBAL and data in _BOOL_NAMES:
code.addWarning(msgs.BOOL_COMPARE % item.data)
except (AttributeError, TypeError):
# TypeError is necessary for Python 1.5.2
pass # ignore items that are not a StackItem or a string
def _COMPARE_OP(oparg, operand, codeSource, code) :
compareValues = _handleComparison(code.stack, operand)
if oparg == OP.EXCEPT_COMPARISON:
_handleExceptionChecks(codeSource, code, compareValues)
elif oparg < OP.IN_COMPARISON: # '<', '<=', '==', '!=', '>', '>='
_checkBoolean(code, compareValues)
elif oparg < OP.IS_COMPARISON: # 'in', 'not in'
# TODO: any checks that should be done here?
pass
elif cfg().isLiteral:
# X is Y or X is not Y comparison
second_arg = code.stack[-1].data[2]
# FIXME: how should booleans be handled, need to think about it
## if second_arg.const or (second_arg.type == Stack.TYPE_GLOBAL and
## second_arg.data in ['True', 'False']):
if second_arg.const and second_arg.data is not None:
data = second_arg.data
if second_arg.type is types.DictType:
data = {}
not_str = ''
if oparg != OP.IS_COMPARISON:
not_str = ' not'
code.addWarning(msgs.IS_LITERAL % (not_str, data))
_checkNoEffect(code)
def _IMPORT_NAME(oparg, operand, codeSource, code) :
code.pushStack(Stack.Item(operand, types.ModuleType))
nextOp = code.nextOpInfo()[0]
if not OP.IMPORT_FROM(nextOp) and not OP.IMPORT_STAR(nextOp) :
_handleImport(code, operand, codeSource.module, codeSource.main, None)
def _IMPORT_FROM(oparg, operand, codeSource, code) :
_handleImportFrom(code, operand, codeSource.module, codeSource.main)
# this is necessary for python 1.5 (see STORE_GLOBAL/NAME)
if utils.pythonVersion() < utils.PYTHON_2_0 :
code.popStack()
if not codeSource.main :
code.unusedLocals[operand] = None
elif not codeSource.module.moduleLineNums.has_key(operand) :
code.updateModuleLineNums(codeSource.module, operand)
def _IMPORT_STAR(oparg, operand, codeSource, code) :
_handleImportFrom(code, '*', codeSource.module, codeSource.main)
# Python 2.3 introduced some optimizations that create problems
# this is a utility for ignoring these cases
def _shouldIgnoreCodeOptimizations(code, bytecodes, offset, length=None):
if utils.pythonVersion() < utils.PYTHON_2_3:
return 0
if length is None:
length = offset - 1
try:
start = code.index - offset
return bytecodes == code.bytes[start:start+length]
except IndexError:
return 0
# In Python 2.3, a, b = 1,2 generates this code:
# ...
# ROT_TWO
# JUMP_FORWARD 2
# DUP_TOP
# POP_TOP
#
# which generates a Possible stmt w/no effect
# ROT_TWO = 2; JUMP_FORWARD = 110; 2, 0 is the offset (2)
_IGNORE_SEQ = '%c%c%c%c' % (2, 110, 2, 0)
def _shouldIgnoreNoEffectWarning(code):
return _shouldIgnoreCodeOptimizations(code, _IGNORE_SEQ, 5)
def _DUP_TOP(oparg, operand, codeSource, code) :
if len(code.stack) > 0 :
code.pushStack(code.stack[-1], _shouldIgnoreNoEffectWarning(code))
def _popn(code, n) :
if len(code.stack) >= 2 :
loadValue = code.stack[-2]
if cfg().modifyDefaultValue and loadValue.type == types.StringType :
_checkModifyDefaultArg(code, loadValue.data)
code.popStackItems(n)
def _DELETE_SUBSCR(oparg, operand, codeSource, code) :
_popn(code, 2)
def _STORE_SUBSCR(oparg, operand, codeSource, code) :
_popn(code, 3)
def _CALL_FUNCTION(oparg, operand, codeSource, code) :
_handleFunctionCall(codeSource, code, oparg)
def _CALL_FUNCTION_VAR(oparg, operand, codeSource, code) :
_handleFunctionCall(codeSource, code, oparg, 1, 0)
def _CALL_FUNCTION_KW(oparg, operand, codeSource, code) :
_handleFunctionCall(codeSource, code, oparg, 1)
def _CALL_FUNCTION_VAR_KW(oparg, operand, codeSource, code) :
_handleFunctionCall(codeSource, code, oparg, 2, 0)
def _MAKE_FUNCTION(oparg, operand, codeSource, code) :
newValue = Stack.makeFuncReturnValue(code.stack[-1], oparg)
code.popStackItems(oparg+1)
code.pushStack(newValue)
def _MAKE_CLOSURE(oparg, operand, codeSource, code) :
_MAKE_FUNCTION(max(0, oparg - 1), operand, codeSource, code)
def _BUILD_MAP(oparg, operand, codeSource, code) :
_makeConstant(code, oparg, Stack.makeDict)
def _BUILD_TUPLE(oparg, operand, codeSource, code) :
_makeConstant(code, oparg, Stack.makeTuple)
def _BUILD_LIST(oparg, operand, codeSource, code) :
_makeConstant(code, oparg, Stack.makeList)
def _BUILD_CLASS(oparg, operand, codeSource, code) :
newValue = Stack.makeFuncReturnValue(code.stack[-1], types.ClassType)
code.popStackItems(3)
code.pushStack(newValue)
def _LIST_APPEND(oparg, operand, codeSource, code):
code.popStackItems(2)
def _modifyStackName(code, suffix):
if code.stack:
tos = code.stack[-1]
tos_type = type(tos.data)
if tos_type == types.StringType:
tos.data = tos.data + suffix
elif tos_type == types.TupleType and \
type(tos.data[-1]) == types.StringType:
tos.data = tos.data[:-1] + (tos.data[-1] + suffix,)
def _UNARY_CONVERT(oparg, operand, codeSource, code) :
if code.stack:
stackValue = code.stack[-1]
if stackValue.data == cfg().methodArgName and \
stackValue.const == 0 and codeSource.classObject is not None and \
codeSource.func.function.func_name == '__repr__' :
code.addWarning(msgs.USING_SELF_IN_REPR)
stackValue.data = utils.safestr(stackValue.data)
stackValue.type = types.StringType
_modifyStackName(code, '-repr')
def _UNARY_POSITIVE(oparg, operand, codeSource, code) :
if OP.UNARY_POSITIVE(code.nextOpInfo()[0]) :
code.addWarning(msgs.STMT_WITH_NO_EFFECT % '++')
code.popNextOp()
elif cfg().unaryPositive and code.stack and not code.stack[-1].const :
code.addWarning(msgs.UNARY_POSITIVE_HAS_NO_EFFECT)
_modifyStackName(code, '-pos')
def _UNARY_NEGATIVE(oparg, operand, codeSource, code) :
if OP.UNARY_NEGATIVE(code.nextOpInfo()[0]) :
code.addWarning(msgs.STMT_WITH_NO_EFFECT % '--')
_modifyStackName(code, '-neg')
def _UNARY_NOT(oparg, operand, codeSource, code) :
_modifyStackName(code, '-not')
def _UNARY_INVERT(oparg, operand, codeSource, code) :
if OP.UNARY_INVERT(code.nextOpInfo()[0]) :
code.addWarning(msgs.STMT_WITH_NO_EFFECT % '~~')
_modifyStackName(code, '-invert')
def _popStackRef(code, operand, count = 2) :
code.popStackItems(count)
code.pushStack(Stack.Item(operand, Stack.TYPE_UNKNOWN))
def _popModifiedStack(code, suffix=' '):
code.popStack()
_modifyStackName(code, suffix)
def _pop(oparg, operand, codeSource, code) :
code.popStack()
_POP_TOP = _PRINT_ITEM = _pop
def _popModified(oparg, operand, codeSource, code):
_popModifiedStack(code)
def _BINARY_RSHIFT(oparg, operand, codeSource, code):
_coerce_type(code)
_popModified(oparg, operand, codeSource, code)
_BINARY_LSHIFT = _BINARY_RSHIFT
def _checkModifyNoOp(code, op, msg=msgs.MODIFY_VAR_NOOP, modifyStack=1):
stack = code.stack
if len(stack) >= 2:
if (stack[-1].type != Stack.TYPE_UNKNOWN and
stack[-2].type != Stack.TYPE_UNKNOWN):
name = stack[-1].getName()
if name != Stack.TYPE_UNKNOWN and name == stack[-2].getName():
code.addWarning(msg % (name, op, name))
if modifyStack:
code.popStack()
stack[-1].const = 0
_modifyStackName(code, op)
def _BINARY_AND(oparg, operand, codeSource, code):
# Don't modify the stack, since _coerce_type() will do it.
_checkModifyNoOp(code, '&', modifyStack=0)
_coerce_type(code)
def _BINARY_OR(oparg, operand, codeSource, code):
# Don't modify the stack, since _coerce_type() will do it.
_checkModifyNoOp(code, '|', modifyStack=0)
_coerce_type(code)
def _BINARY_XOR(oparg, operand, codeSource, code):
# Don't modify the stack, since _coerce_type() will do it.
_checkModifyNoOp(code, '^', msgs.XOR_VAR_WITH_ITSELF, modifyStack=0)
_coerce_type(code)
def _PRINT_ITEM_TO(oparg, operand, codeSource, code) :
code.popStackItems(2)
try:
ComplexType = types.ComplexType
except NameError:
ComplexType = types.FloatType # need some numeric type here
_NUMERIC_TYPES = (types.IntType, types.FloatType, ComplexType)
# FIXME: This is pathetically weak, need to handle more types
def _coerce_type(code) :
_checkNoEffect(code)
newItem = Stack.Item('<stack>', Stack.TYPE_UNKNOWN)
if len(code.stack) >= 2 :
s1, s2 = code.stack[-2:]
s1type = s1.getType(code.typeMap)
s2type = s2.getType(code.typeMap)
if s1type != s2type :
if s1type in _NUMERIC_TYPES and s2type in _NUMERIC_TYPES :
newType = types.FloatType
if s1type == ComplexType or s2type == ComplexType:
newType = ComplexType
newItem.type = newType
code.popStackItems(2)
code.pushStack(newItem)
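# Illustrative coercions performed above when the two stack types differ:
# int op float -> float, and int op complex -> complex; any non-numeric
# mix collapses to an item of unknown type.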
def _BINARY_ADD(oparg, operand, codeSource, code) :
stack = code.stack
if len(stack) >= 2 and (stack[-1].const and stack[-2].const and
stack[-1].type == stack[-2].type) :
value = stack[-2].data + stack[-1].data
code.popStackItems(2)
code.pushStack(Stack.Item(value, type(value), 1))
else :
_coerce_type(code)
def _BINARY_SUBTRACT(oparg, operand, codeSource, code) :
_coerce_type(code)
_BINARY_POWER = _BINARY_SUBTRACT
def _BINARY_SUBSCR(oparg, operand, codeSource, code) :
_checkNoEffect(code)
if len(code.stack) >= 2 :
stack = code.stack
varType = code.typeMap.get(utils.safestr(stack[-2].data), [])
if types.ListType in varType and stack[-1].type == types.TupleType :
code.addWarning(msgs.USING_TUPLE_ACCESS_TO_LIST % stack[-2].data)
_popStackRef(code, operand)
def _isint(stackItem, code) :
if type(stackItem.data) == types.IntType :
return 1
stackTypes = code.typeMap.get(stackItem.data, [])
if len(stackTypes) != 1 :
return 0
return types.IntType in stackTypes
def _BINARY_DIVIDE(oparg, operand, codeSource, code) :
_checkNoEffect(code)
_checkModifyNoOp(code, '/', msgs.DIVIDE_VAR_BY_ITSELF, 0)
if cfg().intDivide and len(code.stack) >= 2 :
if _isint(code.stack[-1], code) and _isint(code.stack[-2], code) :
# don't warn if we are going to convert the result to an int
if not (len(code.stack) >= 3 and
code.stack[-3].data == 'int' and
OP.CALL_FUNCTION(code.nextOpInfo()[0])):
code.addWarning(msgs.INTEGER_DIVISION % tuple(code.stack[-2:]))
_popModifiedStack(code, '/')
def _BINARY_TRUE_DIVIDE(oparg, operand, codeSource, code) :
_checkNoEffect(code)
_checkVariableOperationOnItself(code, operand, msgs.DIVIDE_VAR_BY_ITSELF)
_popModifiedStack(code, '/')
_BINARY_FLOOR_DIVIDE = _BINARY_TRUE_DIVIDE
def _BINARY_MULTIPLY(oparg, operand, codeSource, code) :
if len(code.stack) >= 2 :
format = _getFormatString(code, codeSource)
if format and type(code.stack[-1].data) == types.IntType :
code.stack[-2].data = format * code.stack[-1].data
code.popStack()
else:
_coerce_type(code)
else:
_popModifiedStack(code, '*')
def _BINARY_MODULO(oparg, operand, codeSource, code) :
_checkNoEffect(code)
if cfg().modulo1 and code.stack and code.stack[-1].data == 1:
if len(code.stack) < 2 or \
code.stack[-2].getType(code.typeMap) != types.FloatType:
code.addWarning(msgs.MODULO_1)
_getFormatWarnings(code, codeSource)
_popModifiedStack(code, '%')
if code.stack:
code.stack[-1].const = 0
def _ROT_TWO(oparg, operand, codeSource, code) :
if len(code.stack) >= 2 :
tmp = code.stack[-2]
code.stack[-2] = code.stack[-1]
code.stack[-1] = tmp
def _ROT_THREE(oparg, operand, codeSource, code) :
"""Lifts second and third stack item one position up,
moves top down to position three."""
if len(code.stack) >= 3 :
second = code.stack[-2]
third = code.stack[-3]
code.stack[-3] = code.stack[-1]
code.stack[-2] = third
code.stack[-1] = second
def _ROT_FOUR(oparg, operand, codeSource, code) :
"""Lifts second, third and forth stack item one position up,
moves top down to position four."""
if len(code.stack) >= 4 :
second = code.stack[-2]
third = code.stack[-3]
fourth = code.stack[-4]
code.stack[-4] = code.stack[-1]
code.stack[-3] = fourth
code.stack[-2] = third
code.stack[-1] = second
def _SETUP_EXCEPT(oparg, operand, codeSource, code) :
code.has_except = 1
code.pushStack(Stack.Item(None, Stack.TYPE_EXCEPT))
code.pushStack(Stack.Item(None, Stack.TYPE_EXCEPT))
def _SETUP_FINALLY(oparg, operand, codeSource, code) :
if not code.has_except :
code.try_finally_first = 1
def _END_FINALLY(oparg, operand, codeSource, code) :
if code.try_finally_first and code.index == (len(code.bytes) - 4) :
code.starts_and_ends_with_finally = 1
def _LINE_NUM(oparg, operand, codeSource, code) :
code.lastLineNum = oparg
def _UNPACK_SEQUENCE(oparg, operand, codeSource, code) :
code.unpackCount = oparg
if code.stack:
top = code.stack[-1]
# if we know we have a tuple, make sure we unpack it into the
# right # of variables
topType = top.getType(code.typeMap)
if topType in _SEQUENCE_TYPES:
length = top.length
# we don't know the length, maybe it's constant and we can find out
if length == 0:
value = code.constants.get(utils.safestr(top.data))
if type(value) in _SEQUENCE_TYPES:
length = len(value)
if length > 0 and length != oparg:
if cfg().unpackLength:
code.addWarning(msgs.WRONG_UNPACK_SIZE % (length, oparg))
elif topType not in _UNCHECKABLE_STACK_TYPES:
if cfg().unpackNonSequence:
code.addWarning(msgs.UNPACK_NON_SEQUENCE %
(top.data, _getTypeStr(topType)))
_modifyStackName(code, '-unpack')
def _SLICE_1_ARG(oparg, operand, codeSource, code) :
_popStackRef(code, operand)
_SLICE1 = _SLICE2 = _SLICE_1_ARG
def _SLICE3(oparg, operand, codeSource, code) :
_popStackRef(code, operand, 3)
def _check_string_iteration(code, index):
try:
item = code.stack[index]
except IndexError:
return
if item.getType(code.typeMap) == types.StringType and \
cfg().stringIteration:
code.addWarning(msgs.STRING_ITERATION % item.data)
def _FOR_LOOP(oparg, operand, codeSource, code) :
code.loops = code.loops + 1
_check_string_iteration(code, -2)
_popStackRef(code, '<for_loop>', 2)
def _GET_ITER(oparg, operand, codeSource, code) :
_check_string_iteration(code, -1)
def _FOR_ITER(oparg, operand, codeSource, code) :
code.loops = code.loops + 1
_popStackRef(code, '<for_iter>', 1)
def _jump(oparg, operand, codeSource, code) :
if len(code.stack) > 0 :
topOfStack = code.stack[-1]
if topOfStack.isMethodCall(codeSource.classObject, cfg().methodArgName):
name = topOfStack.data[-1]
if codeSource.classObject.methods.has_key(name) :
code.addWarning(msgs.USING_METHOD_AS_ATTR % name)
_JUMP_ABSOLUTE = _jump
def _skip_loops(bytes, i, lastLineNum, max) :
extended_arg = 0
blockCount = 1
while i < max :
op, oparg, i, extended_arg = OP.getInfo(bytes, i, extended_arg)
if OP.LINE_NUM(op) :
lastLineNum = oparg
elif OP.FOR_LOOP(op) or OP.FOR_ITER(op) or OP.SETUP_LOOP(op) :
blockCount = blockCount + 1
elif OP.POP_BLOCK(op) :
blockCount = blockCount - 1
if blockCount <= 0 :
break
return lastLineNum, i
def _is_unreachable(code, topOfStack, branch, if_false) :
    # Are we checking exceptions, but not catching all of them?
if (topOfStack.type == Stack.TYPE_COMPARISON and
topOfStack.data[1] == 'exception match' and
topOfStack.data[2] is not Exception) :
return 1
# do we possibly have while 1: ?
if not (topOfStack.const and topOfStack.data == 1 and if_false) :
return 0
# get the op just before the branch (ie, -3)
op, oparg, i, extended_arg = OP.getInfo(code.bytes, branch - 3, 0)
    # are we jumping to before the while 1: (LOAD_CONST, JUMP_IF_FALSE)
if not (OP.JUMP_ABSOLUTE(op) and oparg == (code.index - 3*3)) :
return 0
# check if we break out of the loop
i = code.index
lastLineNum = code.getLineNum()
while i < branch :
op, oparg, i, extended_arg = OP.getInfo(code.bytes, i, extended_arg)
if OP.LINE_NUM(op) :
lastLineNum = oparg
elif OP.BREAK_LOOP(op) :
return 0
elif OP.FOR_LOOP(op) or OP.FOR_ITER(op) or OP.SETUP_LOOP(op) :
lastLineNum, i = _skip_loops(code.bytes, i, lastLineNum, branch)
i = code.index - 3*4
op, oparg, i, extended_arg = OP.getInfo(code.bytes, i, 0)
if OP.SETUP_LOOP(op) :
# a little lie to pretend we have a raise after a while 1:
code.removeBranch(i + oparg)
code.raiseValues.append((lastLineNum, None, i + oparg))
return 1
# In Python 2.3, while/if 1: gets optimized to
# ...
# JUMP_FORWARD 4
# JUMP_IF_FALSE ?
# POP_TOP
#
# which generates a Using a conditional statement with a constant value
# JUMP_FORWARD = 110; 4, 0 is the offset (4)
_IGNORE_BOGUS_JUMP = '%c%c%c' % (110, 4, 0)
def _shouldIgnoreBogusJumps(code):
return _shouldIgnoreCodeOptimizations(code, _IGNORE_BOGUS_JUMP, 6, 3)
def _checkConstantCondition(code, topOfStack, if_false):
# don't warn when doing (test and 'true' or 'false')
# still warn when doing (test and None or 'false')
if if_false or not OP.LOAD_CONST(code.nextOpInfo(1)[0]) or \
not topOfStack.data or topOfStack.type is types.NoneType:
if not _shouldIgnoreBogusJumps(code):
code.addWarning(msgs.CONSTANT_CONDITION % utils.safestr(topOfStack))
def _jump_conditional(oparg, operand, codeSource, code, if_false) :
# FIXME: this doesn't work in 2.3+ since constant conditions
# are optimized away by the compiler.
if code.stack :
topOfStack = code.stack[-1]
if (topOfStack.const or topOfStack.type is types.NoneType) and \
cfg().constantConditions and \
(topOfStack.data != 1 or cfg().constant1):
_checkConstantCondition(code, topOfStack, if_false)
if _is_unreachable(code, topOfStack, code.label, if_false) :
code.removeBranch(code.label)
_jump(oparg, operand, codeSource, code)
def _JUMP_IF_FALSE(oparg, operand, codeSource, code) :
_jump_conditional(oparg, operand, codeSource, code, 1)
def _JUMP_IF_TRUE(oparg, operand, codeSource, code) :
_jump_conditional(oparg, operand, codeSource, code, 0)
def _JUMP_FORWARD(oparg, operand, codeSource, code) :
_jump(oparg, operand, codeSource, code)
code.remove_unreachable_code(code.label)
def _RETURN_VALUE(oparg, operand, codeSource, code) :
if not codeSource.calling_code :
code.addReturn()
def _EXEC_STMT(oparg, operand, codeSource, code) :
if cfg().usesExec :
if code.stack and code.stack[-1].isNone() :
code.addWarning(msgs.USES_GLOBAL_EXEC)
else :
code.addWarning(msgs.USES_EXEC)
def _checkStrException(code, varType, item):
if varType is types.StringType:
code.addWarning(msgs.RAISE_STR_EXCEPTION % item.data)
def _RAISE_VARARGS(oparg, operand, codeSource, code) :
code.addRaise()
if not cfg().badExceptions:
return
if oparg > 0 and len(code.stack) >= oparg:
item = code.stack[-oparg]
if item.type not in (Stack.TYPE_FUNC_RETURN, Stack.TYPE_UNKNOWN):
if item.type is Stack.TYPE_GLOBAL:
e, is_str = _getExceptionInfo(codeSource, item)
if is_str:
_checkStrException(code, e.type, item)
elif e is not None and not _isexception(e):
code.addWarning(msgs.RAISE_BAD_EXCEPTION % item.data)
else:
_checkStrException(code, item.getType(code.typeMap), item)
DISPATCH = [ None ] * 256
DISPATCH[ 1] = _POP_TOP
DISPATCH[ 2] = _ROT_TWO
DISPATCH[ 3] = _ROT_THREE
DISPATCH[ 4] = _DUP_TOP
DISPATCH[ 5] = _ROT_FOUR
DISPATCH[ 10] = _UNARY_POSITIVE
DISPATCH[ 11] = _UNARY_NEGATIVE
DISPATCH[ 12] = _UNARY_NOT
DISPATCH[ 13] = _UNARY_CONVERT
DISPATCH[ 15] = _UNARY_INVERT
DISPATCH[ 18] = _LIST_APPEND
DISPATCH[ 19] = _BINARY_POWER
DISPATCH[ 20] = _BINARY_MULTIPLY
DISPATCH[ 21] = _BINARY_DIVIDE
DISPATCH[ 22] = _BINARY_MODULO
DISPATCH[ 23] = _BINARY_ADD
DISPATCH[ 24] = _BINARY_SUBTRACT
DISPATCH[ 25] = _BINARY_SUBSCR
DISPATCH[ 26] = _BINARY_FLOOR_DIVIDE
DISPATCH[ 27] = _BINARY_TRUE_DIVIDE
# FIXME: add INPLACE FLOOR/TRUE DIVIDE: 28/29
DISPATCH[ 31] = _SLICE1
DISPATCH[ 32] = _SLICE2
DISPATCH[ 33] = _SLICE3
DISPATCH[ 55] = _BINARY_ADD # INPLACE
DISPATCH[ 56] = _BINARY_SUBTRACT # INPLACE
DISPATCH[ 57] = _BINARY_MULTIPLY # INPLACE
DISPATCH[ 58] = _BINARY_DIVIDE # INPLACE
DISPATCH[ 59] = _BINARY_MODULO # INPLACE
DISPATCH[ 60] = _STORE_SUBSCR
DISPATCH[ 61] = _DELETE_SUBSCR
DISPATCH[ 62] = _BINARY_LSHIFT
DISPATCH[ 63] = _BINARY_RSHIFT
DISPATCH[ 64] = _BINARY_AND
DISPATCH[ 65] = _BINARY_XOR
DISPATCH[ 66] = _BINARY_OR
DISPATCH[ 67] = _BINARY_POWER # INPLACE
DISPATCH[ 68] = _GET_ITER
DISPATCH[ 71] = _PRINT_ITEM
DISPATCH[ 73] = _PRINT_ITEM_TO
DISPATCH[ 75] = _BINARY_LSHIFT # INPLACE
DISPATCH[ 76] = _BINARY_RSHIFT # INPLACE
DISPATCH[ 77] = _BINARY_AND # INPLACE
DISPATCH[ 78] = _BINARY_XOR # INPLACE
DISPATCH[ 79] = _BINARY_OR # INPLACE
DISPATCH[ 83] = _RETURN_VALUE
DISPATCH[ 84] = _IMPORT_STAR
DISPATCH[ 85] = _EXEC_STMT
DISPATCH[ 88] = _END_FINALLY
DISPATCH[ 89] = _BUILD_CLASS
DISPATCH[ 90] = _STORE_NAME
DISPATCH[ 91] = _DELETE_NAME
DISPATCH[ 92] = _UNPACK_SEQUENCE
DISPATCH[ 93] = _FOR_ITER
DISPATCH[ 95] = _STORE_ATTR
DISPATCH[ 96] = _DELETE_ATTR
DISPATCH[ 97] = _STORE_GLOBAL
DISPATCH[ 98] = _DELETE_GLOBAL
DISPATCH[100] = _LOAD_CONST
DISPATCH[101] = _LOAD_NAME
DISPATCH[102] = _BUILD_TUPLE
DISPATCH[103] = _BUILD_LIST
DISPATCH[104] = _BUILD_MAP
DISPATCH[105] = _LOAD_ATTR
DISPATCH[106] = _COMPARE_OP
DISPATCH[107] = _IMPORT_NAME
DISPATCH[108] = _IMPORT_FROM
DISPATCH[110] = _JUMP_FORWARD
DISPATCH[111] = _JUMP_IF_FALSE
DISPATCH[112] = _JUMP_IF_TRUE
DISPATCH[113] = _JUMP_ABSOLUTE
DISPATCH[114] = _FOR_LOOP
DISPATCH[116] = _LOAD_GLOBAL
DISPATCH[121] = _SETUP_EXCEPT
DISPATCH[122] = _SETUP_FINALLY
DISPATCH[124] = _LOAD_FAST
DISPATCH[125] = _STORE_FAST
DISPATCH[126] = _DELETE_FAST
DISPATCH[127] = _LINE_NUM
DISPATCH[130] = _RAISE_VARARGS
DISPATCH[131] = _CALL_FUNCTION
DISPATCH[132] = _MAKE_FUNCTION
DISPATCH[134] = _MAKE_CLOSURE
DISPATCH[135] = _LOAD_CLOSURE
DISPATCH[136] = _LOAD_DEREF
DISPATCH[140] = _CALL_FUNCTION_VAR
DISPATCH[141] = _CALL_FUNCTION_KW
DISPATCH[142] = _CALL_FUNCTION_VAR_KW
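# Illustrative sketch (not pychecker's actual driver loop): a dispatch
# table like the one above is consumed by indexing on the opcode and
# invoking the registered handler, e.g.:
#
#   op, oparg, i, extended_arg = OP.getInfo(code.bytes, i, extended_arg)
#   handler = DISPATCH[op]
#   if handler is not None:
#       handler(oparg, operand, codeSource, code)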
|
0Chencc/CTFCrackTools | refs/heads/master | Lib/site-packages/pip/_vendor/distlib/resources.py | 335 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import pkgutil
import shutil
import sys
import types
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir, Cache
logger = logging.getLogger(__name__)
cache = None # created when needed
class ResourceCache(Cache):
def __init__(self, base=None):
if base is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('resource-cache'))
super(ResourceCache, self).__init__(base)
def is_stale(self, resource, path):
"""
Is the cache stale for the given resource?
:param resource: The :class:`Resource` being cached.
:param path: The path of the resource in the cache.
:return: True if the cache is stale.
"""
# Cache invalidation is a hard problem :-)
return True
def get(self, resource):
"""
        Get a resource into the cache.
:param resource: A :class:`Resource` instance.
:return: The pathname of the resource in the cache.
"""
prefix, path = resource.finder.get_cache_info(resource)
if prefix is None:
result = path
else:
result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
dirname = os.path.dirname(result)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if not os.path.exists(result):
stale = True
else:
stale = self.is_stale(resource, path)
if stale:
# write the bytes of the resource to the cache location
with open(result, 'wb') as f:
f.write(resource.bytes)
return result
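# Usage sketch (illustrative): callers normally reach the cache through
# Resource.file_path below rather than instantiating ResourceCache
# themselves:
#
#   r = finder('distlib').find('util.py')
#   local_path = r.file_path    # materialized via ResourceCache.get()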
class ResourceBase(object):
def __init__(self, finder, name):
self.finder = finder
self.name = name
class Resource(ResourceBase):
"""
A class representing an in-package resource, such as a data file. This is
not normally instantiated by user code, but rather by a
:class:`ResourceFinder` which manages the resource.
"""
is_container = False # Backwards compatibility
def as_stream(self):
"""
Get the resource as a stream.
This is not a property to make it obvious that it returns a new stream
each time.
"""
return self.finder.get_stream(self)
@cached_property
def file_path(self):
global cache
if cache is None:
cache = ResourceCache()
return cache.get(self)
@cached_property
def bytes(self):
return self.finder.get_bytes(self)
@cached_property
def size(self):
return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
is_container = True # Backwards compatibility
@cached_property
def resources(self):
return self.finder.get_resources(self)
class ResourceFinder(object):
"""
Resource finder for file system resources.
"""
if sys.platform.startswith('java'):
skipped_extensions = ('.pyc', '.pyo', '.class')
else:
skipped_extensions = ('.pyc', '.pyo')
def __init__(self, module):
self.module = module
self.loader = getattr(module, '__loader__', None)
self.base = os.path.dirname(getattr(module, '__file__', ''))
def _adjust_path(self, path):
return os.path.realpath(path)
def _make_path(self, resource_name):
# Issue #50: need to preserve type of path on Python 2.x
# like os.path._get_sep
if isinstance(resource_name, bytes): # should only happen on 2.x
sep = b'/'
else:
sep = '/'
parts = resource_name.split(sep)
parts.insert(0, self.base)
result = os.path.join(*parts)
return self._adjust_path(result)
def _find(self, path):
return os.path.exists(path)
def get_cache_info(self, resource):
return None, resource.path
def find(self, resource_name):
path = self._make_path(resource_name)
if not self._find(path):
result = None
else:
if self._is_directory(path):
result = ResourceContainer(self, resource_name)
else:
result = Resource(self, resource_name)
result.path = path
return result
def get_stream(self, resource):
return open(resource.path, 'rb')
def get_bytes(self, resource):
with open(resource.path, 'rb') as f:
return f.read()
def get_size(self, resource):
return os.path.getsize(resource.path)
def get_resources(self, resource):
def allowed(f):
return (f != '__pycache__' and not
f.endswith(self.skipped_extensions))
return set([f for f in os.listdir(resource.path) if allowed(f)])
def is_container(self, resource):
return self._is_directory(resource.path)
_is_directory = staticmethod(os.path.isdir)
def iterator(self, resource_name):
resource = self.find(resource_name)
if resource is not None:
todo = [resource]
while todo:
resource = todo.pop(0)
yield resource
if resource.is_container:
rname = resource.name
for name in resource.resources:
if not rname:
new_name = name
else:
new_name = '/'.join([rname, name])
child = self.find(new_name)
if child.is_container:
todo.append(child)
else:
yield child
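# Usage sketch (illustrative): iterator() walks a resource container and
# all of its descendants, yielding Resource objects for files and
# ResourceContainer objects for directories:
#
#   f = finder('distlib')
#   for res in f.iterator(''):    # '' starts at the package root
#       print(res.name, res.is_container)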
class ZipResourceFinder(ResourceFinder):
"""
Resource finder for resources in .zip files.
"""
def __init__(self, module):
super(ZipResourceFinder, self).__init__(module)
archive = self.loader.archive
self.prefix_len = 1 + len(archive)
# PyPy doesn't have a _files attr on zipimporter, and you can't set one
if hasattr(self.loader, '_files'):
self._files = self.loader._files
else:
self._files = zipimport._zip_directory_cache[archive]
self.index = sorted(self._files)
def _adjust_path(self, path):
return path
def _find(self, path):
path = path[self.prefix_len:]
if path in self._files:
result = True
else:
if path and path[-1] != os.sep:
path = path + os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
if not result:
logger.debug('_find failed: %r %r', path, self.loader.prefix)
else:
logger.debug('_find worked: %r %r', path, self.loader.prefix)
return result
def get_cache_info(self, resource):
prefix = self.loader.archive
path = resource.path[1 + len(prefix):]
return prefix, path
def get_bytes(self, resource):
return self.loader.get_data(resource.path)
def get_stream(self, resource):
return io.BytesIO(self.get_bytes(resource))
def get_size(self, resource):
path = resource.path[self.prefix_len:]
return self._files[path][3]
def get_resources(self, resource):
path = resource.path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
plen = len(path)
result = set()
i = bisect.bisect(self.index, path)
while i < len(self.index):
if not self.index[i].startswith(path):
break
s = self.index[i][plen:]
result.add(s.split(os.sep, 1)[0]) # only immediate children
i += 1
return result
def _is_directory(self, path):
path = path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
return result
_finder_registry = {
type(None): ResourceFinder,
zipimport.zipimporter: ZipResourceFinder
}
try:
# In Python 3.6, _frozen_importlib -> _frozen_importlib_external
try:
import _frozen_importlib_external as _fi
except ImportError:
import _frozen_importlib as _fi
_finder_registry[_fi.SourceFileLoader] = ResourceFinder
_finder_registry[_fi.FileFinder] = ResourceFinder
del _fi
except (ImportError, AttributeError):
pass
def register_finder(loader, finder_maker):
_finder_registry[type(loader)] = finder_maker
_finder_cache = {}
def finder(package):
"""
Return a resource finder for a package.
:param package: The name of the package.
:return: A :class:`ResourceFinder` instance for the package.
"""
if package in _finder_cache:
result = _finder_cache[package]
else:
if package not in sys.modules:
__import__(package)
module = sys.modules[package]
path = getattr(module, '__path__', None)
if path is None:
raise DistlibException('You cannot get a finder for a module, '
'only for a package')
loader = getattr(module, '__loader__', None)
finder_maker = _finder_registry.get(type(loader))
if finder_maker is None:
raise DistlibException('Unable to locate finder for %r' % package)
result = finder_maker(module)
_finder_cache[package] = result
return result
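# Minimal usage sketch (illustrative; any importable *package* name
# works, while plain modules raise DistlibException):
#
#   f = finder('distlib')
#   r = f.find('util.py')          # a Resource, or None if absent
#   if r is not None:
#       data = r.bytes             # whole file as bytes
#       with r.as_stream() as s:   # or a fresh stream each call
#           head = s.read(16)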
_dummy_module = types.ModuleType(str('__dummy__'))
def finder_for_path(path):
"""
Return a resource finder for a path, which should represent a container.
:param path: The path.
:return: A :class:`ResourceFinder` instance for the path.
"""
result = None
# calls any path hooks, gets importer into cache
pkgutil.get_importer(path)
loader = sys.path_importer_cache.get(path)
finder = _finder_registry.get(type(loader))
if finder:
module = _dummy_module
module.__file__ = os.path.join(path, '')
module.__loader__ = loader
result = finder(module)
return result
|
LiaoPan/blaze | refs/heads/master | blaze/compute/tests/test_spark.py | 3 | from __future__ import absolute_import, division, print_function
import pytest
pyspark = pytest.importorskip('pyspark')
import pandas as pd
from blaze import compute, symbol, summary, exp, by, join, merge
from toolz import identity
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
data2 = [['Alice', 'Austin'],
['Bob', 'Boston']]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
# this only exists because we need to have a single session scoped spark
# context, otherwise these would simply be global variables
@pytest.fixture
def rdd(sc):
return sc.parallelize(data)
@pytest.fixture
def rdd2(sc):
return sc.parallelize(data2)
t = symbol('t', 'var * {name: string, amount: int, id: int}')
t2 = symbol('t2', 'var * {name: string, city: string}')
# Web Commons Graph Example data
data_idx = [['A', 1],
['B', 2],
['C', 3]]
data_arc = [[1, 3],
[2, 3],
[3, 1]]
t_idx = symbol('idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('arc', 'var * {node_out: int32, node_id: int32}')
def test_spark_symbol(rdd):
assert compute(t, rdd) == rdd
def test_spark_projection(rdd):
assert compute(t['name'], rdd).collect() == [row[0] for row in data]
def test_spark_multicols_projection(rdd):
result = compute(t[['amount', 'name']], rdd).collect()
expected = [(100, 'Alice'), (200, 'Bob'), (50, 'Alice')]
print(result)
print(expected)
assert result == expected
inc = lambda x: x + 1
reduction_exprs = [
t['amount'].sum(),
t['amount'].min(),
t['amount'].max(),
t['amount'].nunique(),
t['name'].nunique(),
t['amount'].count(),
(t['amount'] > 150).any(),
(t['amount'] > 150).all(),
t['amount'].mean(),
t['amount'].var(),
summary(a=t.amount.sum(), b=t.id.count()),
t['amount'].std()]
def test_spark_reductions(rdd):
for expr in reduction_exprs:
result = compute(expr, rdd)
expected = compute(expr, data)
if not result == expected:
print(result)
print(expected)
if isinstance(result, float):
assert abs(result - expected) < 0.001
else:
assert result == expected
exprs = [
t['amount'],
t['amount'] == 100,
t['amount'].truncate(150),
t[t['name'] == 'Alice'],
t[t['amount'] == 0],
t[t['amount'] > 150],
t['amount'] + t['id'],
t['amount'] % t['id'],
exp(t['amount']),
by(t['name'], total=t['amount'].sum()),
by(t['name'], total=(t['amount'] + 1).sum()),
(t['amount'] * 1).label('foo'),
t.map(lambda tup: tup[1] + tup[2], 'real'),
t.like(name='Alice'),
t['amount'].apply(identity, 'var * real', splittable=True),
t['amount'].map(inc, 'int')]
def test_spark_basic(rdd):
check_exprs_against_python(exprs, data, rdd)
def check_exprs_against_python(exprs, data, rdd):
any_bad = False
for expr in exprs:
result = compute(expr, rdd).collect()
expected = list(compute(expr, data))
if not result == expected:
any_bad = True
print("Expression:", expr)
print("Spark:", result)
print("Python:", expected)
assert not any_bad
def test_spark_big_by(sc):
tbig = symbol(
'tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
big_exprs = [
by(tbig[['name', 'sex']], total=tbig['amount'].sum()),
by(tbig[['name', 'sex']], total=(tbig['id'] + tbig['amount']).sum())]
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
rddbig = sc.parallelize(databig)
check_exprs_against_python(big_exprs, databig, rddbig)
def test_head(rdd):
assert list(compute(t.head(1), rdd)) == list(compute(t.head(1), data))
def test_sort(rdd):
check_exprs_against_python([
t.sort('amount'),
t.sort('amount', ascending=True),
t.sort(t['amount'], ascending=True),
t.sort(-t['amount'].label('foo') + 1, ascending=True),
t.sort(['amount', 'id'])], data, rdd)
def test_distinct(rdd):
assert set(compute(t['name'].distinct(), rdd).collect()) == \
set(['Alice', 'Bob'])
@pytest.mark.xfail(
raises=NotImplementedError,
reason='cannot specify columns to distinct on yet',
)
def test_distinct_on(rdd):
compute(t.distinct('name'), rdd)
def test_join(rdd, rdd2):
joined = join(t, t2, 'name')
expected = [('Alice', 100, 1, 'Austin'),
('Bob', 200, 2, 'Boston'),
('Alice', 50, 3, 'Austin')]
result = compute(joined, {t: rdd, t2: rdd2}).collect()
assert all(i in expected for i in result)
def test_multi_column_join(sc):
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
rleft = sc.parallelize(left)
rright = sc.parallelize(right)
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
result = compute(j, {L: rleft, R: rright})
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
assert set(result.collect()) == set(expected)
def test_groupby(sc):
rddidx = sc.parallelize(data_idx)
rddarc = sc.parallelize(data_arc)
joined = join(t_arc, t_idx, "node_id")
t = by(joined['name'], count=joined['node_id'].count())
a = compute(t, {t_arc: rddarc, t_idx: rddidx})
in_degree = dict(a.collect())
assert in_degree == {'A': 1, 'C': 2}
def test_multi_level_rowfunc_works(rdd):
expr = t['amount'].map(lambda x: x + 1, 'int')
assert compute(expr, rdd).collect() == [x[1] + 1 for x in data]
def test_merge(rdd):
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert compute(expr, rdd).collect() == [
(row[0], row[1] * 2) for row in data]
def test_selection_out_of_order(rdd):
expr = t['name'][t['amount'] < 100]
assert compute(expr, rdd).collect() == ['Alice']
def test_recursive_rowfunc_is_used(rdd):
expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2 * (101 + 53)),
('Bob', 2 * (202))]
assert set(compute(expr, rdd).collect()) == set(expected)
def test_outer_join(sc):
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = sc.parallelize(left)
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = sc.parallelize(right)
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
    # Full outer join
assert set(compute(join(L, R, how='outer'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
|
majora2007/plexpy | refs/heads/master | lib/unidecode/x056.py | 252 | data = (
'Di ', # 0x00
'Qi ', # 0x01
'Jiao ', # 0x02
'Chong ', # 0x03
'Jiao ', # 0x04
'Kai ', # 0x05
'Tan ', # 0x06
'San ', # 0x07
'Cao ', # 0x08
'Jia ', # 0x09
'Ai ', # 0x0a
'Xiao ', # 0x0b
'Piao ', # 0x0c
'Lou ', # 0x0d
'Ga ', # 0x0e
'Gu ', # 0x0f
'Xiao ', # 0x10
'Hu ', # 0x11
'Hui ', # 0x12
'Guo ', # 0x13
'Ou ', # 0x14
'Xian ', # 0x15
'Ze ', # 0x16
'Chang ', # 0x17
'Xu ', # 0x18
'Po ', # 0x19
'De ', # 0x1a
'Ma ', # 0x1b
'Ma ', # 0x1c
'Hu ', # 0x1d
'Lei ', # 0x1e
'Du ', # 0x1f
'Ga ', # 0x20
'Tang ', # 0x21
'Ye ', # 0x22
'Beng ', # 0x23
'Ying ', # 0x24
'Saai ', # 0x25
'Jiao ', # 0x26
'Mi ', # 0x27
'Xiao ', # 0x28
'Hua ', # 0x29
'Mai ', # 0x2a
'Ran ', # 0x2b
'Zuo ', # 0x2c
'Peng ', # 0x2d
'Lao ', # 0x2e
'Xiao ', # 0x2f
'Ji ', # 0x30
'Zhu ', # 0x31
'Chao ', # 0x32
'Kui ', # 0x33
'Zui ', # 0x34
'Xiao ', # 0x35
'Si ', # 0x36
'Hao ', # 0x37
'Fu ', # 0x38
'Liao ', # 0x39
'Qiao ', # 0x3a
'Xi ', # 0x3b
'Xiu ', # 0x3c
'Tan ', # 0x3d
'Tan ', # 0x3e
'Mo ', # 0x3f
'Xun ', # 0x40
'E ', # 0x41
'Zun ', # 0x42
'Fan ', # 0x43
'Chi ', # 0x44
'Hui ', # 0x45
'Zan ', # 0x46
'Chuang ', # 0x47
'Cu ', # 0x48
'Dan ', # 0x49
'Yu ', # 0x4a
'Tun ', # 0x4b
'Cheng ', # 0x4c
'Jiao ', # 0x4d
'Ye ', # 0x4e
'Xi ', # 0x4f
'Qi ', # 0x50
'Hao ', # 0x51
'Lian ', # 0x52
'Xu ', # 0x53
'Deng ', # 0x54
'Hui ', # 0x55
'Yin ', # 0x56
'Pu ', # 0x57
'Jue ', # 0x58
'Qin ', # 0x59
'Xun ', # 0x5a
'Nie ', # 0x5b
'Lu ', # 0x5c
'Si ', # 0x5d
'Yan ', # 0x5e
'Ying ', # 0x5f
'Da ', # 0x60
'Dan ', # 0x61
'Yu ', # 0x62
'Zhou ', # 0x63
'Jin ', # 0x64
'Nong ', # 0x65
'Yue ', # 0x66
'Hui ', # 0x67
'Qi ', # 0x68
'E ', # 0x69
'Zao ', # 0x6a
'Yi ', # 0x6b
'Shi ', # 0x6c
'Jiao ', # 0x6d
'Yuan ', # 0x6e
'Ai ', # 0x6f
'Yong ', # 0x70
'Jue ', # 0x71
'Kuai ', # 0x72
'Yu ', # 0x73
'Pen ', # 0x74
'Dao ', # 0x75
'Ge ', # 0x76
'Xin ', # 0x77
'Dun ', # 0x78
'Dang ', # 0x79
'Sin ', # 0x7a
'Sai ', # 0x7b
'Pi ', # 0x7c
'Pi ', # 0x7d
'Yin ', # 0x7e
'Zui ', # 0x7f
'Ning ', # 0x80
'Di ', # 0x81
'Lan ', # 0x82
'Ta ', # 0x83
'Huo ', # 0x84
'Ru ', # 0x85
'Hao ', # 0x86
'Xia ', # 0x87
'Ya ', # 0x88
'Duo ', # 0x89
'Xi ', # 0x8a
'Chou ', # 0x8b
'Ji ', # 0x8c
'Jin ', # 0x8d
'Hao ', # 0x8e
'Ti ', # 0x8f
'Chang ', # 0x90
'[?] ', # 0x91
'[?] ', # 0x92
'Ca ', # 0x93
'Ti ', # 0x94
'Lu ', # 0x95
'Hui ', # 0x96
'Bo ', # 0x97
'You ', # 0x98
'Nie ', # 0x99
'Yin ', # 0x9a
'Hu ', # 0x9b
'Mo ', # 0x9c
'Huang ', # 0x9d
'Zhe ', # 0x9e
'Li ', # 0x9f
'Liu ', # 0xa0
'Haai ', # 0xa1
'Nang ', # 0xa2
'Xiao ', # 0xa3
'Mo ', # 0xa4
'Yan ', # 0xa5
'Li ', # 0xa6
'Lu ', # 0xa7
'Long ', # 0xa8
'Fu ', # 0xa9
'Dan ', # 0xaa
'Chen ', # 0xab
'Pin ', # 0xac
'Pi ', # 0xad
'Xiang ', # 0xae
'Huo ', # 0xaf
'Mo ', # 0xb0
'Xi ', # 0xb1
'Duo ', # 0xb2
'Ku ', # 0xb3
'Yan ', # 0xb4
'Chan ', # 0xb5
'Ying ', # 0xb6
'Rang ', # 0xb7
'Dian ', # 0xb8
'La ', # 0xb9
'Ta ', # 0xba
'Xiao ', # 0xbb
'Jiao ', # 0xbc
'Chuo ', # 0xbd
'Huan ', # 0xbe
'Huo ', # 0xbf
'Zhuan ', # 0xc0
'Nie ', # 0xc1
'Xiao ', # 0xc2
'Ca ', # 0xc3
'Li ', # 0xc4
'Chan ', # 0xc5
'Chai ', # 0xc6
'Li ', # 0xc7
'Yi ', # 0xc8
'Luo ', # 0xc9
'Nang ', # 0xca
'Zan ', # 0xcb
'Su ', # 0xcc
'Xi ', # 0xcd
'So ', # 0xce
'Jian ', # 0xcf
'Za ', # 0xd0
'Zhu ', # 0xd1
'Lan ', # 0xd2
'Nie ', # 0xd3
'Nang ', # 0xd4
'[?] ', # 0xd5
'[?] ', # 0xd6
'Wei ', # 0xd7
'Hui ', # 0xd8
'Yin ', # 0xd9
'Qiu ', # 0xda
'Si ', # 0xdb
'Nin ', # 0xdc
'Jian ', # 0xdd
'Hui ', # 0xde
'Xin ', # 0xdf
'Yin ', # 0xe0
'Nan ', # 0xe1
'Tuan ', # 0xe2
'Tuan ', # 0xe3
'Dun ', # 0xe4
'Kang ', # 0xe5
'Yuan ', # 0xe6
'Jiong ', # 0xe7
'Pian ', # 0xe8
'Yun ', # 0xe9
'Cong ', # 0xea
'Hu ', # 0xeb
'Hui ', # 0xec
'Yuan ', # 0xed
'You ', # 0xee
'Guo ', # 0xef
'Kun ', # 0xf0
'Cong ', # 0xf1
'Wei ', # 0xf2
'Tu ', # 0xf3
'Wei ', # 0xf4
'Lun ', # 0xf5
'Guo ', # 0xf6
'Qun ', # 0xf7
'Ri ', # 0xf8
'Ling ', # 0xf9
'Gu ', # 0xfa
'Guo ', # 0xfb
'Tai ', # 0xfc
'Guo ', # 0xfd
'Tu ', # 0xfe
'You ', # 0xff
)
|
megcunningham/django-debug-toolbar | refs/heads/master | debug_toolbar/panels/sql/__init__.py | 41 | from debug_toolbar.panels.sql.panel import SQLPanel # noqa
|
graehl/nplm01 | refs/heads/master | python/testNeuralLM.py | 6 | import nplm
if __name__ == "__main__":
import sys
import fileinput
import argparse
parser = argparse.ArgumentParser(description='Score sentences using n-gram language model.')
parser.add_argument('--test_file', metavar='file', dest='test_file', help='test text file')
parser.add_argument('--model_file', metavar='file', dest='model_file', help='model file')
args = parser.parse_args()
m = nplm.NeuralLM.from_file(args.model_file)
n = m.ngram_size
for line in fileinput.input(args.test_file):
words = line.split()
if len(words) < n: continue
unk = m.word_to_index['<unk>']
words = ['<s>'] * (n-1) + words + ['</s>']
words = [m.word_to_index.get(w, unk) for w in words]
ngrams = []
for i in xrange(n-1, len(words)):
ngrams.append(words[i-n+1:i+1])
ngrams = m.make_data(ngrams)
print m.forward_prop(ngrams[:-1], output=ngrams[-1])[:,0]
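# Example (illustrative): with ngram_size n == 3 and the input line
# "a b c", the padded sequence is ['<s>', '<s>', 'a', 'b', 'c', '</s>']
# and the scored ngrams are ('<s>', '<s>', 'a'), ('<s>', 'a', 'b'),
# ('a', 'b', 'c') and ('b', 'c', '</s>').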
|
deepfield/ibis | refs/heads/master | ibis/tests/test_version.py | 1 | import os
from pkg_resources import parse_version
from pkg_resources.extern.packaging.version import Version
import pytest
import ibis
@pytest.mark.skipif(
bool(os.environ.get('CIRCLECI', None)),
reason='Testing import time on CI is flaky due to VM variance',
)
def test_import_time():
sh = pytest.importorskip('sh')
lines = [
'from timeit import timeit',
"print(timeit('import ibis'))",
]
delta = float(str(sh.python(c='; '.join(lines))))
assert delta < 2.0
def test_version():
assert isinstance(parse_version(ibis.__version__), Version)
|
sio2project/oioioi | refs/heads/master | oioioi/problems/migrations/0028_problemname.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-05-10 16:50
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('problems', '0027_problem_alter_legacy_name'),
]
operations = [
migrations.CreateModel(
name='ProblemName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Human-readable name.', max_length=255, verbose_name='name translation')),
('language', models.CharField(choices=[(b'en', b'English'), (b'pl', b'Polish')], max_length=2, verbose_name='language code')),
('problem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='names', to='problems.Problem')),
],
options={
'verbose_name': 'problem name',
'verbose_name_plural': 'problem names',
},
),
migrations.AlterUniqueTogether(
name='problemname',
unique_together=set([('problem', 'language')]),
),
]
|
gameduell/duell | refs/heads/master | bin/win/python2.7.9/Lib/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 355 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
self.isstring = isinstance(obj, str) or isinstance(obj, bytes)
# Support for bytes here is Py2
if self.isstring:
self.obj = ensure_str(self.obj)
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and node.isstring:
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), _("Text nodes have no children")
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
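# Usage sketch (illustrative; requires lxml):
#
#   from lxml import etree
#   tree = etree.fromstring('<div>hi<!-- c --></div>').getroottree()
#   for token in TreeWalker(tree):
#       print(token)    # token dicts: Document, StartTag, Characters...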
|
j127/Gladiator-Arena | refs/heads/dev | alembic/env.py | 76 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
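# For example, "alembic upgrade head --sql" runs in offline mode and
# emits the migration SQL instead of executing it against a database.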
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
JohnDevitt/appengine-django-skeleton-master | refs/heads/master | lib/django/contrib/sites/requests.py | 695 | from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RequestSite(object):
"""
A class that shares the primary interface of Site (i.e., it has
``domain`` and ``name`` attributes) but gets its data from a Django
HttpRequest object rather than from a database.
The save() and delete() methods raise NotImplementedError.
"""
def __init__(self, request):
self.domain = self.name = request.get_host()
def __str__(self):
return self.domain
def save(self, force_insert=False, force_update=False):
raise NotImplementedError('RequestSite cannot be saved.')
def delete(self):
raise NotImplementedError('RequestSite cannot be deleted.')
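# Usage sketch (illustrative): inside a Django view,
#
#   def my_view(request):
#       site = RequestSite(request)
#       return HttpResponse(site.domain)    # e.g. "example.com"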
|
halvertoluke/edx-platform | refs/heads/default_branch | openedx/core/djangoapps/credit/exceptions.py | 60 | """Exceptions raised by the credit API. """
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException
# TODO: Cleanup this mess! ECOM-2908
class CreditApiBadRequest(Exception):
"""
Could not complete a request to the credit API because
there was a problem with the request (as opposed to an internal error).
"""
pass
class InvalidCreditRequirements(CreditApiBadRequest):
"""
The requirement dictionary provided has invalid format.
"""
pass
class InvalidCreditCourse(CreditApiBadRequest):
"""
The course is not configured for credit.
"""
pass
class UserIsNotEligible(CreditApiBadRequest):
"""
The user has not satisfied eligibility requirements for credit.
"""
pass
class CreditProviderNotConfigured(CreditApiBadRequest):
"""
The requested credit provider is not configured correctly for the course.
"""
pass
class RequestAlreadyCompleted(CreditApiBadRequest):
"""
The user has already submitted a request and received a response from the credit provider.
"""
pass
class CreditRequestNotFound(CreditApiBadRequest):
"""
The request does not exist.
"""
pass
class InvalidCreditStatus(CreditApiBadRequest):
"""
The status is not either "approved" or "rejected".
"""
pass
class InvalidCreditRequest(APIException):
""" API request is invalid. """
status_code = status.HTTP_400_BAD_REQUEST
class UserNotEligibleException(InvalidCreditRequest):
""" User not eligible for credit for a given course. """
def __init__(self, course_key, username):
detail = _('[{username}] is not eligible for credit for [{course_key}].').format(username=username,
course_key=course_key)
super(UserNotEligibleException, self).__init__(detail)
class InvalidCourseKey(InvalidCreditRequest):
""" Course key is invalid. """
def __init__(self, course_key):
detail = _('[{course_key}] is not a valid course key.').format(course_key=course_key)
super(InvalidCourseKey, self).__init__(detail)
|
swiftstack/pympler | refs/heads/master | test/tracker/test_stats.py | 4 |
import os
import re
import sys
import unittest
from shutil import rmtree
from tempfile import mkdtemp, mkstemp
from pympler.util.compat import StringIO, BytesIO
from pympler.classtracker import ClassTracker
from pympler.classtracker_stats import ConsoleStats, HtmlStats, Stats
from pympler.asizeof import Asizer, asizeof
class Foo:
def __init__(self):
self.foo = 'Foo'
def __repr__(self):
return '<%s>' % self.foo
class Bar(Foo):
def __init__(self):
Foo.__init__(self)
self.bar = 'bar'
class FooNew(object):
def __init__(self):
self.foo = 'foo'
class BarNew(FooNew):
def __init__(self):
super(BarNew, self).__init__()
class LogTestCase(unittest.TestCase):
def setUp(self):
self.out = StringIO()
self.tracker = ClassTracker(stream=self.out)
@property
def output(self):
"""Return output recorded in `ClassTracker` output stream."""
return self.out.getvalue()
def tearDown(self):
self.tracker.stop_periodic_snapshots()
self.tracker.clear()
def test_dump(self):
"""Test serialization of log data.
"""
foo = Foo()
foo.data = range(1000)
bar = Bar()
self.tracker.track_object(foo, resolution_level=4)
self.tracker.track_object(bar)
self.tracker.create_snapshot('Footest')
f1 = StringIO()
f2 = StringIO()
ConsoleStats(tracker=self.tracker, stream=f1).print_stats()
tmp = BytesIO()
Stats(tracker=self.tracker).dump_stats(tmp, close=False)
self.tracker.clear()
stats = ConsoleStats(stream=f2)
self.assertEqual(stats.index, None)
self.assertEqual(stats.snapshots, None)
tmp.seek(0)
stats.load_stats(tmp)
tmp.close()
self.assert_('Foo' in stats.index)
stats.print_stats()
self.assertEqual(f1.getvalue(), f2.getvalue())
# Test partial printing
stats.stream = f3 = StringIO()
stats.sort_stats()
tolen = len(stats.sorted)
stats.print_stats(clsname='Bar')
self.assertEqual(len(stats.sorted), tolen)
stats.print_summary()
clsname = f3.getvalue().split('\n')[0]
self.assertNotEqual(re.search('Bar', clsname), None, clsname)
f1.close()
f2.close()
f3.close()
def test_sort_stats(self):
"""Test sort_stats and reverse_order.
"""
self.tracker.track_class(Bar, name='Bar')
foo = Foo()
foo.data = list(range(1000))
bar1 = Bar()
bar2 = Bar()
self.tracker.track_object(foo, resolution_level=4)
self.tracker.create_snapshot()
stats = self.tracker.stats
# Test sort_stats and reverse_order
self.assertEqual(stats.sort_stats('size'), stats)
self.assertEqual(stats.sorted[0].classname, 'Foo')
stats.reverse_order()
self.assertEqual(stats.sorted[0].classname, 'Bar')
stats.sort_stats('classname', 'birth')
self.assertEqual(stats.sorted[0].classname, 'Bar')
self.assertRaises(ValueError, stats.sort_stats, 'name', 42, 'classn')
stats.sort_stats('classname')
def test_dump_load_with_filename(self):
"""Test serialization with filename.
"""
foo = Foo()
self.tracker.track_object(foo, resolution_level=2)
self.tracker.create_snapshot()
fhandle, fname = mkstemp(prefix='pympler_test_dump')
os.close(fhandle)
try:
self.tracker.stats.dump_stats(fname)
output = StringIO()
stats = ConsoleStats(filename=fname, stream=output)
stats.print_stats()
self.assertTrue('<Foo>' in output.getvalue(), output.getvalue())
# Check if a Stats loaded from a dump can be dumped again
stats.dump_stats(fname)
finally:
os.unlink(fname)
def test_tracked_classes(self):
"""Test listing tracked classes.
"""
self.tracker.track_class(Foo, name='Foo')
self.tracker.track_class(Bar, name='Bar')
foo = Foo()
self.tracker.create_snapshot()
bar = Bar()
self.tracker.create_snapshot()
foo = FooNew()
self.tracker.track_object(foo)
self.tracker.create_snapshot()
stats = self.tracker.stats
self.assertEqual(stats.tracked_classes, ['Bar', 'Foo', 'FooNew'])
stats.print_summary()
def test_print_stats(self):
"""Test printing class-filtered statistics.
"""
self.tracker.track_class(Foo, name='Foo', trace=True)
self.tracker.track_class(Bar, name='Bar')
foo = Foo()
bar = Bar()
self.tracker.create_snapshot()
stats = self.tracker.stats
stats.print_stats(clsname='Foo')
self.assertTrue('Foo' in self.output, self.output)
self.assertFalse('Bar' in self.output, self.output)
self.assertTrue('foo = Foo()' in self.output, self.output)
def test_print_stats_limit(self):
"""Test printing limited statistics.
"""
self.tracker.track_class(Foo, name='Foo')
foo = [Foo() for _ in range(10)]
self.tracker.create_snapshot()
stats = self.tracker.stats
stats.print_stats(limit=3)
self.assertEqual(self.output.count('<Foo>'), 3)
self.out.seek(0)
self.out.truncate()
stats.print_stats(limit=0.5)
self.assertEqual(self.output.count('<Foo>'), 5)
def test_snapshots(self):
"""Test multiple snapshots.
"""
self.tracker.track_class(Foo, name='Foo')
self.tracker.track_class(Bar, name='Bar')
self.tracker.track_class(FooNew, name='FooNew')
self.tracker.create_snapshot()
f1 = Foo()
self.tracker.create_snapshot()
f2 = Foo()
f3 = FooNew()
self.tracker.create_snapshot()
b = Bar()
del b
self.tracker.create_snapshot()
stats = self.tracker.stats
stats.print_stats()
stats.print_summary()
def test_merge(self):
"""Test merging of reference trees.
"""
self.tracker.track_class(FooNew, name='Foo', resolution_level=2)
f1 = FooNew()
f1.a = list(range(1000))
f2 = FooNew()
f2.a = list(range(100))
f2.b = 'This is some stupid spam.'
self.tracker.create_snapshot('Merge test')
sizer = Asizer()
sz1 = sizer.asized(f1)
sz2 = sizer.asized(f2)
stats = self.tracker.stats
for fp in stats.snapshots:
if fp.desc == 'Merge test':
stats.annotate_snapshot(fp)
self.assert_(hasattr(fp, 'classes'))
classes = fp.classes
stats.annotate_snapshot(fp)
self.assertEqual(fp.classes, classes)
self.assert_('Foo' in fp.classes, fp.classes)
self.assert_('merged' in fp.classes['Foo'])
fm = fp.classes['Foo']['merged']
self.assertEqual(fm.size, sz1.size + sz2.size, (fm.size, str(sz1), str(sz2)))
refs = {}
for ref in fm.refs:
refs[ref.name] = ref
self.assert_('__dict__' in refs.keys(), refs.keys())
refs2 = {}
for ref in refs['__dict__'].refs:
refs2[ref.name] = ref
self.assert_('[V] a' in refs2.keys(), refs2.keys())
self.assert_('[V] b' in refs2.keys(), refs2.keys())
self.assertEqual(refs2['[V] a'].size, asizeof(f1.a, f2.a))
def test_html(self):
"""Test emitting HTML statistics."""
self.tracker.track_class(Foo, name='Foo', resolution_level=2)
self.tracker.track_class(Bar, name='Bar', trace=True)
f1 = Foo()
f1.a = list(range(100000))
f2 = Foo()
f2.a = list(range(1000))
f2.b = 'This is some stupid spam.'
f1 = Bar()
self.tracker.create_snapshot('Merge test')
stats = HtmlStats(tracker=self.tracker)
try:
target = mkdtemp(prefix='pympler_test')
output = os.path.join(target, 'footest.html')
stats.create_html(output)
source = open(output).read()
# Ensure relative links are used
fname = os.path.join('footest_files', 'Foo.html')
self.assertTrue('<a href="%s">' % fname in source, (fname, source))
finally:
rmtree(target)
def test_charts(self):
"""Test emitting graphic charts."""
self.tracker.track_class(Foo, name='Foo', resolution_level=2)
f1 = Foo()
f1.a = list(range(1000))
f2 = Foo()
f2.a = list(range(100))
f2.b = 'This is some stupid spam.'
self.tracker.create_snapshot('Merge test')
from pympler import charts
try:
target = mkdtemp(prefix='pympler_test')
output = os.path.join(target, 'timespace.png')
charts.tracker_timespace(output, self.tracker.stats)
finally:
rmtree(target)
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [ LogTestCase ]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(map(tclass, names))
if not unittest.TextTestRunner().run(suite).wasSuccessful():
sys.exit(1)
|
antiagainst/shaderc | refs/heads/master | glslc/test/option_dash_S.py | 16 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader, StdinShader
def simple_vertex_shader():
return """#version 310 es
void main() {
gl_Position = vec4(1., 2., 3., 4.);
}"""
def simple_fragment_shader():
return """#version 310 es
void main() {
gl_FragDepth = 10.;
}"""
def simple_compute_shader():
return """#version 310 es
void main() {
uvec3 temp = gl_WorkGroupID;
}"""
@inside_glslc_testsuite('OptionDashCapS')
class TestSingleDashCapSSingleFile(expect.ValidAssemblyFile):
"""Tests that -S works with a single file."""
shader = FileShader(simple_vertex_shader(), '.vert')
glslc_args = ['-S', shader]
@inside_glslc_testsuite('OptionDashCapS')
class TestSingleFileSingleDashCapS(expect.ValidAssemblyFile):
"""Tests that the position of -S doesn't matter."""
shader = FileShader(simple_vertex_shader(), '.vert')
glslc_args = [shader, '-S']
@inside_glslc_testsuite('OptionDashCapS')
class TestSingleDashCapSMultipleFiles(expect.ValidAssemblyFile):
"""Tests that -S works with multiple files."""
shader1 = FileShader(simple_vertex_shader(), '.vert')
shader2 = FileShader(simple_vertex_shader(), '.vert')
shader3 = FileShader(simple_fragment_shader(), '.frag')
glslc_args = ['-S', shader1, shader2, shader3]
@inside_glslc_testsuite('OptionDashCapS')
class TestMultipleDashCapSSingleFile(expect.ValidAssemblyFile):
"""Tests that multiple -Ss works as one."""
shader = FileShader(simple_vertex_shader(), '.vert')
glslc_args = ['-S', '-S', shader, '-S']
@inside_glslc_testsuite('OptionDashCapS')
class TestMultipleDashCapSMultipleFiles(expect.ValidAssemblyFile):
"""Tests a mix of -Ss and files."""
shader1 = FileShader(simple_fragment_shader(), '.frag')
shader2 = FileShader(simple_vertex_shader(), '.vert')
shader3 = FileShader(simple_compute_shader(), '.comp')
glslc_args = ['-S', shader1, '-S', '-S', shader2, '-S', shader3, '-S']
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashC(expect.ValidAssemblyFile):
"""Tests that -S overwrites -c."""
shader1 = FileShader(simple_fragment_shader(), '.frag')
shader2 = FileShader(simple_vertex_shader(), '.vert')
glslc_args = ['-c', '-S', shader1, '-c', '-c', shader2]
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashFShaderStage(expect.ValidAssemblyFile):
"""Tests that -S works with -fshader-stage=."""
shader1 = FileShader(simple_fragment_shader(), '.glsl')
shader2 = FileShader(simple_vertex_shader(), '.glsl')
shader3 = FileShader(simple_compute_shader(), '.glsl')
glslc_args = ['-S',
'-fshader-stage=fragment', shader1,
'-fshader-stage=vertex', shader2,
'-fshader-stage=compute', shader3]
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashStd(expect.ValidAssemblyFileWithWarning):
"""Tests that -S works with -std=."""
shader1 = FileShader(simple_fragment_shader(), '.frag')
shader2 = FileShader(simple_vertex_shader(), '.vert')
shader3 = FileShader(simple_compute_shader(), '.comp')
glslc_args = ['-S', '-std=450', shader1, shader2, shader3]
w = (': warning: (version, profile) forced to be (450, none), '
'while in source code it is (310, es)\n')
expected_warning = [
shader1, w, shader2, w, shader3, w, '3 warnings generated.\n']
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashOSingleFile(expect.SuccessfulReturn,
expect.CorrectAssemblyFilePreamble):
"""Tests that -S works with -o on a single file."""
shader = FileShader(simple_fragment_shader(), '.frag')
glslc_args = ['-S', '-o', 'blabla', shader]
def check_output_blabla(self, status):
output_name = os.path.join(status.directory, 'blabla')
return self.verify_assembly_file_preamble(output_name)
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashOMultipleFiles(expect.ErrorMessage):
"""Tests that -S works with -o on a single file."""
shader1 = FileShader(simple_fragment_shader(), '.frag')
shader2 = FileShader(simple_vertex_shader(), '.vert')
glslc_args = ['-S', '-o', 'blabla', shader1, shader2]
expected_error = ['glslc: error: cannot specify -o when '
'generating multiple output files\n']
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithStdIn(expect.ValidAssemblyFile):
"""Tests that -S works with stdin."""
shader = StdinShader(simple_fragment_shader())
glslc_args = ['-S', '-fshader-stage=fragment', shader]
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithStdOut(
expect.ReturnCodeIsZero, expect.StdoutMatch, expect.StderrMatch):
"""Tests that -S works with stdout."""
shader = FileShader(simple_fragment_shader(), '.frag')
glslc_args = ['-S', '-o', '-', shader]
expected_stdout = True
expected_stderr = ''
|
an146/pacman | refs/heads/master | test/pacman/tests/sync-nodepversion03.py | 27 | self.description = "nodepversion: -Sdd works but no deps"
p1 = pmpkg("pkg1", "1.0-2")
p1.depends = ["provision>=1.0-2"]
self.addpkg2db("sync", p1)
p2 = pmpkg("pkg2", "1.0-2")
p2.provides = ["provision=1.0-1"]
self.addpkg2db("sync", p2)
self.args = "-Sdd %s" % p1.name
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg1")
self.addrule("!PKG_EXIST=pkg2")
|
heran7/edx-platform | refs/heads/master | cms/djangoapps/contentstore/management/commands/populate_creators.py | 14 | """
Script for granting existing course instructors course creator privileges.
This script is only intended to be run once on a given environment.
"""
from auth.authz import get_users_with_instructor_role, get_users_with_staff_role
from course_creators.views import add_user_with_status_granted, add_user_with_status_unrequested
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
class Command(BaseCommand):
"""
Script for granting existing course instructors course creator privileges.
"""
help = 'Grants all users with INSTRUCTOR role permission to create courses'
def handle(self, *args, **options):
"""
The logic of the command.
"""
username = 'populate_creators_command'
email = 'grant+creator+access@edx.org'
try:
admin = User.objects.create_user(username, email, 'foo')
admin.is_staff = True
admin.save()
except IntegrityError:
# If the script did not complete the last time it was run,
# the admin user will already exist.
admin = User.objects.get(username=username, email=email)
for user in get_users_with_instructor_role():
add_user_with_status_granted(admin, user)
# Some users will be both staff and instructors. Those folks have been
# added with status granted above, and add_user_with_status_unrequested
# will not try to add them again if they already exist in the course creator database.
for user in get_users_with_staff_role():
add_user_with_status_unrequested(user)
# There could be users who are not in either staff or instructor (they've
# never actually done anything in Studio). I plan to add those as unrequested
# when they first go to their dashboard.
admin.delete()
|
furious-luke/python-utils | refs/heads/master | pythonutils/containers.py | 1 | def find(ctr, match, default=None):
    def _find(ctr, match):
        # Only recurse into iterable containers; leaves such as ints and
        # (on Python 2) strings lack __iter__ and are treated as dead ends
        # rather than iterated, which would raise or recurse forever.
        if hasattr(ctr, '__iter__'):
            if match in ctr:
                if hasattr(ctr, 'itervalues'):
                    # Dereference to value.
                    return (ctr[match], True)
                else:
                    # Return actual object.
                    return (ctr[ctr.index(match)], True)
            if hasattr(ctr, '__getitem__') and hasattr(ctr, 'itervalues'):
                # dict-like object.
                for value in ctr.itervalues():
                    result = _find(value, match)
                    if result[1]:
                        return result
            else:
                # Any other iterable object.
                for value in ctr:
                    result = _find(value, match)
                    if result[1]:
                        return result
        return (None, False)
result = _find(ctr, match)
if result[1]:
return result[0]
else:
return default
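# Usage sketch (illustrative; Python 2 because of itervalues above):
#
#   data = {'a': [1, 2, {'b': 3}], 'c': 4}
#   find(data, 'c')                     # -> 4 (dict key -> its value)
#   find(data, 'b')                     # -> 3 (found in the nested dict)
#   find(data, 5, default='missing')    # -> 'missing'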
|
ingokegel/intellij-community | refs/heads/master | python/testData/codeInsight/controlflow/lambda.py | 83 | {(lambda i=i: i) for i in range(4)}
|
itsyouonline/identityserver | refs/heads/master | clients/python/itsyouonline/Grant.py | 1 | """
Auto-generated class for Grant
"""
from six import string_types
Grant = string_types
|
Technocaveman/There-is-no-Third-Step | refs/heads/master | node_modules/pygmentize-bundled/vendor/pygments/pygments/formatters/img.py | 268 | # -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, \
get_list_opt, get_choice_opt
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
from commands import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
lines = out.splitlines()
if lines:
path = lines[0].strip().strip(':')
return path
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
_winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
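# Usage sketch (illustrative; needs PIL and an installed monospace font):
#
#   fm = FontManager(DEFAULT_FONT_NAME_NIX, font_size=14)
#   char_w, char_h = fm.get_char_size()    # pixel cell size of 'M'
#   bold_italic = fm.get_font(bold=True, oblique=True)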
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 0.10.*
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
        The horizontal padding (in pixels) between the line number margin and
        the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 1.2.*
Default: empty list
`hl_color`
Specify the color for highlighting lines. *New in Pygments 1.2.*
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
        return (self._get_char_x(maxcharno) + self.image_pad,
                self._get_line_y(maxlineno) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0),
(rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create GIF images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create JPEG images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create bitmap images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
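# Picking one of these subclasses by alias (e.g. get_formatter_by_name('gif')
# from pygments.formatters) should therefore yield the matching
# default_image_format without needing an explicit image_format option.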
|
vipulkanade/EventbriteDjango | refs/heads/master | src_eventbrite_django/topthree/models.py | 1 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
CATEGORIES = (
('music', 'Music'),
('business', 'Business & Professional'),
('food', 'Food & Drink'),
('community', 'Community & Culture'),
    ('performing', 'Performing & Visual Arts'),
    ('film', 'Film, Media & Entertainment'),
    ('sports', 'Sports & Fitness'),
('travel', 'Travel & Outdoor'),
('charity', 'Charity & Causes'),
('religion', 'Religion & Spirituality'),
('family', 'Family & Education'),
('seasonal', 'Seasonal & Holiday'),
('government', 'Government & Politics'),
('fashion', 'Fashion & Beauty'),
('home', 'Home & Lifestyle'),
('auto', 'Auto, Boat & Air'),
('hobbies', 'Hobbies & Special Interest'),
('other', 'Other'),
)
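# Django stores the short key (the first tuple element) in the database and
# renders the label in forms; e.g. a row saved with category_1='music'
# displays as 'Music'.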
class Categories(models.Model):
    category_1 = models.CharField(max_length=20, choices=CATEGORIES)
    category_2 = models.CharField(max_length=20, choices=CATEGORIES)
    category_3 = models.CharField(max_length=20, choices=CATEGORIES)
    def __unicode__(self):  # __str__ for python3
return self.category_1 |
jeffreylu9/django-cms | refs/heads/wlsite | cms/extensions/__init__.py | 82 | from .models import PageExtension # nopyflakes
from .models import TitleExtension # nopyflakes
from .extension_pool import extension_pool # nopyflakes
from .admin import PageExtensionAdmin # nopyflakes
from .admin import TitleExtensionAdmin # nopyflakes
|
mikrosimage/rez | refs/heads/20160619_master.mikros.1 | src/rez/tests/data/packages/py_packages/single_unversioned.py | 8 | name = 'single_unversioned'
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
rgommers/scipy | refs/heads/master | scipy/special/tests/test_spence.py | 60 | import numpy as np
from numpy import sqrt, log, pi
from scipy.special._testutils import FuncData
from scipy.special import spence
def test_consistency():
# Make sure the implementation of spence for real arguments
# agrees with the implementation of spence for imaginary arguments.
x = np.logspace(-30, 300, 200)
dataset = np.vstack((x + 0j, spence(x))).T
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
def test_special_points():
# Check against known values of Spence's function.
phi = (1 + sqrt(5))/2
dataset = [(1, 0),
(2, -pi**2/12),
(0.5, pi**2/12 - log(2)**2/2),
(0, pi**2/6),
(-1, pi**2/4 - 1j*pi*log(2)),
((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2),
((3 - sqrt(5))/2, pi**2/10 - log(phi)**2),
(phi, -pi**2/15 + log(phi)**2/2),
# Corrected from Zagier, "The Dilogarithm Function"
((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)]
dataset = np.asarray(dataset)
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
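# Note: scipy's spence follows the convention spence(z) = Li2(1 - z), which is
# why spence(1) == 0 and spence(0) == pi**2/6 in the table above.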
|
valentin-krasontovitsch/ansible | refs/heads/devel | lib/ansible/modules/files/file.py | 7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: file
version_added: historical
short_description: Manage files and file properties
extends_documentation_fragment: files
description:
- Set attributes of files, symlinks or directories.
- Alternatively, remove files, symlinks or directories.
- Many other modules support the same options as the C(file) module - including M(copy), M(template), and M(assemble).
- For Windows targets, use the M(win_file) module instead.
options:
path:
description:
- Path to the file being managed.
type: path
required: yes
aliases: [ dest, name ]
state:
description:
- If C(absent), directories will be recursively deleted, and files or symlinks will
be unlinked. Note that C(absent) will not cause C(file) to fail if the C(path) does
not exist as the state did not change.
- If C(directory), all intermediate subdirectories will be created if they
do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
- If C(file), without any other options this works mostly as a 'stat' and will return the current state of C(path).
      Even with other options (i.e. C(mode)), the file will be modified but will NOT be created if it does not exist;
see the C(touch) value or the M(copy) or M(template) module if you want that behavior.
- If C(hard), the hard link will be created or changed.
- If C(link), the symbolic link will be created or changed.
- If C(touch) (new in 1.4), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
default: file
choices: [ absent, directory, file, hard, link, touch ]
src:
description:
- Path of the file to link to.
- This applies only to C(state=link) and C(state=hard).
- Will accept absolute, relative and non-existing paths.
- Relative paths are relative to the file being created (C(path)) which is how
the Unix command C(ln -s SRC DEST) treats relative paths.
type: path
recurse:
description:
- Recursively set the specified file attributes on directory contents.
- This applies only to C(state=directory).
type: bool
default: no
version_added: '1.1'
force:
description:
- >
Force the creation of the symlinks in two cases: the source file does
not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
C(path) file and create symlink to the C(src) file in place of it).
type: bool
default: no
follow:
description:
- This flag indicates that filesystem links, if they exist, should be followed.
- Previous to Ansible 2.5, this was C(no) by default.
type: bool
default: yes
version_added: '1.8'
modification_time:
description:
- This parameter indicates the time the file's modification time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
    - Default is C(None), meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is the default for C(state=touch).
type: str
version_added: "2.7"
modification_time_format:
description:
- When used with C(modification_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
access_time:
description:
- This parameter indicates the time the file's access time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
    - Default is C(None), meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is the default for C(state=touch).
type: str
version_added: '2.7'
access_time_format:
description:
- When used with C(access_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
seealso:
- module: assemble
- module: copy
- module: stat
- module: template
- module: win_file
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Change file ownership, group and permissions
file:
path: /etc/foo.conf
owner: foo
group: foo
mode: '0644'
- name: Create an insecure file
file:
path: /work
owner: root
group: root
mode: '1777'
- name: Create a symbolic link
file:
src: /file/to/link/to
dest: /path/to/symlink
owner: foo
group: foo
state: link
- name: Create two hard links
file:
src: '/tmp/{{ item.src }}'
dest: '{{ item.dest }}'
state: link
with_items:
- { src: x, dest: y }
- { src: z, dest: k }
- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
file:
path: /etc/foo.conf
state: touch
mode: u=rw,g=r,o=r
- name: Touch the same file, but add/remove some permissions
file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
- name: Touch the same file again, but don't change times; this makes the task idempotent
file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
modification_time: preserve
access_time: preserve
- name: Create a directory if it does not exist
file:
path: /etc/some_directory
state: directory
mode: '0755'
- name: Update modification and access time of given file
file:
path: /etc/some_file
state: file
modification_time: now
access_time: now
'''
RETURN = r'''
'''
import errno
import os
import shutil
import sys
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
# There will only be a single AnsibleModule object per module
module = None
class AnsibleModuleError(Exception):
def __init__(self, results):
self.results = results
def __repr__(self):
        return 'AnsibleModuleError(results={0})'.format(self.results)
class ParameterError(AnsibleModuleError):
pass
class Sentinel(object):
def __new__(cls, *args, **kwargs):
return cls
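# Sentinel is a marker distinct from None: __new__ returns the class object
# itself, so get_timestamp_for_time() below can return Sentinel for 'now'
# while reserving None for 'preserve'.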
def _ansible_excepthook(exc_type, exc_value, tb):
# Using an exception allows us to catch it if the calling code knows it can recover
if issubclass(exc_type, AnsibleModuleError):
module.fail_json(**exc_value.results)
else:
sys.__excepthook__(exc_type, exc_value, tb)
def additional_parameter_handling(params):
"""Additional parameter validation and reformatting"""
# When path is a directory, rewrite the pathname to be the file inside of the directory
# TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
# I think this is where we want to be in the future:
# when isdir(path):
# if state == absent: Remove the directory
# if state == touch: Touch the directory
# if state == directory: Assert the directory is the same as the one specified
# if state == file: place inside of the directory (use _original_basename)
# if state == link: place inside of the directory (use _original_basename. Fallback to src?)
# if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
basename = None
if params['_original_basename']:
basename = params['_original_basename']
elif params['src']:
basename = os.path.basename(params['src'])
if basename:
params['path'] = os.path.join(params['path'], basename)
# state should default to file, but since that creates many conflicts,
# default state to 'current' when it exists.
prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))
if params['state'] is None:
if prev_state != 'absent':
params['state'] = prev_state
elif params['recurse']:
params['state'] = 'directory'
else:
params['state'] = 'file'
# make sure the target path is a directory when we're doing a recursive operation
if params['recurse'] and params['state'] != 'directory':
raise ParameterError(results={"msg": "recurse option requires state to be 'directory'",
"path": params["path"]})
# Make sure that src makes sense with the state
if params['src'] and params['state'] not in ('link', 'hard'):
params['src'] = None
module.warn("The src option requires state to be 'link' or 'hard'. This will become an"
" error in Ansible 2.10")
# In 2.10, switch to this
# raise ParameterError(results={"msg": "src option requires state to be 'link' or 'hard'",
# "path": params["path"]})
def get_state(path):
''' Find out current state '''
b_path = to_bytes(path, errors='surrogate_or_strict')
try:
if os.path.lexists(b_path):
if os.path.islink(b_path):
return 'link'
elif os.path.isdir(b_path):
return 'directory'
elif os.stat(b_path).st_nlink > 1:
return 'hard'
# could be many other things, but defaulting to file
return 'file'
return 'absent'
except OSError as e:
if e.errno == errno.ENOENT: # It may already have been removed
return 'absent'
else:
raise
# This should be moved into the common file utilities
def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
changed = False
for b_root, b_dirs, b_files in os.walk(b_path):
for b_fsobj in b_dirs + b_files:
b_fsname = os.path.join(b_root, b_fsobj)
if not os.path.islink(b_fsname):
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
else:
# Change perms on the link
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
if follow:
b_fsname = os.path.join(b_root, os.readlink(b_fsname))
# The link target could be nonexistent
if os.path.exists(b_fsname):
if os.path.isdir(b_fsname):
# Link is a directory so change perms on the directory's contents
changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)
# Change perms on the file pointed to by the link
tmp_file_args = file_args.copy()
tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
return changed
def initial_diff(path, state, prev_state):
diff = {'before': {'path': path},
'after': {'path': path},
}
if prev_state != state:
diff['before']['state'] = prev_state
diff['after']['state'] = state
return diff
#
# States
#
def get_timestamp_for_time(formatted_time, time_format):
if formatted_time == 'preserve':
return None
elif formatted_time == 'now':
return Sentinel
else:
try:
struct = time.strptime(formatted_time, time_format)
struct_time = time.mktime(struct)
except (ValueError, OverflowError) as e:
raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
% (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
return struct_time
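# Illustrative return values for the function above (using the module's
# default format string): 'preserve' -> None, 'now' -> Sentinel, and e.g.
# get_timestamp_for_time('202001021530.00', '%Y%m%d%H%M.%S') -> seconds since
# the epoch for that local time.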
def update_timestamp_for_file(path, mtime, atime, diff=None):
try:
# When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
# https://github.com/ansible/ansible/issues/50943
if mtime is Sentinel and atime is Sentinel:
# It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
# not be updated. Just use the current time for the diff values
mtime = atime = time.time()
previous_mtime = os.stat(path).st_mtime
previous_atime = os.stat(path).st_atime
set_time = None
else:
            # If both parameters are None (i.e. 'preserve'), there is nothing to do
if mtime is None and atime is None:
return False
previous_mtime = os.stat(path).st_mtime
previous_atime = os.stat(path).st_atime
if mtime is None:
mtime = previous_mtime
elif mtime is Sentinel:
mtime = time.time()
if atime is None:
atime = previous_atime
elif atime is Sentinel:
atime = time.time()
# If both timestamps are already ok, nothing to do
if mtime == previous_mtime and atime == previous_atime:
return False
set_time = (atime, mtime)
os.utime(path, set_time)
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
if 'after' not in diff:
diff['after'] = {}
if mtime != previous_mtime:
diff['before']['mtime'] = previous_mtime
diff['after']['mtime'] = mtime
if atime != previous_atime:
diff['before']['atime'] = previous_atime
diff['after']['atime'] = atime
except OSError as e:
raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
% to_native(e, nonstring='simplerepr'), 'path': path})
return True
def keep_backward_compatibility_on_timestamps(parameter, state):
if state in ['file', 'hard', 'directory', 'link'] and parameter is None:
return 'preserve'
elif state == 'touch' and parameter is None:
return 'now'
else:
return parameter
def execute_diff_peek(path):
"""Take a guess as to whether a file is a binary file"""
b_path = to_bytes(path, errors='surrogate_or_strict')
appears_binary = False
try:
with open(b_path, 'rb') as f:
head = f.read(8192)
except Exception:
# If we can't read the file, we're okay assuming it's text
pass
else:
if b"\x00" in head:
appears_binary = True
return appears_binary
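# The check above is a common binary-detection heuristic: a NUL byte anywhere
# in the first 8192 bytes is taken as evidence that the file is not text.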
def ensure_absent(path):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
result = {}
if prev_state != 'absent':
if not module.check_mode:
if prev_state == 'directory':
try:
shutil.rmtree(b_path, ignore_errors=False)
except Exception as e:
raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
else:
try:
os.unlink(b_path)
except OSError as e:
if e.errno != errno.ENOENT: # It may already have been removed
raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
'path': path})
diff = initial_diff(path, 'absent', prev_state)
result.update({'path': path, 'changed': True, 'diff': diff})
else:
result.update({'path': path, 'changed': False})
return result
def execute_touch(path, follow, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
changed = False
result = {'dest': path}
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
if not module.check_mode:
if prev_state == 'absent':
# Create an empty file if the filename did not already exist
try:
open(b_path, 'wb').close()
changed = True
except (OSError, IOError) as e:
raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
# Update the attributes on the file
diff = initial_diff(path, 'touch', prev_state)
file_args = module.load_file_common_arguments(module.params)
try:
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except SystemExit as e:
if e.code:
# We take this to mean that fail_json() was called from
# somewhere in basic.py
if prev_state == 'absent':
# If we just created the file we can safely remove it
os.remove(b_path)
raise
result['changed'] = changed
result['diff'] = diff
return result
def ensure_file_attributes(path, follow, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
if prev_state != 'file':
if follow and prev_state == 'link':
# follow symlink and operate on original
b_path = os.path.realpath(b_path)
path = to_native(b_path, errors='strict')
prev_state = get_state(b_path)
file_args['path'] = path
if prev_state not in ('file', 'hard'):
# file is not absent and any other state is a conflict
raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
'path': path})
diff = initial_diff(path, 'file', prev_state)
changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'path': path, 'changed': changed, 'diff': diff}
def ensure_directory(path, follow, recurse, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# For followed symlinks, we need to operate on the target of the link
if follow and prev_state == 'link':
b_path = os.path.realpath(b_path)
path = to_native(b_path, errors='strict')
file_args['path'] = path
prev_state = get_state(b_path)
changed = False
diff = initial_diff(path, 'directory', prev_state)
if prev_state == 'absent':
# Create directory and assign permissions to it
if module.check_mode:
return {'changed': True, 'diff': diff}
curpath = ''
try:
# Split the path so we can apply filesystem attributes recursively
# from the root (/) directory for absolute paths or the base path
# of a relative path. We can then walk the appropriate directory
# path to apply attributes.
# Something like mkdir -p with mode applied to all of the newly created directories
for dirname in path.strip('/').split('/'):
curpath = '/'.join([curpath, dirname])
# Remove leading slash if we're creating a relative path
if not os.path.isabs(path):
curpath = curpath.lstrip('/')
b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
if not os.path.exists(b_curpath):
try:
os.mkdir(b_curpath)
changed = True
except OSError as ex:
# Possibly something else created the dir since the os.path.exists
# check above. As long as it's a dir, we don't need to error out.
if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
raise
tmp_file_args = file_args.copy()
tmp_file_args['path'] = curpath
changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except Exception as e:
raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
' %s' % (curpath, to_native(e)),
'path': path})
return {'path': path, 'changed': changed, 'diff': diff}
elif prev_state != 'directory':
# We already know prev_state is not 'absent', therefore it exists in some form.
raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
'path': path})
#
# previous state == directory
#
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
if recurse:
changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)
return {'path': path, 'changed': changed, 'diff': diff}
def ensure_symlink(path, src, follow, force, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# source is both the source of a symlink or an informational passing of the src for a template module
# or copy module, even if this module never uses it, it is needed to key off some things
if src is None:
if follow:
# use the current target of the link as the source
src = to_native(os.path.realpath(b_path), errors='strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
if not os.path.islink(b_path) and os.path.isdir(b_path):
relpath = path
else:
b_relpath = os.path.dirname(b_path)
relpath = to_native(b_relpath, errors='strict')
absrc = os.path.join(relpath, src)
b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
if not force and not os.path.exists(b_absrc):
raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
' really want to create the link: %s' % absrc,
'path': path, 'src': src})
if prev_state == 'directory':
if not force:
raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
% (prev_state, path),
'path': path})
elif os.listdir(b_path):
# refuse to replace a directory that has files in it
raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
' convert it' % path,
'path': path})
elif prev_state in ('file', 'hard') and not force:
raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
% (prev_state, path),
'path': path})
diff = initial_diff(path, 'link', prev_state)
changed = False
if prev_state == 'absent':
changed = True
elif prev_state == 'link':
b_old_src = os.readlink(b_path)
if b_old_src != b_src:
diff['before']['src'] = to_native(b_old_src, errors='strict')
diff['after']['src'] = src
changed = True
elif prev_state == 'hard':
changed = True
if not force:
raise AnsibleModuleError(results={'msg': 'Cannot link because a hard link exists at destination',
'dest': path, 'src': src})
elif prev_state == 'file':
changed = True
if not force:
raise AnsibleModuleError(results={'msg': 'Cannot link because a file exists at destination',
'dest': path, 'src': src})
elif prev_state == 'directory':
changed = True
if os.path.exists(b_path):
if not force:
raise AnsibleModuleError(results={'msg': 'Cannot link because a file exists at destination',
'dest': path, 'src': src})
else:
raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
if changed and not module.check_mode:
if prev_state != 'absent':
# try to replace atomically
b_tmppath = to_bytes(os.path.sep).join(
[os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
)
try:
if prev_state == 'directory':
os.rmdir(b_path)
os.symlink(b_src, b_tmppath)
os.rename(b_tmppath, b_path)
except OSError as e:
if os.path.exists(b_tmppath):
os.unlink(b_tmppath)
raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
else:
try:
os.symlink(b_src, b_path)
except OSError as e:
raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
if module.check_mode and not os.path.exists(b_path):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
# Whenever we create a link to a nonexistent target we know that the nonexistent target
# cannot have any permissions set on it. Skip setting those and emit a warning (the user
# can set follow=False to remove the warning)
if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
' set to False to avoid this.')
else:
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def ensure_hardlink(path, src, follow, force, timestamps):
b_path = to_bytes(path, errors='surrogate_or_strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
prev_state = get_state(b_path)
file_args = module.load_file_common_arguments(module.params)
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
# src is the source of a hardlink. We require it if we are creating a new hardlink
if src is None and not os.path.exists(b_path):
raise AnsibleModuleError(results={'msg': 'src and dest are required for creating new hardlinks'})
# Toshio: Begin suspect block
# I believe that this block of code is wrong for hardlinks.
# src may be relative.
# If it is relative, it should be relative to the cwd (so just use abspath).
# This is different from symlinks where src is relative to the symlink's path.
# Why must src be an absolute path?
if not os.path.isabs(b_src):
raise AnsibleModuleError(results={'msg': "src must be an absolute path"})
# If this is a link, then it can't be a dir so why is it in the conditional?
if not os.path.islink(b_path) and os.path.isdir(b_path):
relpath = path
else:
b_relpath = os.path.dirname(b_path)
relpath = to_native(b_relpath, errors='strict')
# Why? This does nothing because src was checked to be absolute above?
absrc = os.path.join(relpath, src)
b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
if not force and not os.path.exists(b_absrc):
raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
' really want to create the link: %s' % absrc,
'path': path, 'src': src})
# Toshio: end suspect block
diff = initial_diff(path, 'hard', prev_state)
changed = False
if prev_state == 'absent':
changed = True
elif prev_state == 'link':
b_old_src = os.readlink(b_path)
if b_old_src != b_src:
diff['before']['src'] = to_native(b_old_src, errors='strict')
diff['after']['src'] = src
changed = True
elif prev_state == 'hard':
        if os.stat(b_path).st_ino != os.stat(b_src).st_ino:
changed = True
if not force:
raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
'dest': path, 'src': src})
elif prev_state == 'file':
changed = True
if not force:
raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
'dest': path, 'src': src})
elif prev_state == 'directory':
changed = True
if os.path.exists(b_path):
if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
return {'path': path, 'changed': False}
elif not force:
raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
'dest': path, 'src': src})
else:
raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
if changed and not module.check_mode:
if prev_state != 'absent':
# try to replace atomically
b_tmppath = to_bytes(os.path.sep).join(
[os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
)
try:
if prev_state == 'directory':
if os.path.exists(b_path):
try:
os.unlink(b_path)
except OSError as e:
if e.errno != errno.ENOENT: # It may already have been removed
raise
os.link(b_src, b_tmppath)
os.rename(b_tmppath, b_path)
except OSError as e:
if os.path.exists(b_tmppath):
os.unlink(b_tmppath)
raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
else:
try:
os.link(b_src, b_path)
except OSError as e:
raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
% to_native(e, nonstring='simplerepr'),
'path': path})
if module.check_mode and not os.path.exists(b_path):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def main():
global module
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
path=dict(type='path', required=True, aliases=['dest', 'name']),
_original_basename=dict(type='str'), # Internal use only, for recursive ops
recurse=dict(type='bool', default=False),
force=dict(type='bool', default=False), # Note: Should not be in file_common_args in future
follow=dict(type='bool', default=True), # Note: Different default than file_common_args
_diff_peek=dict(type='str'), # Internal use only, for internal checks in the action plugins
src=dict(type='path'), # Note: Should not be in file_common_args in future
modification_time=dict(type='str'),
modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
access_time=dict(type='str'),
access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
),
add_file_common_args=True,
supports_check_mode=True,
)
# When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule
sys.excepthook = _ansible_excepthook
additional_parameter_handling(module.params)
params = module.params
state = params['state']
recurse = params['recurse']
force = params['force']
follow = params['follow']
path = params['path']
src = params['src']
timestamps = {}
timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
timestamps['modification_time_format'] = params['modification_time_format']
timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
timestamps['access_time_format'] = params['access_time_format']
# short-circuit for diff_peek
if params['_diff_peek'] is not None:
appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
module.exit_json(path=path, changed=False, appears_binary=appears_binary)
if state == 'file':
result = ensure_file_attributes(path, follow, timestamps)
elif state == 'directory':
result = ensure_directory(path, follow, recurse, timestamps)
elif state == 'link':
result = ensure_symlink(path, src, follow, force, timestamps)
elif state == 'hard':
result = ensure_hardlink(path, src, follow, force, timestamps)
elif state == 'touch':
result = execute_touch(path, follow, timestamps)
elif state == 'absent':
result = ensure_absent(path)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
vstorm/douban-fm-client | refs/heads/master | test/test_collect.py | 1 | # import nose
from test import client
def test_collect_channel():
cids = {"3000411": "You're Beautiful",
"3701080": "亲密爱人",
"3813365": "愛をこめて。海"}
for cid in cids.keys():
client.fm.collect_channel(cid=cid)
def test_collect_programmes():
pids = {"1364115": "日本BGM",
"300327": "会想起你,文艺青年",
"26265": "那些感动你的中国摇滚乐"}
for pid in pids.keys():
client.fm.collect_programme(pid=pid)
def test_like_song():
sids = {"1954748": "漂洋过海来看你",
"1455725": "公路",
"2005435": "离别赋"}
for sid in sids.keys():
client.fm.like_song(sid=sid)
# if __name__ == "__main__":
# nose.main() |
you21979/phantomjs | refs/heads/2.0 | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/earlywarningsystemtask.py | 127 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
class EarlyWarningSystemTaskDelegate(PatchAnalysisTaskDelegate):
pass
class EarlyWarningSystemTask(PatchAnalysisTask):
def __init__(self, delegate, patch, should_run_tests=True):
PatchAnalysisTask.__init__(self, delegate, patch)
self._should_run_tests = should_run_tests
def validate(self):
self._patch = self._delegate.refetch_patch(self._patch)
if self._patch.is_obsolete():
return False
if self._patch.bug().is_closed():
return False
if self._patch.review() == "-":
return False
return True
def run(self):
if not self.validate():
return False
if not self._clean():
return False
if not self._update():
return False
if not self._apply():
raise UnableToApplyPatch(self._patch)
if not self._build():
if not self._build_without_patch():
return False
return self.report_failure()
if not self._should_run_tests:
return True
return self._test_patch()
|
bbenko/django-dice | refs/heads/master | dice/views.py | 6027 | # Create your views here.
|
jasonthomas/zamboni | refs/heads/master | lib/geoip/tests/test_geoip.py | 19 | from random import randint
import mock
import requests
from nose.tools import eq_
import mkt.site.tests
from lib.geoip import GeoIP
def generate_settings(url='', default='restofworld', timeout=0.2):
return mock.Mock(GEOIP_URL=url, GEOIP_DEFAULT_VAL=default,
GEOIP_DEFAULT_TIMEOUT=timeout)
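# generate_settings builds a mock Django-style settings object so GeoIP can be
# constructed without a real settings module; e.g. generate_settings(url='localhost')
# makes lookups go to 'localhost/country.json'.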
class GeoIPTest(mkt.site.tests.TestCase):
@mock.patch('requests.post')
def test_lookup(self, mock_post):
url = 'localhost'
geoip = GeoIP(generate_settings(url=url))
mock_post.return_value = mock.Mock(status_code=200, json=lambda: {
'country_code': 'US',
'country_name': 'United States'
})
ip = '1.1.1.1'
result = geoip.lookup(ip)
mock_post.assert_called_with('{0}/country.json'.format(url),
timeout=0.2, data={'ip': ip})
eq_(result, 'us')
@mock.patch('requests.post')
def test_no_url(self, mock_post):
geoip = GeoIP(generate_settings())
result = geoip.lookup('2.2.2.2')
assert not mock_post.called
eq_(result, 'restofworld')
@mock.patch('requests.post')
def test_bad_request(self, mock_post):
url = 'localhost'
geoip = GeoIP(generate_settings(url=url))
mock_post.return_value = mock.Mock(status_code=404, json=lambda: None)
ip = '3.3.3.3'
result = geoip.lookup(ip)
mock_post.assert_called_with('{0}/country.json'.format(url),
timeout=0.2, data={'ip': ip})
eq_(result, 'restofworld')
@mock.patch('requests.post')
def test_timeout(self, mock_post):
url = 'localhost'
geoip = GeoIP(generate_settings(url=url))
mock_post.side_effect = requests.Timeout
ip = '3.3.3.3'
result = geoip.lookup(ip)
mock_post.assert_called_with('{0}/country.json'.format(url),
timeout=0.2, data={'ip': ip})
eq_(result, 'restofworld')
@mock.patch('requests.post')
def test_connection_error(self, mock_post):
url = 'localhost'
geoip = GeoIP(generate_settings(url=url))
mock_post.side_effect = requests.ConnectionError
ip = '3.3.3.3'
result = geoip.lookup(ip)
mock_post.assert_called_with('{0}/country.json'.format(url),
timeout=0.2, data={'ip': ip})
eq_(result, 'restofworld')
@mock.patch('requests.post')
def test_private_ip(self, mock_post):
url = 'localhost'
geoip = GeoIP(generate_settings(url=url))
addrs = [
'127.0.0.1',
'10.{0}.{1}.{2}'.format(randint(0, 255), randint(0, 255),
randint(0, 255)),
'192.168.{0}.{1}'.format(randint(0, 255), randint(0, 255)),
'172.{0}.{1}.{2}'.format(randint(16, 31), randint(0, 255),
randint(0, 255))
]
for ip in addrs:
result = geoip.lookup(ip)
assert not mock_post.called
eq_(result, 'restofworld')
|
kgraves/new-machine-bootstrap | refs/heads/master | fabfile.py | 1 | # TODO possibly start using visudo so we don't have to type in password during
# run of fabric. Although it's not a huge deal right now.
from fabric.api import abort
from fabric.api import env
from fabric.api import execute
from fabric.api import get
from fabric.api import local
from fabric.api import put
from fabric.api import run
from fabric.api import settings
from fabric.api import sudo
from fabric.colors import green
from fabric.colors import red
from fabric.context_managers import cd, lcd
from fabric.contrib.console import confirm
from fabric.decorators import parallel
from fabric.decorators import runs_once
from fabric.decorators import task
@task
def setup_machine():
"""
Main task that will run all other installation and setup tasks
"""
#TODO setup selections for certain packages (e.g. node installation)
execute('install_development_deps')
execute('install_db_stuff')
execute('install_java_stuff')
execute('install_node_stuff')
execute('install_python_stuff')
execute('install_ruby_stuff')
execute('install_xps13de_stuff')
execute('install_personal_stuff')
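# Typical invocation (assuming fabric is installed and this file is picked up
# as the project fabfile): `fab setup_machine`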
@task
def install_personal_stuff():
"""
Installs/Sets up all non-development packages
"""
execute('install_chrome')
execute('install_gnome_tweak_tool')
execute('install_gimp')
execute('install_flux')
execute('remove_depricated_bluetooth_package')
execute('install_spotify')
execute('install_lynx')
execute('install_image_magick')
@task
def install_chrome():
"""
Downloads chrome from google, and installs it via dpkg
"""
# TODO try to figure out how to install extensions.
# It looks as though there is a preferences file where you can specify
# this. see: https://developer.chrome.com/extensions/external_extensions#preferences
with lcd('/tmp'):
local('wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb')
local('sudo dpkg -i google-chrome-stable_current_amd64.deb')
@task
def install_gnome_tweak_tool():
"""
Install gnome-tweak-tool via apt-get
"""
local('sudo apt-get -y install gnome-tweak-tool')
@task
def install_gimp():
"""
Install gimp via apt-get
"""
local('sudo apt-get -y install gimp')
@task
def install_flux():
"""
Download flux tarball, untar, then put in /usr/local/bin
"""
    with lcd('/tmp'):
local('wget https://justgetflux.com/linux/xflux64.tgz')
local('tar xzvf xflux64.tgz')
        local('sudo cp xflux /usr/local/bin/xflux')
@task
def remove_depricated_bluetooth_package():
"""
Uninstalls deprecated bluetooth package.
NOTE This should only be run when this package is not used.
"""
local('sudo apt-get -y purge oem-wireless-bluetooth-intel-7260-dkms')
@task
def install_spotify():
"""
installs deps, adds apt source, and install spotify
"""
# install libgcrypt
with lcd('/tmp'):
local('wget -O libcrypt.deb https://launchpad.net/ubuntu/+archive/primary/+files/libgcrypt11_1.5.3-2ubuntu4.2_amd64.deb')
local('sudo dpkg -i libcrypt.deb')
# setup spotify apt source
    local('echo "deb http://repository.spotify.com stable non-free" | sudo tee -a /etc/apt/sources.list')
local('sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 94558F59')
local('sudo apt-get -y update')
# install spotify
local('sudo apt-get -y install spotify-client')
@task
def install_lynx():
"""
Install cli web browser
"""
local('sudo apt-get -y install lynx')
@task
def install_image_magick():
"""
Installs imagemagick
"""
local('sudo apt-get -y install imagemagick')
@task
def install_development_deps():
"""
Installs all development dependencies
"""
# deps via apt-get
deps = ' '.join([
        'build-essential',
'bzr',
'libc++1',
'git',
'python-dev',
'python-pip',
'vim'
])
local('sudo apt-get -y install %s' % deps)
# install heroku-tookbelt
local('wget -qO- https://toolbelt.heroku.com/install-ubuntu.sh | sh')
# install/setup vim stuff
    local('mkdir -p ~/.vim')
with lcd('~/.vim'):
local('mkdir autoload bundle colors')
with lcd('./autoload'):
local('wget https://tpo.pe/pathogen.vim')
with lcd('./bundle'):
local('git clone git@github.com:kien/ctrlp.vim.git')
local('git clone git@github.com:digitaltoad/vim-jade.git')
local('git clone git@github.com:plasticboy/vim-markdown.git')
with lcd('./colors'):
local('wget https://raw.githubusercontent.com/nanotech/jellybeans.vim/master/colors/jellybeans.vim')
@task
def install_db_stuff():
"""
Installs all dbs needed locally:
- mongodb
TODO figure out if there is a 'latest' release, so we do not have to settle
for a particular release.
"""
# install mongodb
with lcd('/tmp'):
# download tarball and untar
local('wget -O mongo3.0.3.tgz https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1410-clang-3.0.3.tgz')
        # untar into a predictable directory (the tarball's top-level directory
        # otherwise carries the full mongodb release name)
        local('mkdir -p mongo3.0.3 && tar xzvf mongo3.0.3.tgz -C mongo3.0.3 --strip-components=1')
        # copy all binaries to /usr/bin
        with lcd('./mongo3.0.3/bin'):
local('sudo cp ./* /usr/bin/')
# create mongo data direcotry
# TODO figure out which user will be running mongod and change permissions
# for that user/group
local('sudo mkdir -p /data/db')
local('sudo chmod 777 /data && sudo chmod 777 /data/db')
@task
def install_java_stuff():
"""
"""
# TODO
pass
@task
def install_node_stuff():
"""
install a more current version of node than what ubuntu ppa has.
I ran into an issue where I needed some sync functions in the fs package
that only the newer versions of node had.
"""
# TODO possibly merge some functionality with below node_stuff task into a
# general node_stuff function/task
with lcd('/tmp'):
# download and untar node
local('wget -O node-v0.12.2.tar.gz http://nodejs.org/dist/v0.12.2/node-v0.12.2-linux-x64.tar.gz')
local('tar xzvf node-v0.12.2.tar.gz')
# copy bin to /usr/bin
with lcd('./node-v0.12.2/bin'):
local('sudo cp ./node /usr/bin/node-v0.12.2')
# add node alias to .bashrc
local('echo "# add node alias" >> .bashrc')
local('echo "alias node="/usr/bin/node-v0.12.2"" >> .bashrc')
# globally install npm packages
packages = ' '.join([
'bower',
'express-generator'
])
local('npm install -g %s' % packages)
@task
def install_node_stuff_via_apt():
"""
install node via apt-get
NOTE This will install an older (not up to date) version of node. For a more
recent version of node, see the above function.
"""
# install node via apt-get
local('sudo apt-get -y install node')
# add npm/bin directory to path and add node alias
with lcd('~/'):
# just to ensure that .bashrc exists
local('touch .bashrc')
local('echo "# add npm bin directory to path" >> .bashrc')
local('echo "export PATH="\$PATH:\$HOME/npm/bin"" >> .bashrc')
local('echo "# add node alias" >> .bashrc')
local('echo "alias node="/usr/bin/nodejs"" >> .bashrc')
# set global npm directory to ~/npm, rather than /usr/local...
# permissions issues :(
local('npm config set prefix ~/npm')
# globally install npm packages
packages = ' '.join([
'bower',
'express-generator'
])
local('npm install -g %s' % packages)
@task
def install_python_stuff():
"""
Upgrade pip, install pip packages, and some virtualenv stuff.
NOTE dependencies are installed with install_development_deps task.
"""
local('sudo pip install --upgrade pip')
local('sudo pip install virtualenv')
# add virtualenv's activate alias
with lcd('~/'):
local('echo "alias aa="source ./env/bin/activate" >> .bashrc')
@task
def install_ruby_stuff():
"""
Install dependencies, rbenv
"""
# install deps
local('sudo apt-get -y install libffi-dev libgdbm-dev libncurses5-dev')
# install rbenv
with lcd('~/'):
local('git clone https://github.com/sstephenson/rbenv.git ~/.rbenv')
local('echo "export PATH=\'$HOME/.rbenv/bin:$PATH\'" >> ~/.bashrc')
local('echo "eval \'$(rbenv init -)\'" >> ~/.bashrc')
# for debugging
# should print 'rbenv is a function'
local('type rbenv')
@task
def install_rvm():
"""
This is NOT installed when ruby stuff is installed.
Rbenv is installed by default with ruby stuff. If you want to use rvm
instaed, you should uninstall rbenv first before running this.
"""
# TODO write task to uninstall rvm
local('gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3')
local('curl -L https://get.rvm.io | bash -s stable')
@task
def install_xps13de_stuff():
"""
Installs Dell xps13 de specific stuff, and makes some rc changes
"""
# TODO add change to /etc/rc.local
# TODO install libsmbios
pass
|
zerkrx/zerkbox | refs/heads/develop | lib/pip/commands/hash.py | 514 | from __future__ import absolute_import
import hashlib
import logging
import sys
from pip.basecommand import Command
from pip.status_codes import ERROR
from pip.utils import read_chunks
from pip.utils.hashes import FAVORITE_HASH, STRONG_HASHES
logger = logging.getLogger(__name__)
class HashCommand(Command):
"""
Compute a hash of a local package archive.
These can be used with --hash in a requirements file to do repeatable
installs.
"""
name = 'hash'
usage = '%prog [options] <file> ...'
summary = 'Compute hashes of package archives.'
def __init__(self, *args, **kw):
super(HashCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-a', '--algorithm',
dest='algorithm',
choices=STRONG_HASHES,
action='store',
default=FAVORITE_HASH,
help='The hash algorithm to use: one of %s' %
', '.join(STRONG_HASHES))
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
self.parser.print_usage(sys.stderr)
return ERROR
algorithm = options.algorithm
for path in args:
logger.info('%s:\n--hash=%s:%s',
path, algorithm, _hash_of_file(path, algorithm))
def _hash_of_file(path, algorithm):
"""Return the hash digest of a file."""
with open(path, 'rb') as archive:
hash = hashlib.new(algorithm)
for chunk in read_chunks(archive):
hash.update(chunk)
return hash.hexdigest()
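# Example invocation (illustrative): `pip hash --algorithm sha256 pkg.tar.gz`
# prints a `--hash=sha256:...` line suitable for a requirements file.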
|
aospan/linux-stable-netup-universal-dvb-1.4 | refs/heads/master | tools/perf/scripts/python/netdev-times.py | 1544 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
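#
# Assumed typical usage via the wrapper scripts shipped with perf
# (following perf's scripts/python/bin convention):
#   perf script record netdev-times
#   perf script report netdev-times dev=eth0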
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes the time of NET_RX softirq-entry
                 # and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
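# e.g. diff_msec(1000000000, 1002500000) == 2.5 (nsec timestamps, msec result)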
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
callchain, irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
Leaderman/pyspark | refs/heads/master | 1.5.1/examples/sql/spark_sql_dataframe_select.py | 1 | from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext, Row
conf = SparkConf().setAppName("spark_sql_dataframe_select")
sc = SparkContext(conf=conf)
sqlCtx = SQLContext(sc)
lines = sc.parallelize(["a,1", "b,2", "c,3"])
people = lines.map(lambda line: line.split(",")).map(
lambda words: Row(name=words[0], age=words[1]))
schemaPeople = sqlCtx.createDataFrame(people)
schemaPeople.select("*").show()
schemaPeople.select("name", "age").show()
schemaPeople.select("name", schemaPeople["age"]).show()
# error schemaPeople.select("name", schemaPeople2["age"]).show()
# error schemaPeople.select("name", "age * 2").show()
schemaPeople.select(schemaPeople["name"].alias(
"name2"), schemaPeople.age.cast("int").alias("age2")).show()
sc.stop()
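# Illustrative (output not verified here): Row sorts keyword fields
# alphabetically in this PySpark version, so select("*").show() prints the
# columns as age, name:
#
# +---+----+
# |age|name|
# +---+----+
# |  1|   a|
# |  2|   b|
# |  3|   c|
# +---+----+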
|
liamgh/liamgreenhughes-sl4a-tf101 | refs/heads/master | python/src/Lib/bsddb/test/test_thread.py | 33 | """TestCases for multi-threaded access to a DB.
"""
import os
import sys
import time
import errno
from random import random
DASH = '-'
try:
WindowsError
except NameError:
class WindowsError(Exception):
pass
import unittest
from test_all import db, dbutils, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class BaseThreadedTestCase(unittest.TestCase):
dbtype = db.DB_UNKNOWN # must be set in derived class
dbopenflags = 0
dbsetflags = 0
envflags = 0
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self):
if verbose:
dbutils._deadlock_VerboseFile = sys.stdout
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.setEnvOpts()
self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
self.filename = self.__class__.__name__ + '.db'
self.d = db.DB(self.env)
if self.dbsetflags:
self.d.set_flags(self.dbsetflags)
self.d.open(self.filename, self.dbtype, self.dbopenflags|db.DB_CREATE)
def tearDown(self):
self.d.close()
self.env.close()
test_support.rmtree(self.homeDir)
def setEnvOpts(self):
pass
def makeData(self, key):
return DASH.join([key] * 5)
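        # e.g. self.makeData('0001') -> '0001-0001-0001-0001-0001'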
#----------------------------------------------------------------------
class ConcurrentDataStoreBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL
readers = 0 # derived class should set
writers = 0
records = 1000
def test01_1WriterMultiReaders(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_1WriterMultiReaders..." % \
self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers = []
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers=[]
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
        start, stop = min(keys), max(keys)  # key range for this writer
        if verbose:
            print "%s: creating records %d - %d" % (name, start, stop)
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print "%s: finished creating records" % name
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
for i in xrange(5) :
c = d.cursor()
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose:
print "%s: found %d records" % (name, count)
c.close()
if verbose:
print "%s: thread finished" % name
class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
dbtype = db.DB_BTREE
writers = 2
readers = 10
records = 1000
class HashConcurrentDataStore(ConcurrentDataStoreBase):
dbtype = db.DB_HASH
writers = 2
readers = 10
records = 1000
#----------------------------------------------------------------------
class SimpleThreadedBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
readers = 10
writers = 2
records = 1000
def setEnvOpts(self):
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
def test02_SimpleLocks(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_SimpleLocks..." % self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers = []
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
        start, stop = min(keys), max(keys)  # key range for this writer
        if verbose:
            print "%s: creating records %d - %d" % (name, start, stop)
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
c = d.cursor()
count = 0
rec = dbutils.DeadlockWrap(c.first, max_retries=10)
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = dbutils.DeadlockWrap(c.next, max_retries=10)
if verbose:
print "%s: found %d records" % (name, count)
c.close()
if verbose:
print "%s: thread finished" % name
class BTreeSimpleThreaded(SimpleThreadedBase):
dbtype = db.DB_BTREE
class HashSimpleThreaded(SimpleThreadedBase):
dbtype = db.DB_HASH
#----------------------------------------------------------------------
class ThreadedTransactionsBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
envflags = (db.DB_THREAD |
db.DB_INIT_MPOOL |
db.DB_INIT_LOCK |
db.DB_INIT_LOG |
db.DB_INIT_TXN
)
readers = 0
writers = 0
records = 2000
txnFlag = 0
def setEnvOpts(self):
#self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
pass
def test03_ThreadedTransactions(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_ThreadedTransactions..." % \
self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers=[]
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
dt = Thread(target = self.deadlockThread)
import sys
if sys.version_info[0] < 3 :
dt.setDaemon(True)
else :
dt.daemon = True
dt.start()
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
self.doLockDetect = False
dt.join()
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
        start = min(keys)  # lowest key, used only in the verbose log below
        count=len(keys)//len(readers)
while len(keys):
try:
txn = self.env.txn_begin(None, self.txnFlag)
keys2=keys[:count]
for x in keys2 :
key = '%04d' % x
d.put(key, self.makeData(key), txn)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
txn.commit()
keys=keys[count:]
readers.pop().start()
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name, val[1])
txn.abort()
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
finished = False
while not finished:
try:
txn = self.env.txn_begin(None, self.txnFlag)
c = d.cursor(txn)
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose: print "%s: found %d records" % (name, count)
c.close()
txn.commit()
finished = True
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
print "%s: Aborting transaction (%s)" % (name, val[1])
c.close()
txn.abort()
if verbose:
print "%s: thread finished" % name
def deadlockThread(self):
self.doLockDetect = True
while self.doLockDetect:
time.sleep(0.05)
try:
aborted = self.env.lock_detect(
db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
if verbose and aborted:
print "deadlock: Aborted %d deadlocked transaction(s)" \
% aborted
except db.DBError:
pass
class BTreeThreadedTransactions(ThreadedTransactionsBase):
dbtype = db.DB_BTREE
writers = 2
readers = 10
records = 1000
class HashThreadedTransactions(ThreadedTransactionsBase):
dbtype = db.DB_HASH
writers = 2
readers = 10
records = 1000
class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
dbtype = db.DB_BTREE
writers = 2
readers = 10
records = 1000
txnFlag = db.DB_TXN_NOWAIT
class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
dbtype = db.DB_HASH
writers = 2
readers = 10
records = 1000
txnFlag = db.DB_TXN_NOWAIT
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if have_threads:
suite.addTest(unittest.makeSuite(BTreeConcurrentDataStore))
suite.addTest(unittest.makeSuite(HashConcurrentDataStore))
suite.addTest(unittest.makeSuite(BTreeSimpleThreaded))
suite.addTest(unittest.makeSuite(HashSimpleThreaded))
suite.addTest(unittest.makeSuite(BTreeThreadedTransactions))
suite.addTest(unittest.makeSuite(HashThreadedTransactions))
suite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions))
suite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions))
else:
print "Threads not available, skipping thread tests."
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
zenodo/invenio | refs/heads/zenodo-master | invenio/legacy/websubmit/functions/Link_Records.py | 13 |
# This file is part of Invenio.
# Copyright (C) 2012, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This function schedules a BibUpload append that will create a symmetric link
between two records based on the MARC field 787 OTHER RELATIONSHIP ENTRY (R)
(or based on other MARC fields, see the parameters C{directRelationshipMARC}
and C{reverseRelationshipMARC}).
787 OTHER RELATIONSHIP ENTRY (R)
Indicators
First Note controller
0 - Display note (in $i)
1 - Do not display note
Subfield Code(s)
$i Relationship information (R) - [CER]
$r Report number
$w Record control number (R) - [CER]
NOTE: Used to link Conference papers and Slides records ($i Conference paper/Slides - $w CDS recid)
Example:
http://cds.cern.ch/record/1372158
7870_ $$iSlides$$rLHCb-TALK-2011-087$$w1353576
We need to include in the submission form for LHCb-PROC a field for the related repnr, from which to create the 7870 field. It would be perfect if at the same time the inverse 7870 field could be inserted in the TALK record:
7870_ $$iConference paper$$rLHCb-PROC-2011-041$$w1372158
"""
import re
import tempfile
import time
import os
from os.path import exists, join
from invenio.legacy.bibrecord import \
record_xml_output, \
record_add_field, \
record_add_fields, \
record_get_field_instances, \
create_record
from invenio.modules.formatter import format_record
from invenio.modules.formatter.api import get_tag_from_name
from invenio.legacy.search_engine import search_pattern, get_fieldvalues
from invenio.config import CFG_TMPDIR
from invenio.legacy.bibsched.bibtask import task_low_level_submission, bibtask_allocate_sequenceid
from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError
CFG_OTHER_RELATIONSHIP_ENTRY = (get_tag_from_name('other relationship entry') or '787')[:3]
CFG_PRIMARY_REPORTNUMBER = get_tag_from_name('primary report number') or '037__a'
RE_FILENAME = re.compile("\\<pa\\>file\\:(.+)\\<\\/pa\\>", re.I)
def Link_Records(parameters, curdir, form, user_info=None):
"""
    This function creates a MARC link between two records (the first specified
    by the edsrn file or SN, the second specified by the edsrn2 file, which can
    contain either the report number or directly the recid).
Parameters:
* edsrn: the file containing the report number or recid of the
first record (A) to be linked.
* edsrn2: the file containing the report number(s) or recid(s) of
the second record(s) (B) to be linked (one value per line).
* In "directRelationship" you should specify either the name of a file (by using
<pa>file:filename</pa>) or directly, what is the relationship
of the second record to be stored in the metadata of the 1st record (A->B).
Use the keyword "none" if you explicitely want to skip the recording of
this relation (no modification of record A).
* In the value/file "reverseRelationship" you can similarly specify the other
direction of the arrow (B->A)
Use the keyword "none" if you explicitely want to skip the recording of
this relation (no modification of record(s) B).
* keep_original_edsrn2: if edsrn2 is a report number, should we
use it as label when linking, or shall we use instead the
report number retrieved from the matching record?
* directRelationshipMARC: in which MARC tag + indicators shall we
store the relation in the first record (A). By default uses the
value found in tag name "other relationship entry" or 7870_.
      The value can be directly provided or specified in a file (using
<pa>file:filename</pa>)
* reverseRelationshipMARC: in which MARC tag + indicators shall we
store the relation in the second record (B). By default uses the
value found in tag name "other relationship entry" or 7870_.
      The value can be directly provided or specified in a file (using
<pa>file:filename</pa>)
* bibuploadMode: shall the created XML be sent in --append mode
(default) or using --correct. Possible values are:
* append (or leave empty)
* correct
This setting will depend on how you have set up your
submisson workflow.
* silentFailures: if set to "True", do not raise an exception
      when the linking fails because the corresponding "remote"
      record(s) B cannot be retrieved (e.g. non-existing report
      number, report number matching several records, etc.). In these
cases the faulty report number is ignored.
* considerEmpty: when using bibuploadMode with 'correct', should
      missing linking information (edsrn2 values) remove the linking,
      or simply do nothing? You might want to tweak this setting
depending on how the submission is presenting MBI pages (either
full form, or selected fields). If False (or empty), and no
linking information is provided, the linking is not removed
from the original record. If True (or any other value), and no
linking information is provided, the linking is removed from
the record. The value can be directly provided or specifed in
file (using <pa>file:filename</pa>)
"""
global sysno
edsrn = parameters["edsrn"]
edsrn2 = parameters["edsrn2"]
direct_relationship = parameters["directRelationship"]
reverse_relationship = parameters["reverseRelationship"]
keep_original_edsrn2 = parameters.get("keep_original_edsrn2", "True")
if keep_original_edsrn2 == "True":
keep_original_edsrn2 = True
elif keep_original_edsrn2 == "False":
keep_original_edsrn2 = False
else:
keep_original_edsrn2 = True
direct_relationship_MARC = parameters["directRelationshipMARC"]
reverse_relationship_MARC = parameters["reverseRelationshipMARC"]
bibupload_mode = parameters["bibuploadMode"]
if not bibupload_mode in ('append', 'correct'):
bibupload_mode = 'append'
silent_failures_p = parameters.get("silentFailures", "True") == 'True'
consider_empty_p = parameters.get("considerEmpty", "False")
g = RE_FILENAME.match(consider_empty_p)
if g:
filename = g.group(1)
if exists(join(curdir, filename)):
consider_empty_p = open(join(curdir, filename)).read().strip()
else:
consider_empty_p = ''
if consider_empty_p in ('False', ''):
consider_empty_p = False
else:
consider_empty_p = True
recid_a = int(sysno)
if exists(join(curdir, edsrn)):
rn_a = open(join(curdir, edsrn)).read().strip()
else:
rn_a = ""
if not rn_a:
try:
recid_a, rn_a = get_recid_and_reportnumber(recid=sysno)
except ValueError as err:
raise InvenioWebSubmitFunctionError("Error in finding the current record and its reportnumber: %s" % err)
g = RE_FILENAME.match(direct_relationship)
if g:
filename = g.group(1)
if exists(join(curdir, filename)):
direct_relationship = open(join(curdir, filename)).read().strip()
if not direct_relationship:
raise InvenioWebSubmitFunctionError("Can not retrieve direct relationship")
elif direct_relationship == 'none':
direct_relationship = None
g = RE_FILENAME.match(reverse_relationship)
if g:
filename = g.group(1)
if exists(join(curdir, filename)):
reverse_relationship = open(join(curdir, filename)).read().strip()
if not reverse_relationship:
raise InvenioWebSubmitFunctionError("Can not retrieve reverse relationship")
elif reverse_relationship == 'none':
reverse_relationship = None
g = RE_FILENAME.match(direct_relationship_MARC)
if g:
filename = g.group(1)
if exists(join(curdir, filename)):
direct_relationship_MARC = open(join(curdir, filename)).read().strip()
g = RE_FILENAME.match(reverse_relationship_MARC)
if g:
filename = g.group(1)
if exists(join(curdir, filename)):
reverse_relationship_MARC = open(join(curdir, filename)).read().strip()
recids_and_rns_b = []
if exists(join(curdir, edsrn2)):
for rn_b in open(join(curdir, edsrn2)).readlines():
rn_b = rn_b.strip()
if not rn_b:
continue
if rn_b.isdigit():
recid_b = int(rn_b)
rn_b = ""
try:
recid_b, rn_b = get_recid_and_reportnumber(recid=recid_b)
                except ValueError as err:
if silent_failures_p:
continue
raise
else:
try:
recid_b, rn_b = get_recid_and_reportnumber(reportnumber=rn_b,
keep_original_reportnumber=keep_original_edsrn2)
                except ValueError as err:
if silent_failures_p:
continue
raise
recids_and_rns_b.append((recid_b, rn_b))
if not recids_and_rns_b and bibupload_mode == 'append':
return ""
marcxml = _prepare_marcxml(recid_a, rn_a, recids_and_rns_b, reverse_relationship, direct_relationship,
marc_for_a=direct_relationship_MARC, marc_for_b=reverse_relationship_MARC,
upload_mode=bibupload_mode, consider_empty_p=consider_empty_p)
fd, name = tempfile.mkstemp(dir=CFG_TMPDIR, prefix="%s_%s" % \
(rn_a.replace('/', '_'),
time.strftime("%Y-%m-%d_%H:%M:%S")), suffix=".xml")
try:
os.write(fd, marcxml)
finally:
os.close(fd)
sequence_id = bibtask_allocate_sequenceid(curdir)
bibupload_id = task_low_level_submission('bibupload', 'websubmit.Link_Records', '--' + bibupload_mode, name, '-P', '3', '-I', str(sequence_id))
open(join(curdir, 'bibupload_link_record_id'), 'w').write(str(bibupload_id))
return ""
def get_recid_and_reportnumber(recid=None, reportnumber=None, keep_original_reportnumber=True):
"""
Given at least a recid or a reportnumber, this function will look into
the system for the matching record and will return a normalized
recid and the primary reportnumber.
@raises ValueError: in case of no record matched.
"""
if recid:
## Recid specified receives priority.
recid = int(recid)
values = get_fieldvalues(recid, CFG_PRIMARY_REPORTNUMBER)
if values:
## Let's take whatever reportnumber is stored in the matching record
reportnumber = values[0]
return recid, reportnumber
else:
raise ValueError("The record %s does not have a primary report number" % recid)
elif reportnumber:
## Ok reportnumber specified, let's better try 1st with primary and then
## with other reportnumber
recids = search_pattern(p='%s:"%s"' % (CFG_PRIMARY_REPORTNUMBER, reportnumber))
if not recids:
## Not found as primary
recids = search_pattern(p='reportnumber:"%s"' % reportnumber)
if len(recids) > 1:
raise ValueError('More than one record matches the reportnumber "%s": %s' % (reportnumber, ', '.join([str(i) for i in recids])))
elif len(recids) == 1:
recid = list(recids)[0]
if keep_original_reportnumber:
return recid, reportnumber
else:
reportnumbers = get_fieldvalues(recid, CFG_PRIMARY_REPORTNUMBER)
if not reportnumbers:
raise ValueError("The matched record %s does not have a primary report number" % recid)
return recid, reportnumbers[0]
else:
raise ValueError("No records are matched by the provided reportnumber: %s" % reportnumber)
raise ValueError("At least the recid or the reportnumber must be specified")
def get_unlinked_records(recid_a, marc_for_b, display_in_b, upload_mode, recids_and_rns_b):
"""
Retrieve list of recids that were already linked to recid_a using
this relation (marc_for_b), and that should no longer be linked
after this update (in 'correct' mode) as they are no longer part of
recids_and_rns_b.
"""
unlinked_recids = []
if upload_mode == 'correct':
marc_tag_for_b, marc_ind1_for_b, marc_ind2_for_b = \
_prepare_marc(marc_for_b, CFG_OTHER_RELATIONSHIP_ENTRY, display_in_b and "0" or "1")
already_linked_recids = search_pattern(p=str(recid_a), m='e', f=marc_tag_for_b + marc_ind1_for_b + marc_ind2_for_b + 'w')
to_be_linked_recids = [recid for recid, rn in recids_and_rns_b]
unlinked_recids = [recid for recid in already_linked_recids if not recid in to_be_linked_recids]
return unlinked_recids
def _prepare_marcxml(recid_a, rn_a, recids_and_rns_b, what_is_a_for_b, what_is_b_for_a, display_in_a=True, display_in_b=True, marc_for_a=None, marc_for_b=None, upload_mode='append', consider_empty_p=False):
output = '<collection>'
record_a = {}
record_b = {}
if what_is_b_for_a is not None:
marc_tag_for_a, marc_ind1_for_a, marc_ind2_for_a = \
_prepare_marc(marc_for_a, CFG_OTHER_RELATIONSHIP_ENTRY, display_in_a and "0" or "1")
record_add_field(record_a, "001", controlfield_value=str(recid_a))
if upload_mode == 'correct' and not recids_and_rns_b and consider_empty_p:
# Add empty field in order to account for cases where all
# linkings are removed by the submitter
record_add_field(record_a, marc_tag_for_a, ind1=marc_ind1_for_a, ind2=marc_ind2_for_a)
for recid_b, rn_b in recids_and_rns_b:
record_add_field(record_a, marc_tag_for_a, ind1=marc_ind1_for_a, ind2=marc_ind2_for_a,
subfields=[('i', what_is_b_for_a), ('r', rn_b), ('w', str(recid_b))])
output += record_xml_output(record_a)
if what_is_a_for_b is not None:
marc_tag_for_b, marc_ind1_for_b, marc_ind2_for_b = \
_prepare_marc(marc_for_b, CFG_OTHER_RELATIONSHIP_ENTRY, display_in_b and "0" or "1")
for recid_b, rn_b in recids_and_rns_b:
record_b = {}
record_add_field(record_b, "001", controlfield_value=str(recid_b))
if upload_mode == 'correct':
original_linking_fields = _get_record_linking_fields(recid_b, recid_a, marc_tag_for_b, marc_ind1_for_b, marc_ind2_for_b)
record_add_fields(record_b, marc_tag_for_b, original_linking_fields)
record_add_field(record_b, marc_tag_for_b, ind1=marc_ind1_for_b, ind2=marc_ind2_for_b,
subfields=[('i', what_is_a_for_b), ('r', rn_a), ('w', str(recid_a))])
output += record_xml_output(record_b)
# Remove linking in remote records where adequate
if consider_empty_p:
unlinked_recids = get_unlinked_records(recid_a, marc_for_b, display_in_b, upload_mode, recids_and_rns_b)
for recid_b in unlinked_recids:
record_b = {}
record_add_field(record_b, "001", controlfield_value=str(recid_b))
original_linking_fields = _get_record_linking_fields(recid_b, recid_a, marc_tag_for_b, marc_ind1_for_b, marc_ind2_for_b)
if not original_linking_fields:
# Add empty field in order to account for cases where all
# linkings are removed by the submitter
record_add_field(record_b, marc_tag_for_b, ind1=marc_ind1_for_b, ind2=marc_ind2_for_b)
record_add_fields(record_b, marc_tag_for_b, original_linking_fields)
output += record_xml_output(record_b)
output += '</collection>'
return output
def _get_record_linking_fields(recid_b, recid_a, tag, ind1, ind2):
"""
Returns the fields (defined by tag, ind1, ind2) in record (given
by recid_b) that do not link to another given record (recid_a).
"""
fields = []
rec = create_record(format_record(recid_b, "xm"))[0]
for field_instance in record_get_field_instances(rec, tag=tag, ind1=ind1, ind2=ind2):
if not ('w', str(recid_a)) in field_instance[0]:
fields.append(field_instance)
return fields
def _prepare_marc(marc_txt, default_tag, default_ind1=" ", default_ind2=" "):
"""Returns (tag, ind1, ind2) tuple by parsing input marc_txt and
falling back to default value if needed"""
marc_tag = default_tag
marc_ind1 = default_ind1
marc_ind2 = default_ind2
if marc_txt:
if len(marc_txt) > 2:
marc_tag = marc_txt[:3]
if len(marc_txt) > 3:
marc_ind1 = marc_txt[3]
if len(marc_txt) > 4:
marc_ind2 = marc_txt[4]
return (marc_tag, marc_ind1, marc_ind2)
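# e.g. _prepare_marc("78705", "787") -> ("787", "0", "5")
#      _prepare_marc(None, "787")    -> ("787", " ", " ")  # falls back to defaults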
|
mikelj/h-store | refs/heads/master | src/benchmarks/edu/brown/benchmark/auctionmark/docs/ddl2latex.py | 9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import string
table_regex = re.compile("CREATE TABLE ([A-Z\_]+)[\s]+\(", re.IGNORECASE)
col_regex = re.compile("[\s]+([a-z\_]+)[\s]+([A-Z]+).*?(?:REFERENCES[\s]+([A-Z\_]+)[\s]+\([\s]*([a-z\_]+)[\s]*\))?[\s]*,")
headers = [ 'Column', 'Type', 'Cardinality', 'References', 'Description' ]
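# Example invocation (file names are illustrative assumptions):
#   python ddl2latex.py < auctionmark-ddl.sql > tables.tex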
for line in sys.stdin:
match = table_regex.match(line)
if match:
table_name = match.group(1)
columns = [ ]
for line in sys.stdin:
match = col_regex.match(line)
if match:
attributes = match.groups()
columns.append(map(lambda x: string.replace(x, '_', '\_') if x else None, attributes))
else:
break
## FOR
## Latex Output
#print '%', table_name
print '\\subsubsection{\\texttt{%s}}' % table_name.replace('_', '\_')
print """
\\begin{tabular}{ll}
Number of Records: & TODO \\\\
Average Tuple Size: & TODO \\\\
Total Size: & TODO \\\\
\\end{tabular}
\\vspace*{0.1in}
""";
print '\\noindent \\begin{tabular*}{0.75\\textwidth}{@{\\extracolsep{\\fill}} lllll}'
print ' & '.join([ '\\textbf{%s}' % col for col in headers ]) + " \\\\"
for col in columns:
try:
col_name = "%-18s" % col[0]
col_type = "%-10s" % col[1]
col_fkey = "%-15s" % ("%s.%s" % (col[2], col[3]) if col[2] else '-')
print " & ".join([col_name, col_type, 'TODO', col_fkey, '-' ]) + " \\\\"
except:
print "Failed to output:", col
raise
## FOR
print "\\end{tabular*}"
print
## IF (table)
## FOR |
kervi/kervi | refs/heads/master | kervi/kervi/plugin/message_bus/kervi_bus.py | 1 |
class KerviBus:
def __init__(self, config):
pass
@property
def is_connected(self):
raise NotImplementedError
def reset(self, process_id, signal_port, ip=None, root_address=None, event_port=None):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def send_command(self, command, *args, **kwargs):
raise NotImplementedError
def register_command_handler(self, command, func, **kwargs):
raise NotImplementedError
def unregister_command_handler(self, command, func, **kwargs):
raise NotImplementedError
def trigger_event(self, event, id, *args, **kwargs):
raise NotImplementedError
def register_event_handler(self, event, func, component_id=None, **kwargs):
raise NotImplementedError
def send_query(self, query, *args, **kwargs):
raise NotImplementedError
def register_query_handler(self, query, func, **kwargs):
raise NotImplementedError
def unregister_query_handler(self, query, func, **kwargs):
raise NotImplementedError |
zvolsky/codex2020 | refs/heads/master | controllers/library.py | 1 | # -*- coding: utf-8 -*-
from mzutils import slugify
from plugin_mz import formstyle_bootstrap3_compact_factory
from dal_common import hide_all_fields
# from dal_common import ... # in choose_library()
@auth.requires_login()
def choose_library():
"""
    request.args(0): missing -> show my libraries; 'all' -> show all
    accessible libraries; '<id>' -> set library <id> as the active one
"""
from dal_common import get_my_libraries_with_names, get_all_libraries
def set_this(library_id):
session.library_id = library_id
redirect(URL('default', 'index'))
spec_request = request.args(0)
admin = auth.has_membership('admin')
active = accessible = None
my_rw = []
my_ro = []
my_libraries = get_my_libraries_with_names()
if spec_request:
if spec_request == 'all':
accessible = get_all_libraries(admin=admin,
exclude_ids=[library.library.id for library in my_libraries])
else: # Set the library
if admin:
row = db(db.library.id == spec_request).select(db.library.id).first()
if row:
set_this(row.id)
else:
for row in my_libraries:
if str(row.library.id) == spec_request:
set_this(row.library.id) # contains redirect
if session.library_id:
row = db(db.library.id == session.library_id).select(db.library.library).first()
if row:
active = row.library # for admin this is not always inside the next cycle
for library in my_libraries:
if not active and library.auth_lib.rw:
session.library_id = library.library.id
active = library.library.library
elif active and session.library_id == library.library.id:
pass
elif library.auth_lib.rw:
my_rw.append(library)
else:
my_ro.append(library)
return dict(active=active, my_rw=my_rw, my_ro=my_ro, accessible=accessible, admin=admin)
@auth.requires_login()
def new():
"""will create a library"""
hide_all_fields(db.library)
db.library.library.writable = db.library.library.readable = True
db.library.ltype.writable = db.library.ltype.readable = True
form = SQLFORM(db.library,
formstyle=formstyle_bootstrap3_compact_factory(),
submit_button=T("Vytvořit knihovnu"))
if not auth.library_id:
form.vars.library = T("zkušební") + ' ' + auth.user.username
if form.process().accepted:
__clear_libstyle()
auth_lib_id = db.auth_lib.insert(auth_user_id=auth.user_id, library_id=form.vars.id, rw=True)
now = datetime.datetime.now()
db.lib_rights.insert(auth_lib_id=auth_lib_id, allowed='W', given=now)
db.lib_rights.insert(auth_lib_id=auth_lib_id, allowed='A', given=now)
auth.library_id = form.vars.id
redirect(URL('library', args='home'))
return dict(form=form)
def library():
"""edit library info"""
if not auth.library_id or not auth.user_id:
redirect(URL('choose_library'))
important = db(db.library.id == auth.library_id).select(
db.library.imp_system, db.library.slug, db.library.library).first()
if not important:
redirect(URL('choose_library'))
ownership = db((db.auth_lib.auth_user_id == auth.user_id) & (db.auth_lib.library_id == auth.library_id) & (db.auth_lib.rw == True)).select().first()
if not ownership:
redirect(URL('choose_library'))
section = request.args(0) or 'home'
hide_all_fields(db.library)
if section == 'home':
db.library.library.writable = db.library.library.readable = True
db.library.street.writable = db.library.street.readable = True
db.library.city.writable = db.library.city.readable = True
db.library.plz.writable = db.library.plz.readable = True
db.library.ltype.writable = db.library.ltype.readable = True
elif section == 'software':
db.library.old_system.writable = db.library.old_system.readable = True
db.library.imp_system.writable = db.library.imp_system.readable = True
elif section == 'publish':
db.library.is_public.writable = db.library.is_public.readable = True
db.library.slug.writable = db.library.slug.readable = True
db.library.news_cnt.writable = db.library.news_cnt.readable = True
if not important.slug and important.library:
db.library[auth.library_id] = dict(slug=slugify(important.library))
db.commit()
form = SQLFORM(db.library, auth.library_id,
formstyle=formstyle_bootstrap3_compact_factory(),
submit_button=T("Uložit"))
if form.process().accepted:
__clear_libstyle()
response.flash = T("Uloženo")
return dict(form=form, important=important, section=section)
def __clear_libstyle(): # session.libstyle will be recreated when needed
if 'libstyle' in session:
del session.libstyle
@auth.requires_login()
def places():
db.place.id.readable = False
grid = SQLFORM.grid(db.place,
searchable=False,
showbuttontext=False,
csv=False
)
return dict(grid=grid)
@auth.requires_login()
def stgri():
db.stat_group.id.readable = False
db.stat_group.tbl.readable = db.stat_group.tbl.writable = False
db.stat_group.tbl.default = 'I'
grid = SQLFORM.grid(db.stat_group,
searchable=False,
showbuttontext=False,
csv=False
)
db.stat_group._common_filter = lambda query: (db.stat_group.library_id == auth.library_id) & (db.stat_group.tbl == 'I')
return dict(grid=grid)
@auth.requires_login()
def stgrt():
db.stat_group.id.readable = False
db.stat_group.tbl.readable = db.stat_group.tbl.writable = False
db.stat_group.tbl.default = 'T'
grid = SQLFORM.grid(db.stat_group,
searchable=False,
showbuttontext=False,
csv=False
)
db.stat_group._common_filter = lambda query: (db.stat_group.library_id == auth.library_id) & (db.stat_group.tbl == 'T')
return dict(grid=grid)
@auth.requires_login()
def stgrr():
db.stat_group.id.readable = False
db.stat_group.tbl.readable = db.stat_group.tbl.writable = False
db.stat_group.tbl.default = 'R'
grid = SQLFORM.grid(db.stat_group,
searchable=False,
showbuttontext=False,
csv=False
)
db.stat_group._common_filter = lambda query: (db.stat_group.library_id == auth.library_id) & (db.stat_group.tbl == 'R')
return dict(grid=grid)
'''
@auth.requires_login()
def stgrb():
db.stat_group.id.readable = False
db.stat_group.tbl.readable = db.stat_group.tbl.writable = False
db.stat_group.tbl.default = 'B'
grid = SQLFORM.grid(db.stat_group,
searchable=False,
showbuttontext=False,
csv=False
)
db.stat_group._common_filter = lambda query: (db.stat_group.library_id == auth.library_id) & (db.stat_group.tbl == 'B')
return dict(grid=grid)
'''
|
ridwannaruto/WikiLanka | refs/heads/Initialize | vendor/doctrine/orm/docs/en/conf.py | 2448 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
2ndQuadrant/ansible | refs/heads/master | lib/ansible/modules/cloud/amazon/ec2_placement_group.py | 39 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_placement_group
short_description: Create or delete an EC2 Placement Group
description:
- Create an EC2 Placement Group; if the placement group already exists,
nothing is done. Or, delete an existing placement group. If the placement
group is absent, do nothing. See also
U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
version_added: "2.5"
author: "Brad Macpherson (@iiibrad)"
options:
name:
description:
- The name for the placement group.
required: true
state:
description:
- Create or delete placement group.
required: false
default: present
choices: [ 'present', 'absent' ]
strategy:
description:
- Placement group strategy. Cluster will cluster instances into a
low-latency group in a single Availability Zone, while Spread spreads
instances across underlying hardware.
required: false
default: cluster
choices: [ 'cluster', 'spread' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide
# for details.
# Create a placement group.
- ec2_placement_group:
name: my-cluster
state: present
# Create a Spread placement group.
- ec2_placement_group:
name: my-cluster
state: present
strategy: spread
# Delete a placement group.
- ec2_placement_group:
name: my-cluster
state: absent
'''
RETURN = '''
placement_group:
description: Placement group attributes
returned: when state != absent
type: complex
contains:
name:
description: PG name
type: str
sample: my-cluster
state:
description: PG state
type: str
sample: "available"
strategy:
description: PG strategy
type: str
sample: "cluster"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (AWSRetry,
boto3_conn,
ec2_argument_spec,
get_aws_connection_info)
try:
from botocore.exceptions import (BotoCoreError, ClientError)
except ImportError:
pass # caught by AnsibleAWSModule
@AWSRetry.exponential_backoff()
def get_placement_group_details(connection, module):
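    # Describe by exact group name. AWS returns a (possibly empty) list;
    # anything other than exactly one match is treated as "no such group".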
name = module.params.get("name")
try:
response = connection.describe_placement_groups(
Filters=[{
"Name": "group-name",
"Values": [name]
}])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(
e,
msg="Couldn't find placement group named [%s]" % name)
if len(response['PlacementGroups']) != 1:
return None
else:
placement_group = response['PlacementGroups'][0]
return {
"name": placement_group['GroupName'],
"state": placement_group['State'],
"strategy": placement_group['Strategy'],
}
@AWSRetry.exponential_backoff()
def create_placement_group(connection, module):
name = module.params.get("name")
strategy = module.params.get("strategy")
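    # Check mode is implemented with EC2's DryRun flag: AWS validates the
    # request and raises a DryRunOperation error instead of creating the
    # group, which the except block below turns into a changed=True exit.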
try:
connection.create_placement_group(
GroupName=name, Strategy=strategy, DryRun=module.check_mode)
except (BotoCoreError, ClientError) as e:
if e.response['Error']['Code'] == "DryRunOperation":
module.exit_json(changed=True, placement_group={
"name": name,
"state": 'DryRun',
"strategy": strategy,
})
module.fail_json_aws(
e,
msg="Couldn't create placement group [%s]" % name)
module.exit_json(changed=True,
placement_group=get_placement_group_details(
connection, module
))
@AWSRetry.exponential_backoff()
def delete_placement_group(connection, module):
name = module.params.get("name")
try:
connection.delete_placement_group(
GroupName=name, DryRun=module.check_mode)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(
e,
msg="Couldn't delete placement group [%s]" % name)
module.exit_json(changed=True)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str'),
state=dict(default='present', choices=['present', 'absent']),
strategy=dict(default='cluster', choices=['cluster', 'spread'])
)
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True
)
region, ec2_url, aws_connect_params = get_aws_connection_info(
module, boto3=True)
connection = boto3_conn(module,
resource='ec2', conn_type='client',
region=region, endpoint=ec2_url, **aws_connect_params)
state = module.params.get("state")
if state == 'present':
placement_group = get_placement_group_details(connection, module)
if placement_group is None:
create_placement_group(connection, module)
else:
strategy = module.params.get("strategy")
if placement_group['strategy'] == strategy:
module.exit_json(
changed=False, placement_group=placement_group)
else:
name = module.params.get("name")
module.fail_json(
msg=("Placement group '{}' exists, can't change strategy" +
" from '{}' to '{}'").format(
name,
placement_group['strategy'],
strategy))
elif state == 'absent':
placement_group = get_placement_group_details(connection, module)
if placement_group is None:
module.exit_json(changed=False)
else:
delete_placement_group(connection, module)
if __name__ == '__main__':
main()
|
akosyakov/intellij-community | refs/heads/master | python/testData/intentions/afterReplaceBackQuoteExpression.py | 83 | repr(a + b, 34 + a) |
manderson23/NewsBlur | refs/heads/master | apps/rss_feeds/migrations/0051_unique_hash.py | 18 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Feed', fields ['feed_address']
db.delete_unique('feeds', ['feed_address'])
# Adding field 'Feed.branch_from_feed'
db.add_column('feeds', 'branch_from_feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rss_feeds.Feed'], null=True, blank=True), keep_default=False)
# Changing field 'Feed.hash_address_and_link'
db.alter_column('feeds', 'hash_address_and_link', self.gf('django.db.models.fields.CharField')(default=123, unique=True, max_length=64))
# Adding index on 'Feed', fields ['hash_address_and_link']
db.create_index('feeds', ['hash_address_and_link'])
# Adding unique constraint on 'Feed', fields ['hash_address_and_link']
db.create_unique('feeds', ['hash_address_and_link'])
def backwards(self, orm):
# Removing unique constraint on 'Feed', fields ['hash_address_and_link']
db.delete_unique('feeds', ['hash_address_and_link'])
# Removing index on 'Feed', fields ['hash_address_and_link']
db.delete_index('feeds', ['hash_address_and_link'])
# Deleting field 'Feed.branch_from_feed'
db.delete_column('feeds', 'branch_from_feed_id')
# Adding unique constraint on 'Feed', fields ['feed_address']
db.create_unique('feeds', ['feed_address'])
# Changing field 'Feed.hash_address_and_link'
db.alter_column('feeds', 'hash_address_and_link', self.gf('django.db.models.fields.CharField')(max_length=64, null=True))
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'branch_from_feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']", 'null': 'True', 'blank': 'True'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'favicon_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'favicon_not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_address': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
'feed_address_locked': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_link_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "'[Untitled]'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'hash_address_and_link': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_classifier_counts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['rss_feeds']
|
vinoth3v/In | refs/heads/master | In/templates/html/TextDiv.tpl.py | 2 | <<%= tag %> <%= attributes %>><%= value %> <%= children %></<%= tag %>> |
fcelda/tuned | refs/heads/master | tuned/exports/dbus_exporter.py | 2 | import interfaces
import decorator
import dbus.service
import dbus.mainloop.glib
import gobject
import inspect
import threading
class DBusExporter(interfaces.ExporterInterface):
"""
Export method calls through DBus Interface.
We take a method to be exported and create a simple wrapper function
to call it. This is required as we need the original function to be
bound to the original object instance. While the wrapper will be bound
to an object we dynamically construct.
"""
def __init__(self, bus_name, interface_name, object_name):
gobject.threads_init()
self._dbus_object_cls = None
self._dbus_object = None
self._dbus_methods = {}
self._bus_name = bus_name
self._interface_name = interface_name
self._object_name = object_name
self._thread = None
self._main_loop = gobject.MainLoop()
@property
def bus_name(self):
return self._bus_name
@property
def interface_name(self):
return self._interface_name
@property
def object_name(self):
return self._object_name
def export(self, method, in_signature, out_signature):
if not inspect.ismethod(method):
raise Exception("Only bound methods can be exported.")
method_name = method.__name__
if method_name in self._dbus_methods:
raise Exception("Method with this name is already exported.")
def wrapper(wrapped, owner, *args, **kwargs):
return method(*args, **kwargs)
wrapper = decorator.decorator(wrapper, method.im_func)
wrapper = dbus.service.method(self._interface_name, in_signature, out_signature)(wrapper)
self._dbus_methods[method_name] = wrapper
def _construct_dbus_object_class(self):
if self._dbus_object_cls is not None:
raise Exception("The exporter class was already build.")
unique_name = "DBusExporter_%d" % id(self)
cls = type(unique_name, (dbus.service.Object,), self._dbus_methods)
self._dbus_object_cls = cls
def start(self):
if self._dbus_object_cls is None:
self._construct_dbus_object_class()
self.stop()
self._thread = threading.Thread(target=self._thread_code)
self._thread.start()
def stop(self):
if self._thread is not None and self._thread.is_alive():
self._main_loop.quit()
self._thread.join()
self._thread = None
def _thread_code(self):
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
bus_name = dbus.service.BusName(self._bus_name, bus)
bus_object = self._dbus_object_cls(bus, self._object_name, bus_name)
self._main_loop.run()
del bus_object
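
if __name__ == '__main__':
	# Minimal smoke test, not used by tuned itself: it claims a throwaway
	# name on the system bus (needs bus policy permission; all names below
	# are hypothetical examples).
	class _Demo(object):
		def ping(self):
			return 'pong'

	exporter = DBusExporter('com.example.DBusExporterDemo',
		'com.example.DBusExporterDemo', '/DBusExporterDemo')
	exporter.export(_Demo().ping, in_signature='', out_signature='s')
	exporter.start()  # serves in a background thread until stop() is called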
|
blurstudio/cross3d | refs/heads/master | cross3d/abstract/abstractscenemodel.py | 1 | ##
# \namespace cross3d.abstract.abstractscenemodel
#
# \remarks The AbstractSceneModel class provides the base foundation for the 3d Object framework for the cross3d system
# This class will provide a generic overview structure for all manipulations of 3d models
#
# \author douglas
# \author Blur Studio
# \date 04/05/10
#
import cross3d
from cross3d.constants import ObjectType
from cross3d import SceneObject, Group, application, abstractmethod
class AbstractSceneModel(SceneObject):
"""
The SceneModel class provides the base foundation for the 3d Object
framework for the cross3d system. This class will provide a generic
overview structure for all manipulations of 3d models
"""
_objectType = ObjectType.Model
#--------------------------------------------------------------------------------------------------------------------
# public methods
#--------------------------------------------------------------------------------------------------------------------
@abstractmethod
def _nativeGroups(self, wildcard='*'):
return []
@abstractmethod
def explode(self):
return False
@abstractmethod
def recompose(self):
return False
@abstractmethod
def _addNativeObjects(self, nativeObjects):
return False
@abstractmethod
def isReferenced(self):
return False
def addObjects(self, objects):
return self._addNativeObjects([obj.nativePointer() for obj in objects])
def objects(self, wildcard='*', type=0):
return [SceneObject(self._scene, nativeObject) for nativeObject in self._nativeObjects(wildcard=wildcard, type=type)]
def _nativeObjects(self, wildcard='*', type=0):
# Adding model name space to wildcard.
wildcard = '%s%s%s' % (self.displayName(), application.nameSpaceSeparator(), wildcard)
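		# e.g. with wildcard '*' a model displayed as "Rig" yields "Rig:*"
		# when the host application's name-space separator is ":" (the
		# separator varies per DCC package).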
return self._scene._nativeObjects(wildcard=wildcard, objectType=type)
def groups(self, wildcard='*'):
groups = []
for nativeGroup in self._nativeGroups(wildcard):
groups.append(Group(self._scene, nativeGroup))
return groups
@abstractmethod
def exportToOtherPackages(self, path):
""" Exports the model to other 3D packages supported by this module.
"""
return False
@abstractmethod
def export(self, fileName):
return False
@abstractmethod
def resolutionsPaths(self):
return []
@abstractmethod
def update(self):
return False
@abstractmethod
def offloaded(self):
return False
@abstractmethod
def offload(self):
return False
@abstractmethod
def addResolution(self, name='', path='', load=False):
return False
@abstractmethod
def removeResolution(self, name):
return False
@abstractmethod
def resolutionPath(self, name=''):
return ''
@abstractmethod
def setResolutionPath(self, path, name=''):
return False
@abstractmethod
def setResolution(self, name):
return False
def resetTimeControllers(self):
""" Removes any customization on time controllers of alembics, PC2s and TMCs making sure the data is read as exported.
		Returns: Whether or not the operation was a success.
"""
return False
@abstractmethod
def resolution(self):
return ''
@abstractmethod
def resolutions(self):
return []
@abstractmethod
def savePose(self, basePath, name='', objects=[]):
return False
@abstractmethod
def saveAnimation(self, basePath, name='', objects=[]):
return False
@abstractmethod
def loadPose(self, path):
return False
@abstractmethod
def loadAnimation(self, path):
return False
@abstractmethod
def loadAnimationInMixer(self, path, name=''):
return False
@abstractmethod
def matchPose(self, model, objects=[]):
return False
@abstractmethod
def matchAnimation(self, model, objects=[]):
return False
def findObject(self, displayName='', uniqueId=0):
name = '.'.join([self.displayName(), displayName])
return self._scene.findObject(name, uniqueId)
# register the symbol
cross3d.registerSymbol('SceneModel', AbstractSceneModel, ifNotFound=True)
|
SlashRoot/hendrix | refs/heads/master | hendrix/utils/__init__.py | 4 | from importlib import import_module
import chalk
import os
import six
import sys
HENDRIX_DIR = os.path.dirname(os.path.abspath(__file__))
SHARE_PATH = os.path.join(
os.path.dirname(sys.executable),
'share/hendrix'
)
def get_pid(options):
"""returns The default location of the pid file for process management"""
namespace = options['settings'] if options['settings'] else options['wsgi']
return '%s/%s_%s.pid' % (
HENDRIX_DIR, options['http_port'], namespace.replace('.', '_')
)
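
# e.g. options {'settings': 'myapp.settings', 'http_port': 8000} resolves to
# "<HENDRIX_DIR>/8000_myapp_settings.pid"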
def responseInColor(request, status, headers, prefix='Response', opts=None):
"Prints the response info in color"
code, message = status.split(None, 1)
message = '%s [%s] => Request %s %s %s on pid %d' % (
prefix,
code,
str(request.host),
request.method,
request.path,
os.getpid()
)
    signal = int(code) // 100
if signal == 2:
chalk.green(message, opts=opts)
elif signal == 3:
chalk.blue(message, opts=opts)
else:
chalk.red(message, opts=opts)
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by
the last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name
)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
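
# Example (the dotted path shown is from this package and assumed importable):
#
#     get_pid_fn = import_string('hendrix.utils.get_pid')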
|
AndreLamurias/IBRel | refs/heads/master | src/reader/Transmir_corpus.py | 2 | import logging
import random
import progressbar as pb
from postprocessing import ssm
from reader.pubmed_corpus import PubmedCorpus
from mirna_base import MirbaseDB
from config import config
from text.mirna_entity import MirnaEntity, mirna_graph
from text.protein_entity import ProteinEntity, get_uniprot_name
class TransmirCorpus(PubmedCorpus):
"""
Corpus generated from the TransmiR database, using distant supervision
"""
def __init__(self, corpusdir, **kwargs):
self.mirbase = MirbaseDB(config.mirbase_path)
self.mirbase.load_graph()
self.mirnas = {}
self.tfs = {}
self.pairs = {}
self.pmids = set()
self.normalized_mirnas = set() # normalized to miRBase
self.normalized_tfs = set() #normalized to maybe UniProt
self.normalized_pairs = set()
self.db_path = corpusdir
self.load_database()
super(TransmirCorpus, self).__init__(corpusdir, self.pmids, **kwargs)
# TODO: use negatome
def load_database(self):
logging.info("Loading TransmiR database...")
with open(self.db_path, 'r') as dbfile:
for line in dbfile:
tsv = line.strip().split("\t")
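                # Columns used from the TransmiR TSV (layout inferred from the
                # accesses below): 0=TF, 3=miRNA, 5=function, 6=disease,
                # 7=activation/repression, 8=PMIDs, last=species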
if tsv[-1].lower() == "human":
tfname = tsv[0]
mirname = tsv[3]
func = tsv[5].split(";")
disease = tsv[6].split(";")
active = tsv[7]
pmid = tsv[8].split(";")
# for f in func:
# funcs.add(f.strip())
# for d in disease:
# if d != "see HMDD (http://cmbi.bjmu.edu.cn/hmdd)":
# diseases.add(d.strip())
for p in pmid:
logging.info(p)
self.pmids.add(p.strip())
#mirnas[mirname] = (func, [d for d in disease if d != "see HMDD (http://cmbi.bjmu.edu.cn/hmdd)"])
#entries[(tfname, mirname)] = (active, pmid)
def normalize_entities(self):
logging.info("Normalizing entities...")
for mir in self.mirnas:
match = self.mirbase.map_label(mir)
#if match > 0.6:
self.normalized_mirnas.add(match[0])
def load_annotations(self, db_path, etype, ptype):
self.mirnas = {}
self.tfs = {}
self.pairs = {}
self.pmids = set()
self.normalized_mirnas = set() # normalized to miRBase
self.normalized_tfs = set() # normalized to maybe UniProt
self.normalized_pairs = set()
with open(db_path, 'r') as dbfile:
for line in dbfile:
tsv = line.strip().split("\t")
if tsv[-1].lower() == "human":
pmids = tsv[8].split(";")
tfname = tsv[0]
mirname = tsv[3]
for pmid in pmids:
if pmid not in self.tfs:
self.tfs[pmid] = set()
if pmid not in self.mirnas:
self.mirnas[pmid] = set()
if not mirname.startswith("hsa-"):
mirname = "hsa-" + mirname
#self.mirnas[pmid].add(mirname)
tf = None
for pmidtf in self.tfs[pmid]:
if pmidtf.text == tfname:
tf = pmidtf
if tf is None:
eid = len(self.tfs[pmid]) + len(self.mirnas[pmid])
tf = ProteinEntity([], pmid, text=tfname, did=pmid, eid="{}.e{}".format(pmid, eid))
self.tfs[pmid].add(tf)
mirna = None
for pmidmir in self.mirnas[pmid]:
if pmidmir.text == mirname:
mirna = pmidmir
if mirna is None:
eid = len(self.tfs[pmid]) + len(self.mirnas[pmid])
mirna = MirnaEntity([], pmid, text=mirname, did=pmid, eid="{}.e{}".format(pmid, eid))
self.mirnas[pmid].add(mirna)
tf.targets.append((mirna.eid, "miRNA-gene"))
# print "tf gos: {}".format(" ".join(tf.go_ids))
#print "mirna gos: {}".format(" ".join(mirna.go_ids))
# self.normalize_entities()
#self.run_analysis()
def run_analysis(self):
        correct_count = 0  # number of real miRNA-gene pairs with common GOs
        incorrect_count = 0  # number of random miRNA-gene pairs with common GOs
all_tfs = []
all_mirnas = []
for pmid in self.tfs:
all_tfs = []
all_mirnas = []
            correct_count = 0  # number of real miRNA-gene pairs with common GOs
            incorrect_count = 0  # number of random miRNA-gene pairs with common GOs
for tf in self.tfs[pmid]:
all_tfs.append(tf)
mirna = None
for mirna_eid in tf.targets:
for m in self.mirnas[pmid]:
if m.eid == mirna_eid[0]:
mirna = m
break
all_mirnas.append(mirna)
# common_gos = set(tf.go_ids).intersection(set(mirna.go_ids))
# if len(common_gos) > 0:
# print "{}->{} common gos:{}".format(tf.text, mirna.text, " ".join(common_gos))
# correct_count += 1
if len(all_tfs) > 1 and len(all_mirnas) > 1:
for i in range(0, 10):
random_tf = random.choice(all_tfs)
random_mirna = random.choice(all_mirnas)
common_gos = set(random_tf.go_ids).intersection(set(random_mirna.go_ids))
if (random_mirna.eid, "miRNA-gene") in random_tf.targets:
#if len(common_gos) > 0:
if random_mirna.best_go.startswith("GO:") and random_tf.best_go.startswith("GO"):
ss = ssm.simui_go(random_mirna.best_go, random_tf.best_go)
#print "correct:", ss
correct_count += ss
else:
correct_count += 1
else:
#if len(common_gos) > 0:
if random_mirna.best_go.startswith("GO:") and random_tf.best_go.startswith("GO"):
ss = ssm.simui_go(random_mirna.best_go, random_tf.best_go)
print "incorrect:", ss
incorrect_count += ss
else:
incorrect_count += 1
print "{}-{} ({} mirnas, {} tfs".format(correct_count, incorrect_count, len(all_mirnas), len(all_tfs))
def get_transmir_gold_ann_set(goldpath, entitytype):
logging.info("loading gold standard... {}".format(goldpath))
gold_entities = set()
gold_relations = {}
with open(goldpath, 'r') as goldfile:
for l in goldfile:
tsv = l.strip().split("\t")
if tsv[-1].lower() == "human":
# print "gold standard", tsv[8], tsv[0], tsv[3], entitytype
pmids = tsv[8].split(";")
                norm_mirna = mirna_graph.map_label(tsv[3])
                # Fall back to the raw label when the miRBase match score is
                # low (assumes map_label returns a [label, score] pair).
                if norm_mirna[1] < 99:
                    norm_mirna[0] = tsv[3]
norm_gene = get_uniprot_name(tsv[0])
for did in pmids:
if entitytype == "mirna":
gold_entities.add(("PMID" + did, "0", "0", norm_mirna[0].lower()))
elif entitytype == "protein":
gold_entities.add(("PMID" + did, "0", "0", norm_gene[0].lower()))
gold_relations[("PMID" + did, norm_mirna[0], norm_gene[0], norm_mirna[0] + "=>" + norm_gene[0])] = [tsv[3] + "=>" + tsv[0]]
#gold_relations[("PMID", norm_mirna[0], norm_gene[0], norm_mirna[0] + "=>" + norm_gene[0])] = [tsv[3] + "=>" + tsv[0]]
# print gold_entities
return gold_entities, gold_relations
|
broferek/ansible | refs/heads/devel | lib/ansible/plugins/terminal/frr.py | 47 | #
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
terminal_stderr_re = [
re.compile(br"% Command incomplete", re.I),
re.compile(br"% Unknown command", re.I),
re.compile(br"(?:\S+) instance is already running", re.I),
re.compile(br"% (?:Create|Specify) .* first", re.I),
re.compile(br"(?:\S+) is not running", re.I),
re.compile(br"% Can't find .*", re.I),
re.compile(br"invalid input", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
]
def on_open_shell(self):
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
# NOTE: For FRR, enable password only takes effect when telnetting to individual daemons
# vtysh will always drop into enable mode since it runs as a privileged process
pass
def on_unbecome(self):
# NOTE: For FRR, enable password only takes effect when telnetting to individual daemons
# vtysh will always drop into enable mode since it runs as a privileged process
pass
|
MarkTheF4rth/youtube-dl | refs/heads/master | youtube_dl/extractor/ssa.py | 122 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
unescapeHTML,
parse_duration,
)
class SSAIE(InfoExtractor):
_VALID_URL = r'http://ssa\.nls\.uk/film/(?P<id>\d+)'
_TEST = {
'url': 'http://ssa.nls.uk/film/3561',
'info_dict': {
'id': '3561',
'ext': 'flv',
'title': 'SHETLAND WOOL',
'description': 'md5:c5afca6871ad59b4271e7704fe50ab04',
'duration': 900,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# rtmp download
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
streamer = self._search_regex(
r"'streamer'\s*,\S*'(rtmp[^']+)'", webpage, 'streamer')
play_path = self._search_regex(
r"'file'\s*,\s*'([^']+)'", webpage, 'file').rpartition('.')[0]
def search_field(field_name, fatal=False):
            return self._search_regex(
                r'<span\s+class="field_title">%s:</span>\s*<span\s+class="field_content">([^<]+)</span>' % field_name,
                webpage, field_name, fatal=fatal)
title = unescapeHTML(search_field('Title', fatal=True)).strip('()[]')
description = unescapeHTML(search_field('Description'))
duration = parse_duration(search_field('Running time'))
thumbnail = self._search_regex(
r"'image'\s*,\s*'([^']+)'", webpage, 'thumbnails', fatal=False)
return {
'id': video_id,
'url': streamer,
'play_path': play_path,
'ext': 'flv',
'title': title,
'description': description,
'duration': duration,
'thumbnail': thumbnail,
}
|
adamlincoln/pokersim | refs/heads/master | tests/test_brains/test_foldwithnopostfloppairorbetter.py | 1 | from pokersim.Table import Table
from pokersim.Player import Player
from pokersim.Card import Card
from pokersim.Decision import Decision
def test_foldwithnopostfloppairorbetter_call_preflop():
table = Table()
num_players = 3
for i in xrange(num_players):
player = Player(10, 'FoldWithNoPostFlopPairOrBetter')
player.sit(table, i)
table.initialize_hand()
table.players[0].hole_cards.append(Card('D', 2))
table.players[0].hole_cards.append(Card('H', 9))
assert table.players[0].decide() == Decision.CALL
def test_foldwithnopostfloppairorbetter_call_postflop_1():
table = Table()
num_players = 3
for i in xrange(num_players):
player = Player(10, 'FoldWithNoPostFlopPairOrBetter')
player.sit(table, i)
table.initialize_hand()
table.players[0].hole_cards.append(Card('D', 9))
table.players[0].hole_cards.append(Card('H', 9))
table.board.append(Card('S', 10))
table.board.append(Card('S', 10))
table.board.append(Card('S', 10))
assert table.players[0].decide() == Decision.CALL
def test_foldwithnopostfloppairorbetter_call_postflop_2():
table = Table()
num_players = 3
for i in xrange(num_players):
player = Player(10, 'FoldWithNoPostFlopPairOrBetter')
player.sit(table, i)
table.initialize_hand()
table.players[0].hole_cards.append(Card('D', 9))
table.players[0].hole_cards.append(Card('H', 9))
table.board.append(Card('S', 'K'))
table.board.append(Card('S', 3))
table.board.append(Card('S', 7))
assert table.players[0].decide() == Decision.CALL
def test_foldwithnopostfloppairorbetter_fold_postflop():
table = Table()
num_players = 3
for i in xrange(num_players):
player = Player(10, 'FoldWithNoPostFlopPairOrBetter')
player.sit(table, i)
table.initialize_hand()
table.players[0].hole_cards.append(Card('D', 2))
table.players[0].hole_cards.append(Card('H', 9))
table.board.append(Card('S', 'K'))
table.board.append(Card('S', 3))
table.board.append(Card('S', 7))
assert table.players[0].decide() == Decision.FOLD
|
edmorley/django | refs/heads/master | tests/template_loader/tests.py | 153 | from django.template import TemplateDoesNotExist
from django.template.loader import (
get_template, render_to_string, select_template,
)
from django.test import SimpleTestCase, override_settings
from django.test.client import RequestFactory
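
# Two template engines are configured deliberately: "dummy" (template strings)
# first, then "django", so the tests below can assert cross-engine lookup order.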
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
}])
class TemplateLoaderTests(SimpleTestCase):
def test_get_template_first_engine(self):
template = get_template("template_loader/hello.html")
self.assertEqual(template.render(), "Hello! (template strings)\n")
def test_get_template_second_engine(self):
template = get_template("template_loader/goodbye.html")
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_get_template_using_engine(self):
template = get_template("template_loader/hello.html", using="django")
self.assertEqual(template.render(), "Hello! (Django templates)\n")
def test_get_template_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
get_template("template_loader/unknown.html")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_select_template_first_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/hello.html"])
self.assertEqual(template.render(), "Hello! (template strings)\n")
def test_select_template_second_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/goodbye.html"])
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_select_template_using_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/hello.html"], using="django")
self.assertEqual(template.render(), "Hello! (Django templates)\n")
def test_select_template_empty(self):
with self.assertRaises(TemplateDoesNotExist):
select_template([])
def test_select_template_string(self):
with self.assertRaisesMessage(
TypeError,
"select_template() takes an iterable of template names but got a "
"string: 'template_loader/hello.html'. Use get_template() if you "
"want to load a single template by name."
):
select_template('template_loader/hello.html')
def test_select_template_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
select_template(["template_loader/unknown.html",
"template_loader/missing.html"])
self.assertEqual(
e.exception.chain[0].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[0].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_select_template_tries_all_engines_before_names(self):
template = select_template(["template_loader/goodbye.html",
"template_loader/hello.html"])
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_render_to_string_first_engine(self):
content = render_to_string("template_loader/hello.html")
self.assertEqual(content, "Hello! (template strings)\n")
def test_render_to_string_second_engine(self):
content = render_to_string("template_loader/goodbye.html")
self.assertEqual(content, "Goodbye! (Django templates)\n")
def test_render_to_string_with_request(self):
request = RequestFactory().get('/foobar/')
content = render_to_string("template_loader/request.html", request=request)
self.assertEqual(content, "/foobar/\n")
def test_render_to_string_using_engine(self):
content = render_to_string("template_loader/hello.html", using="django")
self.assertEqual(content, "Hello! (Django templates)\n")
def test_render_to_string_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
render_to_string("template_loader/unknown.html")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_render_to_string_with_list_first_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/hello.html"])
self.assertEqual(content, "Hello! (template strings)\n")
def test_render_to_string_with_list_second_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/goodbye.html"])
self.assertEqual(content, "Goodbye! (Django templates)\n")
def test_render_to_string_with_list_using_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/hello.html"], using="django")
self.assertEqual(content, "Hello! (Django templates)\n")
def test_render_to_string_with_list_empty(self):
with self.assertRaises(TemplateDoesNotExist):
render_to_string([])
def test_render_to_string_with_list_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
render_to_string(["template_loader/unknown.html",
"template_loader/missing.html"])
self.assertEqual(
e.exception.chain[0].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[0].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[1].backend.name, 'django')
self.assertEqual(
e.exception.chain[2].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[2].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[3].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[3].backend.name, 'django')
def test_render_to_string_with_list_tries_all_engines_before_names(self):
content = render_to_string(["template_loader/goodbye.html",
"template_loader/hello.html"])
self.assertEqual(content, "Goodbye! (Django templates)\n")
|
rwl/openpowersystem | refs/heads/master | ucte/core/curve_data.py | 1 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" Data point values for defining a curve or schedule
"""
# <<< imports
# @generated
from ucte.element import Element
from ucte.core.curve import Curve
from google.appengine.ext import db
# >>> imports
class CurveData(Element):
""" Data point values for defining a curve or schedule
"""
# <<< curve_data.attributes
# @generated
# The data value of the first Y-axis variable, depending on the Y-axis units
y1value = db.FloatProperty()
# The data value of the X-axis variable, depending on the X-axis units
xvalue = db.FloatProperty()
# The data value of the second Y-axis variable (if present), depending on the Y-axis units
y2value = db.FloatProperty()
# >>> curve_data.attributes
# <<< curve_data.references
# @generated
# The Curve defined by this CurveData.
curve_schedule = db.ReferenceProperty(Curve,
collection_name="curve_schedule_datas")
# >>> curve_data.references
# <<< curve_data.operations
# @generated
# >>> curve_data.operations
# EOF -------------------------------------------------------------------------
|
chrisspen/burlap | refs/heads/master | burlap/tarball.py | 1 | from __future__ import print_function
from burlap import Satchel
from burlap.constants import *
from burlap.decorators import task
from burlap.common import only_hostname
RSYNC = 'rsync'
#DEPRECATED: TODO: remove tarball functionality, and rename to CodeSatchel
class TarballSatchel(Satchel):
name = 'tarball'
def set_defaults(self):
self.env.clean = 1
self.env.gzip = 1
self.env.method = RSYNC
self.env.rsync_source_dir = 'src'
self.env.rsync_source_dirs = [] # This overrides rsync_source_dir
self.env.rsync_target_dir = None
self.env.rsync_target_host = '{user}@{host_string}:'
self.env.rsync_auth = '--rsh "ssh -t -o StrictHostKeyChecking=no -i {key_filename}"'
self.env.rsync_command_template = (
'rsync '
'--recursive --verbose --perms --times --links '
'--compress --copy-links {exclude_str} '
'--delete --delete-before --force '
'{rsync_auth} '
'{rsync_source_dir} '
'{rsync_target_host}{rsync_target_dir}'
)
self.env.exclusions = [
'*_local.py',
'*.pyc',
'*.svn',
'*.tar.gz',
'*.log',
'twistd.pid',
'*.sqlite',
]
self.env.dir = '.burlap/tarball_cache'
self.env.extra_dirs = []
self.env.perm_user = 'www-data'
self.env.perm_group = 'www-data'
self.env.perm_chmod = None
self.env.set_permissions = True
@property
def timestamp(self):
from burlap.common import get_last_modified_timestamp
r = self.local_renderer
fn = r.env.rsync_source_dir
if self.verbose:
print('tarball.fn:', fn)
return get_last_modified_timestamp(fn, ignore=[_ for _ in r.env.exclusions if '/' not in _])
@task
def changed(self):
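        # Change detection: compare the newest mtime recorded for the rsync
        # source tree at the last deploy (stored in the manifest) with the
        # current value computed by the `timestamp` property above.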
lm = self.last_manifest
last_timestamp = lm.timestamp
current_timestamp = self.timestamp
self.vprint('last_timestamp:', last_timestamp)
self.vprint('current_timestamp:', current_timestamp)
ret = last_timestamp == current_timestamp
print('NO change' if ret else 'CHANGED!')
return ret
def record_manifest(self):
"""
Called after a deployment to record any data necessary to detect changes
for a future deployment.
"""
manifest = super(TarballSatchel, self).record_manifest()
manifest['timestamp'] = self.timestamp
return manifest
@task
def set_permissions(self):
r = self.local_renderer
if r.env.rsync_target_dir:
if r.env.perm_chmod:
r.sudo('chmod -R {perm_chmod} {rsync_target_dir}')
r.sudo('chown -R {perm_user}:{perm_group} {rsync_target_dir}')
def _run_rsync(self, src, dst):
print('rsync %s -> %s' % (src, dst))
r = self.local_renderer
r.env.hostname = only_hostname(r.genv.host_string)
real_rsync_target_dir = r.env.rsync_target_dir
try:
# Rsync to a temporary directory where we'll have full permissions.
tmp_dir = '/tmp/tmp_%s_%s' % (self.env.rsync_target_dir.replace('/', '_'), src.replace('/', '_'))
r.env.rsync_target_dir = tmp_dir
r.env.rsync_source_dir = src
r.local(self.env.rsync_command_template)
# Then rsync from the temp directory as sudo to complete the operation.
r.env.rsync_source_dir = tmp_dir+'/*'
r.env.rsync_target_dir = real_rsync_target_dir
r.env.rsync_target_host = ''
r.env.rsync_auth = ''
r.sudo(self.env.rsync_command_template)
finally:
r.env.rsync_target_dir = real_rsync_target_dir
@task
def deploy_rsync(self, *args, **kwargs):
r = self.local_renderer
# Confirm source directories.
src_dirs = list(self.env.rsync_source_dirs)
if not src_dirs:
src_dirs.append(self.env.rsync_source_dir)
# Confirm target directories.
assert self.env.rsync_target_dir
r.env.exclude_str = ' '.join('--exclude=%s' % _ for _ in self.env.exclusions)
for src_dir in src_dirs:
self._run_rsync(src=src_dir, dst=self.env.rsync_target_dir)
if self.env.set_permissions:
self.set_permissions()
@task(precursors=['gitchecker', 'packager', 'apache2', 'pip', 'user'])
def configure(self, *args, **kwargs):
if self.env.method == RSYNC:
self.deploy_rsync(*args, **kwargs)
tarball_satchel = TarballSatchel()
deploy = tarball_satchel.configure
|
stephansigg/IPSN_localisation_active-DF | refs/heads/master | Sources/mainwindow.py | 1 | import wx
class MainWindow(wx.Frame):
def __init__(self, parent, title):
self.dirname=''
wx.Frame.__init__(self, parent, title=title, size=(200,-1))
#self.quote = wx.StaticText(self, label="Your quote :", pos=(20, 30))
# A multiline TextCtrl - This is here to show how the events work in this program, don't pay too much attention to it
self.logger = wx.TextCtrl(self, pos=(500,20), size=(200,300), style=wx.TE_MULTILINE | wx.TE_READONLY)
        # buttons, kept as distinct attributes so each stays reachable
        self.start_button = wx.Button(self, label="Start Recording", pos=(50, 100))
        self.Bind(wx.EVT_BUTTON, self.OnClick, self.start_button)
        self.stop_button = wx.Button(self, label="Stop Recording", pos=(200, 100))
        self.Bind(wx.EVT_BUTTON, self.OnClick, self.stop_button)
        self.pause_button = wx.Button(self, label="Pause", pos=(350, 100))
        self.Bind(wx.EVT_BUTTON, self.OnClick, self.pause_button)
# the edit control - one line version.
self.lblname = wx.StaticText(self, label="Coodinator (Room, Row, Column) :", pos=(20,60))
self.editname = wx.TextCtrl(self, value="Enter Coordinator", pos=(270, 60), size=(200,-1))
self.Bind(wx.EVT_TEXT, self.EvtText, self.editname)
self.Bind(wx.EVT_CHAR, self.EvtChar, self.editname)
def EvtRadioBox(self, event):
self.logger.AppendText('EvtRadioBox: %d\n' % event.GetInt())
def EvtComboBox(self, event):
self.logger.AppendText('EvtComboBox: %s\n' % event.GetString())
def OnClick(self,event):
self.logger.AppendText(" Click on object with Id %d\n" %event.GetId())
def EvtText(self, event):
self.logger.AppendText('EvtText: %s\n' % event.GetString())
def EvtChar(self, event):
self.logger.AppendText('EvtChar: %d\n' % event.GetKeyCode())
event.Skip()
def EvtCheckBox(self, event):
self.logger.AppendText('EvtCheckBox: %d\n' % event.Checked())
app = wx.App(False)
frame = MainWindow(None, "Indoor Location Learning")
frame.Show()
app.MainLoop()
|
neumerance/deploy | refs/heads/master | openstack_dashboard/dashboards/admin/domains/panel.py | 12 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin import dashboard
class Domains(horizon.Panel):
name = _("Domains")
slug = 'domains'
if keystone.VERSIONS.active >= 3:
dashboard.Admin.register(Domains)
|
larsks/cloud-init-patches | refs/heads/lpc-rhel-7-patches | tests/unittests/test_pathprefix2dict.py | 10 | from cloudinit import util
from .helpers import TestCase, populate_dir
import shutil
import tempfile
class TestPathPrefix2Dict(TestCase):
def setUp(self):
super(TestPathPrefix2Dict, self).setUp()
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
def test_required_only(self):
dirdata = {'f1': b'f1content', 'f2': b'f2content'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
self.assertEqual(dirdata, ret)
def test_required_missing(self):
dirdata = {'f1': b'f1content'}
populate_dir(self.tmp, dirdata)
kwargs = {'required': ['f1', 'f2']}
self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
def test_no_required_and_optional(self):
dirdata = {'f1': b'f1c', 'f2': b'f2c'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=None,
optional=['f1', 'f2'])
self.assertEqual(dirdata, ret)
def test_required_and_optional(self):
dirdata = {'f1': b'f1c', 'f2': b'f2c'}
populate_dir(self.tmp, dirdata)
ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
self.assertEqual(dirdata, ret)
# vi: ts=4 expandtab
|
guillochon/staged-recipes | refs/heads/master | .travis_scripts/create_feedstocks.py | 1 | #!/usr/bin/env python
"""
Convert all recipes into feedstocks.
This script is to be run in a TravisCI context, with all secret environment variables defined (BINSTAR_TOKEN, GH_TOKEN)
Such as:
export GH_TOKEN=$(cat ~/.conda-smithy/github.token)
"""
from __future__ import print_function
from conda_build.metadata import MetaData
from contextlib import contextmanager
from datetime import datetime
from github import Github, GithubException
import os.path
import shutil
import subprocess
import sys
import tempfile
import traceback
# Enable DEBUG to run the diagnostics, without actually creating new feedstocks.
DEBUG = False
recipe_directory_name = 'recipes'
def list_recipes():
if os.path.isdir(recipe_directory_name):
recipes = os.listdir(recipe_directory_name)
else:
recipes = []
for recipe_dir in recipes:
# We don't list the "example" feedstock. It is an example, and is there
# to be helpful.
if recipe_dir.startswith('example'):
continue
path = os.path.abspath(os.path.join(recipe_directory_name, recipe_dir))
yield path, MetaData(path).name()
@contextmanager
def tmp_dir(*args, **kwargs):
temp_dir = tempfile.mkdtemp(*args, **kwargs)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
def repo_exists(gh, organization, name):
# Use the organization provided.
org = gh.get_organization(organization)
try:
org.get_repo(name)
return True
except GithubException as e:
if e.status == 404:
return False
raise
def print_rate_limiting_info(gh):
# Compute some info about our GitHub API Rate Limit.
# Note that it doesn't count against our limit to
# get this info. So, we should be doing this regularly
# to better know when it is going to run out. Also,
# this will help us better understand where we are
# spending it and how to better optimize it.
# Get GitHub API Rate Limit usage and total
gh_api_remaining = gh.get_rate_limit().rate.remaining
gh_api_total = gh.get_rate_limit().rate.limit
# Compute time until GitHub API Rate Limit reset
gh_api_reset_time = gh.get_rate_limit().rate.reset
gh_api_reset_time -= datetime.utcnow()
print("")
print("GitHub API Rate Limit Info:")
print("---------------------------")
print("Currently remaining {remaining} out of {total}.".format(remaining=gh_api_remaining, total=gh_api_total))
print("Will reset in {time}.".format(time=gh_api_reset_time))
print("")
if __name__ == '__main__':
exit_code = 0
is_merged_pr = (os.environ.get('TRAVIS_BRANCH') == 'master' and os.environ.get('TRAVIS_PULL_REQUEST') == 'false')
smithy_conf = os.path.expanduser('~/.conda-smithy')
if not os.path.exists(smithy_conf):
os.mkdir(smithy_conf)
def write_token(name, token):
with open(os.path.join(smithy_conf, name + '.token'), 'w') as fh:
fh.write(token)
if 'APPVEYOR_TOKEN' in os.environ:
write_token('appveyor', os.environ['APPVEYOR_TOKEN'])
if 'CIRCLE_TOKEN' in os.environ:
write_token('circle', os.environ['CIRCLE_TOKEN'])
gh = None
if 'GH_TOKEN' in os.environ:
write_token('github', os.environ['GH_TOKEN'])
gh = Github(os.environ['GH_TOKEN'])
# Get our initial rate limit info.
print_rate_limiting_info(gh)
owner_info = ['--organization', 'conda-forge']
print('Calculating the recipes which need to be turned into feedstocks.')
with tmp_dir('__feedstocks') as feedstocks_dir:
feedstock_dirs = []
for num, (recipe_dir, name) in enumerate(list_recipes()):
if num >= 7:
exit_code = 1
break
feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock')
print('Making feedstock for {}'.format(name))
try:
subprocess.check_call(['conda', 'smithy', 'init', recipe_dir,
'--feedstock-directory', feedstock_dir])
except subprocess.CalledProcessError:
traceback.print_exception(*sys.exc_info())
continue
if not is_merged_pr:
# We just want to check that conda-smithy is doing its thing without having any metadata issues.
continue
feedstock_dirs.append([feedstock_dir, name, recipe_dir])
subprocess.check_call(['git', 'remote', 'add', 'upstream_with_token',
'https://conda-forge-manager:{}@github.com/conda-forge/{}-feedstock'.format(os.environ['GH_TOKEN'],
name)],
cwd=feedstock_dir)
# Sometimes we already have the feedstock created. We need to deal with that case.
if repo_exists(gh, 'conda-forge', name + '-feedstock'):
subprocess.check_call(['git', 'fetch', 'upstream_with_token'], cwd=feedstock_dir)
subprocess.check_call(['git', 'branch', '-m', 'master', 'old'], cwd=feedstock_dir)
try:
subprocess.check_call(['git', 'checkout', '-b', 'master', 'upstream_with_token/master'], cwd=feedstock_dir)
except subprocess.CalledProcessError:
# Sometimes, we have a repo, but there are no commits on it! Just catch that case.
                    subprocess.check_call(['git', 'checkout', '-b', 'master'], cwd=feedstock_dir)
else:
subprocess.check_call(['conda', 'smithy', 'register-github', feedstock_dir] + owner_info)
# Break the previous loop to allow the TravisCI registering to take place only once per function call.
# Without this, intermittent failures to synch the TravisCI repos ensue.
# Hang on to any CI registration errors that occur and raise them at the end.
for feedstock_dir, name, recipe_dir in feedstock_dirs:
# Try to register each feedstock with CI.
# However sometimes their APIs have issues for whatever reason.
# In order to bank our progress, we note the error and handle it.
# After going through all the recipes and removing the converted ones,
# we fail the build so that people are aware that things did not clear.
try:
subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info)
except subprocess.CalledProcessError:
exit_code = 1
traceback.print_exception(*sys.exc_info())
continue
subprocess.check_call(['conda', 'smithy', 'rerender'], cwd=feedstock_dir)
subprocess.check_call(['git', 'commit', '-am', "Re-render the feedstock after CI registration."], cwd=feedstock_dir)
for i in range(5):
try:
# Capture the output, as it may contain the GH_TOKEN.
out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'HEAD:master'], cwd=feedstock_dir,
stderr=subprocess.STDOUT)
subprocess.check_call(['conda', 'smithy', 'register-github', '--add-teams', feedstock_dir] + owner_info)
break
except subprocess.CalledProcessError:
pass
# Likely another job has already pushed to this repo.
# Place our changes on top of theirs and try again.
out = subprocess.check_output(['git', 'fetch', 'upstream_with_token', 'master'], cwd=feedstock_dir,
stderr=subprocess.STDOUT)
try:
subprocess.check_call(['git', 'rebase', 'upstream_with_token/master', 'master'], cwd=feedstock_dir)
except subprocess.CalledProcessError:
# Handle rebase failure by choosing the changes in `master`.
subprocess.check_call(['git', 'checkout', 'master', '--', '.'], cwd=feedstock_dir)
subprocess.check_call(['git', 'rebase', '--continue'], cwd=feedstock_dir)
# Remove this recipe from the repo.
if is_merged_pr:
subprocess.check_call(['git', 'rm', '-rf', recipe_dir])
# Update status based on the remote.
subprocess.check_call(['git', 'stash', '--keep-index', '--include-untracked'])
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'rebase', '--autostash'])
subprocess.check_call(['git', 'add', '.'])
try:
subprocess.check_call(['git', 'stash', 'pop'])
except subprocess.CalledProcessError:
# In case there was nothing to stash.
# Finish quietly.
pass
# Parse `git status --porcelain` to handle some merge conflicts and generate the removed recipe list.
changed_files = subprocess.check_output(['git', 'status', '--porcelain', recipe_directory_name],
universal_newlines=True)
changed_files = changed_files.splitlines()
# Add all files from AU conflicts. They are new files that we weren't tracking previously.
# Adding them resolves the conflict and doesn't actually add anything to the index.
new_file_conflicts = filter(lambda _: _.startswith("AU "), changed_files)
new_file_conflicts = map(lambda _ : _.replace("AU", "", 1).lstrip(), new_file_conflicts)
for each_new_file in new_file_conflicts:
subprocess.check_call(['git', 'add', each_new_file])
# Generate a fresh listing of recipes removed.
#
# * Each line we get back is a change to a file in the recipe directory.
# * We narrow the list down to recipes that are staged for deletion (ignores examples).
# * Then we clean up the list so that it only has the recipe names.
removed_recipes = filter(lambda _: _.startswith("D "), changed_files)
removed_recipes = map(lambda _ : _.replace("D", "", 1).lstrip(), removed_recipes)
removed_recipes = map(lambda _ : os.path.relpath(_, recipe_directory_name), removed_recipes)
removed_recipes = map(lambda _ : _.split(os.path.sep)[0], removed_recipes)
removed_recipes = sorted(set(removed_recipes))
# Commit any removed packages.
subprocess.check_call(['git', 'status'])
if removed_recipes:
msg = ('Removed recipe{s} ({}) after converting into feedstock{s}.'
''.format(', '.join(removed_recipes),
s=('s' if len(removed_recipes) > 1 else '')))
msg += ' [ci skip]'
if is_merged_pr:
# Capture the output, as it may contain the GH_TOKEN.
out = subprocess.check_output(['git', 'remote', 'add', 'upstream_with_token',
'https://conda-forge-manager:{}@github.com/conda-forge/staged-recipes'.format(os.environ['GH_TOKEN'])],
stderr=subprocess.STDOUT)
subprocess.check_call(['git', 'commit', '-m', msg])
# Capture the output, as it may contain the GH_TOKEN.
branch = os.environ.get('TRAVIS_BRANCH')
out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'HEAD:%s' % branch],
stderr=subprocess.STDOUT)
else:
print('Would git commit, with the following message: \n {}'.format(msg))
if gh:
# Get our final rate limit info.
print_rate_limiting_info(gh)
sys.exit(exit_code)
|
mhils/HoneyProxy | refs/heads/master | libhproxy/flowcollection.py | 1 | from libmproxy import encoding
from libhproxy.honey import HoneyProxy
import re, socket, cgi, StringIO
import hashlib #@UnusedImport
"""
flatten a given fieldStorage and return a dict with the following structure:
{"filenameA":"filecontentA",...}
This dict will be processed for creating hash checksums
"""
def getParts(fieldStorage,parts=None):
if parts == None:
parts = {}
    if not isinstance(fieldStorage.value, list):
        name = ""
        # empty strings -> None; else: strip()
        fieldStorage.name = fieldStorage.name.strip() if (fieldStorage.name is not None and fieldStorage.name.strip() != "") else None
        fieldStorage.filename = fieldStorage.filename.strip() if (fieldStorage.filename is not None and fieldStorage.filename.strip() != "") else None
        if fieldStorage.name is None and fieldStorage.filename is None:
            if "Checksum" in parts:
                return parts
            name = "Checksum"
        else:
            if fieldStorage.name is not None:
                name = str(fieldStorage.name)
                if fieldStorage.filename is not None:
                    name += ": " + str(fieldStorage.filename)
            else:
                if len(fieldStorage.value) < 1025:
                    return parts  # don't calculate md5s for really small chunks
                elif fieldStorage.filename is not None:
                    name = str(fieldStorage.filename)
        # find the next available name: "name", "name (2)", "name (3)", ...
        base_name = name
        i = 2
        while name in parts:
            name = "%s (%d)" % (base_name, i)
            i += 1
parts[name] = str(fieldStorage.value)
else:
for i in fieldStorage.value:
getParts(i,parts)
return parts
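# Illustrative sketch (hypothetical field names): for a multipart POST with a
# regular field "comment" and a file "report.pdf" uploaded under the field
# "doc", getParts() returns roughly:
#   {"comment": "<field value>", "doc: report.pdf": "<file bytes>"}
# A body without any named parts collapses to {"Checksum": <whole body>}.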
class FlowCollection:
"""
Collects all flows, gives them an id, decodes content.
"""
    regex_charset = re.compile(r"charset=\s*([\S]+|['\"][^'\"]+['\"])")
    regex_isip = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}$")
def __init__(self):
self._flows_serialized = []
self._flows = []
self._decoded_contents = []
def getFlow(self,flowId):
return self._flows[flowId]
def getDecodedContents(self):
return self._decoded_contents
def getFlowsSerialized(self):
return self._flows_serialized
def addFlow(self, flow):
"""
Adds a flow to all lists in the corresponding format
"""
flowRepr = flow._get_state()
flowRepr["id"] = len(self._flows_serialized)
#In transparent mode, we are unsure about the actual host, but we want to show it in the GUI.
#Firstly, we get the Host from the request headers.
#As this might be faked, we go on and check whether the request IP matches one of the DNS entries belonging to the headerHost
if(FlowCollection.regex_isip.match(flowRepr["request"]["host"])):
try:
headerHost = flow.request.headers["Host"]
if(headerHost):
headerHost = headerHost[0]
info = socket.getaddrinfo(flowRepr["request"]["host"], flowRepr["request"]["port"],0,0,socket.SOL_TCP)
for i in info:
if i[4][0] == flowRepr["request"]["host"]:
flowRepr["request"]["host_guess"] = headerHost
break
except socket.gaierror:
pass
except:
import traceback
print traceback.format_exc()
#Save decoded content
decoded_content = {}
algorithms = ["md5","sha1","sha256"]
for i in ["request","response"]:
#strip content out of the flowRepr
flowRepr[i]["contentLength"] = len(flowRepr[i]["content"])
del flowRepr[i]["content"]
r = getattr(flow,i)
decoded = r.content
#decode with http content-encoding
try:
ce = r.headers["content-encoding"]
if ce and ce[0] in encoding.ENCODINGS:
decoded_ = encoding.decode(ce[0],decoded)
if decoded_ != None: #If the decoding fails, encoding.decode returns None.
decoded = decoded_
except:
print "Warning: Data cannot be decoded with given Content Encoding."
#calculate hashsums
flowRepr[i]["contentChecksums"] = {}
parts = {"Checksum":decoded}
#Handle multipart checksums
if i == "request":
try:
headers = dict(map(str.lower, map(str,a)) for a in flow.request.headers) # odict -> (lowered) dict
fs = cgi.FieldStorage(StringIO.StringIO(decoded),headers,environ={ 'REQUEST_METHOD':'POST' })
parts = getParts(fs)
except Exception as e:
import traceback
traceback.print_exc()
print "Warning: Cannot decode multipart"
for item, data in parts.viewitems():
checksums = {}
for a in algorithms:
checksums[a] = getattr(hashlib,a)(data).hexdigest()
flowRepr[i]["contentChecksums"][item] = checksums
#decode with http content-type encoding
ct = r.headers["content-type"]
default_charset = "latin-1" #HTTP 1.1 says that the default charset is ISO-8859-1
#RFC2616 3.7.1
charset = default_charset
if ct:
m = FlowCollection.regex_charset.search(ct[0])
if m:
                    charset = m.group(1).strip('"\'')
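                    # Illustrative matches for regex_charset (hypothetical
                    # header values):
                    #   'text/html; charset=utf-8'        -> 'utf-8'
                    #   'text/html; charset="ISO-8859-2"' -> 'ISO-8859-2'
                    # (surrounding quotes are stripped on the line above)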
#TODO: guess from html metadata
try:
decoded = decoded.decode(charset)
except:
try:
decoded = decoded.decode(default_charset)
except:
print "Warning: Could not decode request."
import traceback
print traceback.format_exc()
try:
decoded = decoded.encode('utf-8')
except:
print "Warning: Cannot encode request to utf8"
decoded_content[i] = decoded
self._flows.append(flow)
self._flows_serialized.append(flowRepr)
self._decoded_contents.append(decoded_content)
return len(self._flows_serialized)-1
class includeDecodedContent(object):
"""
A context manager that adds the decoded request and response content to a serialized list of flows
and removes it after execution of the block
Example:
with includeDecodedContent(flows):
search(flows)
"""
def __init__(self, flows):
self.flows = flows
def __enter__(self):
for flow in self.flows:
for i in ["request","response"]:
flow[i]["content"] = HoneyProxy.getProxyMaster().getFlowCollection().getDecodedContents()[flow.get("id")][i]
def __exit__(self, exc_type, value, tb):
for flow in self.flows:
for i in ["request","response"]:
del flow[i]["content"]
|
Darkmer/masterchief | refs/heads/master | CourseBuilderenv/lib/python2.7/site-packages/pip/basecommand.py | 392 | """Base Command class, and related routines"""
import os
import sys
import tempfile
import traceback
import time
import optparse
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.log import logger
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.backwardcompat import StringIO
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR)
from pip.util import get_prog
__all__ = ['Command']
class Command(object):
name = None
usage = None
hidden = False
def __init__(self):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
self.parser.add_option_group(gen_opts)
def _build_session(self, options):
session = PipSession()
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle timeouts
if options.timeout:
session.timeout = options.timeout
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
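    # Illustrative sketch (hypothetical option values): for parsed options
    # where options.proxy == 'http://10.0.0.1:3128' and options.timeout == 15,
    # the session built above is roughly equivalent to:
    #   session = PipSession()
    #   session.timeout = 15
    #   session.proxies = {'http': 'http://10.0.0.1:3128',
    #                      'https': 'http://10.0.0.1:3128'}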
def setup_logging(self):
pass
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
level = 1 # Notify
level += options.verbose
level -= options.quiet
level = logger.level_for_integer(4 - level)
complete_log = []
logger.add_consumers(
(level, sys.stdout),
(logger.DEBUG, complete_log.append),
)
if options.log_explicit_levels:
logger.explicit_levels = True
self.setup_logging()
#TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.fatal('Could not find an activated virtualenv (required).')
sys.exit(VIRTUALENV_NOT_FOUND)
if options.log:
log_fp = open_logfile(options.log, 'a')
logger.add_consumers((logger.DEBUG, log_fp))
else:
log_fp = None
exit = SUCCESS
store_log = False
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
exit = status
except PreviousBuildDirError:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError):
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except BadCommand:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except CommandError:
e = sys.exc_info()[1]
logger.fatal('ERROR: %s' % e)
logger.info('Exception information:\n%s' % format_exc())
exit = ERROR
except KeyboardInterrupt:
logger.fatal('Operation cancelled by user')
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except:
logger.fatal('Exception:\n%s' % format_exc())
store_log = True
exit = UNKNOWN_ERROR
if store_log:
log_file_fn = options.log_file
text = '\n'.join(complete_log)
try:
log_file_fp = open_logfile(log_file_fn, 'w')
except IOError:
temp = tempfile.NamedTemporaryFile(delete=False)
log_file_fn = temp.name
log_file_fp = open_logfile(log_file_fn, 'w')
logger.fatal('Storing debug log for failure in %s' % log_file_fn)
log_file_fp.write(text)
log_file_fp.close()
if log_fp is not None:
log_fp.close()
return exit
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-' * 60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp
|
caasiu/xbmc-addons-chinese | refs/heads/master | plugin.video.cntv-replay/default.py | 6 | import sys, os, time
import urllib, urlparse
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
def cn_time_s(): # return CST (China Standard Time) in seconds
lc_time=time.localtime()
gm_time_s=time.mktime(time.gmtime())
return gm_time_s + (8-lc_time.tm_isdst)*60*60 # CST = GMT + 8h, tm_isdst = {1,0,-1}
addon = xbmcaddon.Addon()
title=addon.getAddonInfo('name')
thumbnail=addon.getAddonInfo('icon')
pwd_path=addon.getAddonInfo('path')
mediaType='Video'
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
media_ended=False
media_stopped=False
# generate tv_listings
f = open(os.path.join(pwd_path,'cctv_channels.txt'),'r') # list of channels
tv_listing = []
for line in f:
if line.startswith('cctv'):
tv_listing.append(line.split())
f.close()
xbmcplugin.setContent(addon_handle, 'movies')
channel='cctv00'
class XBMCPlayer( xbmc.Player ):
def __init__( self, *args ):
pass
xbmc.log( '#=#=#=# '+ channel +' XBMCPlayer Initialized #=#=#=#' )
def __del__(self):
xbmc.log( '#=#=#=# '+ channel +' XBMCPlayer Destructed #=#=#=#' )
    def onPlayBackPaused( self ):
        xbmc.log( '#=#=#=# Status: '+ channel +' Playback Paused #=#=#=#' )
    def onPlayBackResumed( self ):
        xbmc.log( '#=#=#=# Status: '+ channel +' Playback Resumed #=#=#=#' )
def onPlayBackStarted( self ):
# Will be called when xbmc starts playing a file
xbmc.log( '#=#=#=# Status: '+ channel +' Playback Started #=#=#=#' )
global media_ended
global media_stopped
media_ended=False
media_stopped=False
def onPlayBackEnded( self ):
# Will be called when xbmc ended playing a file
        xbmc.log( '#=#=#=# Status: '+ channel +' Playback Ended #=#=#=#' )
global media_ended
media_ended=True
global media_stopped # let treated media_ended the same as media_stopped for now
media_stopped=True
def onPlayBackStopped( self ):
# Will be called when user stops xbmc playing a file
xbmc.log( '#=#=#=# Status: '+ channel +' Playback Stopped #=#=#=#' )
global media_stopped
media_stopped=True
# self.stop()
class XBMCMonitor( xbmc.Monitor ):
def __init__( self, *args ):
pass
xbmc.log( "#=#=#=# Monitor initialized #=#=#=#" )
def __del__( self ):
xbmc.log( "#=#=#=# Monitor destructed #=#=#=#" )
def abortRequested( self ):
# Returns True if abort has been requested.
xbmc.log( "#=#=#=# Status: ** abort *** has been requestd #=#=#=#" )
def cntvplay (ch):
b_url='http://220.243.235.9/v.cctv.com/live_back/nettv_' + ch +'/' + ch +'-'
# b_url='http://8.37.234.13/v.cctv.com/live_back/nettv_' + ch +'/' + ch +'-'
player = XBMCPlayer()
monitor = XBMCMonitor()
global media_stopped
while(not media_stopped):
cur=cn_time_s()
hr = (time.strftime("%Y-%m-%d-%H",time.localtime(cur-600)))
seg = '%03d' % (int((time.strftime("%M",time.localtime(cur-600))))/5+1)
url = b_url + hr + "-" + seg + '.mp4?wsiphost=local'
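        # Worked example (hypothetical clock value): if CST is 14:23 on
        # 2015-06-01, cur-600 falls at 14:13, so hr == '2015-06-01-14' and
        # seg == '%03d' % (13/5 + 1) == '003', giving a replay URL like
        #   .../nettv_cctv1/cctv1-2015-06-01-14-003.mp4?wsiphost=local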
li = xbmcgui.ListItem(label=title, iconImage=thumbnail, thumbnailImage=thumbnail, path=url)
li.setInfo(type=mediaType, infoLabels={ "Title": title })
player.play(item=url, listitem=li)
for x in range(1, 300):
if monitor.waitForAbort(1) or media_stopped: # Sleep/wait for abort for 1 second
xbmc.log( '#=#=#=# '+ ch +' aborted or media_stopped #=#=#=#' )
media_stopped=True
break # Abort was requested while waiting. Exit the while loop.
player.stop()
xbmc.log( '#=#=#=# left ' + ch + ' #=#=#=#' )
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
mode = args.get('mode', None)
if mode is None: # first time call, fill up the tv_listing
for i in tv_listing:
url = build_url({'mode': 'folder', 'foldername': i[0]})
li = xbmcgui.ListItem(i[0], iconImage=pwd_path + '/' + i[0]+'.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'folder':
channel=args['foldername'][0]
cntvplay(channel) # should get cctv1, cctv2 etc
|
saideepchandg/oracle-r12-accounting | refs/heads/master | lib/django/core/mail/backends/smtp.py | 477 | """SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from django.core.mail.utils import DNS_NAME
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, use_ssl=None, timeout=None,
ssl_keyfile=None, ssl_certfile=None,
**kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
self.username = settings.EMAIL_HOST_USER if username is None else username
self.password = settings.EMAIL_HOST_PASSWORD if password is None else password
self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl
self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout
self.ssl_keyfile = settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile
self.ssl_certfile = settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile
if self.use_ssl and self.use_tls:
raise ValueError(
"EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
"one of those settings to True.")
self.connection = None
self._lock = threading.RLock()
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_class = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
if self.use_ssl:
connection_params.update({
'keyfile': self.ssl_keyfile,
'certfile': self.ssl_certfile,
})
try:
self.connection = connection_class(self.host, self.port, **connection_params)
# TLS/SSL are mutually exclusive, so only attempt TLS over
# non-secure connections.
if not self.use_ssl and self.use_tls:
self.connection.ehlo()
self.connection.starttls(keyfile=self.ssl_keyfile, certfile=self.ssl_certfile)
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except smtplib.SMTPException:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except smtplib.SMTPException:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
if not self.connection:
# We failed silently on open().
# Trying to send would be pointless.
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
message = email_message.message()
try:
self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n'))
except smtplib.SMTPException:
if not self.fail_silently:
raise
return False
return True
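# Illustrative usage sketch (hypothetical host and credentials; assumes
# Django settings are configured):
#   from django.core.mail import EmailMessage
#   backend = EmailBackend(host='smtp.example.com', port=587, use_tls=True,
#                          username='mailer', password='secret')
#   message = EmailMessage('Subject', 'Body', 'from@example.com',
#                          ['to@example.com'], connection=backend)
#   backend.send_messages([message])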
|
ASCrookes/django | refs/heads/master | tests/admin_inlines/test_templates.py | 285 | from __future__ import unicode_literals
from django.template.loader import render_to_string
from django.test import SimpleTestCase
class TestTemplates(SimpleTestCase):
def test_javascript_escaping(self):
context = {
'inline_admin_formset': {
'formset': {'prefix': 'my-prefix'},
'opts': {'verbose_name': 'verbose name\\'},
},
}
output = render_to_string('admin/edit_inline/stacked.html', context)
self.assertIn('prefix: "my\\u002Dprefix",', output)
self.assertIn('addText: "Add another Verbose name\\u005C"', output)
output = render_to_string('admin/edit_inline/tabular.html', context)
self.assertIn('prefix: "my\\u002Dprefix",', output)
self.assertIn('addText: "Add another Verbose name\\u005C"', output)
|
Yen-Chung-En/2015cdb_g1_0623-2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/event.py | 603 | #!/usr/bin/env python
'''Pygame module for interacting with events and queues.
Pygame handles all it's event messaging through an event queue. The routines
in this module help you manage that event queue. The input queue is heavily
dependent on the pygame display module. If the display has not been
initialized and a video mode not set, the event queue will not really work.
The queue is a regular queue of Event objects, there are a variety of ways
to access the events it contains. From simply checking for the existance of
events, to grabbing them directly off the stack.
All events have a type identifier. This event type is in between the values
of NOEVENT and NUMEVENTS. All user defined events can have the value of
USEREVENT or higher. It is recommended make sure your event id's follow this
system.
To get the state of various input devices, you can forego the event queue
and access the input devices directly with their appropriate modules; mouse,
key, and joystick. If you use this method, remember that pygame requires some
form of communication with the system window manager and other parts of the
platform. To keep pygame in synch with the system, you will need to call
pygame.event.pump() to keep everything current. You'll want to call this
function usually once per game loop.
The event queue offers some simple filtering. This can help performance
slightly by blocking certain event types from the queue, use the
pygame.event.set_allowed() and pygame.event.set_blocked() to work with
this filtering. All events default to allowed.
Joysticks will not send any events until the device has been initialized.
An Event object contains an event type and a readonly set of member data.
The Event object contains no method functions, just member data. Event
objects are retrieved from the pygame event queue. You can create your
own new events with the pygame.event.Event() function.
Your program must take steps to keep the event queue from overflowing. If the
program is not clearing or getting all events off the queue at regular
intervals, it can overflow. When the queue overflows an exception is thrown.
All Event objects contain an event type identifier in the Event.type member.
You may also get full access to the Event's member data through the Event.dict
attribute. All other member lookups will be passed through to the Event's
dictionary values.
While debugging and experimenting, you can print the Event objects for a
quick display of its type and members. Events that come from the system
will have a guaranteed set of member items based on the type. Here is a
list of the Event members that are defined with each type.
QUIT
(none)
ACTIVEEVENT
gain, state
KEYDOWN
unicode, key, mod
KEYUP
key, mod
MOUSEMOTION
pos, rel, buttons
MOUSEBUTTONUP
pos, button
MOUSEBUTTONDOWN
pos, button
JOYAXISMOTION
joy, axis, value
JOYBALLMOTION
joy, ball, rel
JOYHATMOTION
joy, hat, value
JOYBUTTONUP
joy, button
JOYBUTTONDOWN
joy, button
VIDEORESIZE
size, w, h
VIDEOEXPOSE
(none)
USEREVENT
code
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from copy import copy
#from ctypes import * #brython
from SDL import *
import pygame.base
import pygame.locals
import pygame.display
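# Illustrative sketch of a typical polling loop (assumes a display mode has
# already been set via pygame.display.set_mode):
#   running = True
#   while running:
#       for event in pygame.event.get():
#           if event.type == pygame.locals.QUIT:
#               running = False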
def pump():
'''Internally process pygame event handlers.
For each frame of your game, you will need to make some sort of call to
the event queue. This ensures your program can internally interact with
the rest of the operating system. If you are not using other event
functions in your game, you should call pygame.event.pump() to allow
pygame to handle internal actions.
This function is not necessary if your program is consistently processing
events on the queue through the other pygame.event functions.
There are important things that must be dealt with internally in the event
queue. The main window may need to be repainted or respond to the system.
If you fail to make a call to the event queue for too long, the system may
decide your program has locked up.
'''
pygame.display._video_init_check()
SDL_PumpEvents()
def get(typelist=None):
'''Get events from the queue.
pygame.event.get(): return Eventlist
pygame.event.get(type): return Eventlist
pygame.event.get(typelist): return Eventlist
This will get all the messages and remove them from the queue. If a type
or sequence of types is given only those messages will be removed from the
queue.
If you are only taking specific events from the queue, be aware that the
    queue could eventually fill up with the events you are not interested in.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types that can be returned.
:rtype: list of `Event`
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
events.append(Event(0, sdl_event=new_events[0]))
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
return events
def poll():
'''Get a single event from the queue.
Returns a single event from the queue. If the event queue is empty an event
of type pygame.NOEVENT will be returned immediately. The returned event is
removed from the queue.
:rtype: Event
'''
pygame.display._video_init_check()
event = SDL_PollEventAndReturn()
if event:
return Event(0, sdl_event=event, keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT)
def wait():
'''Wait for a single event from the queue.
Returns a single event from the queue. If the queue is empty this function
will wait until one is created. While the program is waiting it will sleep
in an idle state. This is important for programs that want to share the
system with other applications.
:rtype: Event
'''
pygame.display._video_init_check()
return Event(0, sdl_event=SDL_WaitEventAndReturn())
def peek(typelist=None):
'''Test if event types are waiting on the queue.
Returns true if there are any events of the given type waiting on the
queue. If a sequence of event types is passed, this will return True if
any of those events are on the queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to look for.
:rtype: bool
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = SDL_EVENTMASK(int(typelist))
SDL_PumpEvents()
events = SDL_PeepEvents(1, SDL_PEEKEVENT, mask)
if typelist is None:
if events:
return Event(0, sdl_event=events[0], keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT) # XXX deviation from pygame
return len(events) > 0
def clear(typelist=None):
'''Remove all events from the queue.
Remove all events or events of a specific type from the queue. This has the
same effect as `get` except nothing is returned. This can be slightly more
    efficient when clearing a full event queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to remove.
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
_event_names = {
SDL_ACTIVEEVENT: 'ActiveEvent',
SDL_KEYDOWN: 'KeyDown',
SDL_KEYUP: 'KeyUp',
SDL_MOUSEMOTION: 'MouseMotion',
SDL_MOUSEBUTTONDOWN:'MouseButtonDown',
SDL_MOUSEBUTTONUP: 'MouseButtonUp',
SDL_JOYAXISMOTION: 'JoyAxisMotion',
SDL_JOYBALLMOTION: 'JoyBallMotion',
SDL_JOYHATMOTION: 'JoyHatMotion',
SDL_JOYBUTTONUP: 'JoyButtonUp',
SDL_JOYBUTTONDOWN: 'JoyButtonDown',
SDL_QUIT: 'Quit',
SDL_SYSWMEVENT: 'SysWMEvent',
SDL_VIDEORESIZE: 'VideoResize',
SDL_VIDEOEXPOSE: 'VideoExpose',
SDL_NOEVENT: 'NoEvent'
}
def event_name(event_type):
'''Get the string name from an event id.
Pygame uses integer ids to represent the event types. If you want to
report these types to the user they should be converted to strings. This
    will return the simple name for an event type. The string is in the
CamelCase style.
:Parameters:
- `event_type`: int
:rtype: str
'''
if event_type >= SDL_USEREVENT and event_type < SDL_NUMEVENTS:
return 'UserEvent'
return _event_names.get(event_type, 'Unknown')
def set_blocked(typelist):
'''Control which events are allowed on the queue.
The given event types are not allowed to appear on the event queue. By
default all events can be placed on the queue. It is safe to disable an
event type multiple times.
If None is passed as the argument, this has the opposite effect and none of
the event types are allowed to be placed on the queue.
:note: events posted with `post` will not be blocked.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_IGNORE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_IGNORE)
else:
SDL_EventState(typelist, SDL_IGNORE)
def set_allowed(typelist):
'''Control which events are allowed on the queue.
The given event types are allowed to appear on the event queue. By default
all events can be placed on the queue. It is safe to enable an event type
multiple times.
If None is passed as the argument, this has the opposite effect and all of
the event types are allowed to be placed on the queue.
:Parameters:
`typelist` : int or sequence of int or None
            Event type or list of event types to allow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_ENABLE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_ENABLE)
else:
SDL_EventState(typelist, SDL_ENABLE)
def get_blocked(typelist):
'''Test if a type of event is blocked from the queue.
Returns true if the given event type is blocked from the queue.
:Parameters:
- `event_type`: int
:rtype: int
'''
pygame.display._video_init_check()
    if typelist is None:
return SDL_EventState(SDL_ALLEVENTS, SDL_QUERY) == SDL_ENABLE
elif hasattr(typelist, '__len__'): # XXX undocumented behaviour
for val in typelist:
if SDL_EventState(val, SDL_QUERY) == SDL_ENABLE:
return True
return False
else:
return SDL_EventState(typelist, SDL_QUERY) == SDL_ENABLE
def set_grab(grab):
'''Control the sharing of input devices with other applications.
When your program runs in a windowed environment, it will share the mouse
and keyboard devices with other applications that have focus. If your
program sets the event grab to True, it will lock all input into your
program.
It is best to not always grab the input, since it prevents the user from
doing other things on their system.
:Parameters:
- `grab`: bool
'''
pygame.display._video_init_check()
if grab:
SDL_WM_GrabInput(SDL_GRAB_ON)
else:
SDL_WM_GrabInput(SDL_GRAB_OFF)
def get_grab():
'''Test if the program is sharing input devices.
Returns true when the input events are grabbed for this application. Use
`set_grab` to control this state.
:rtype: bool
'''
pygame.display._video_init_check()
return SDL_WM_GrabInput(SDL_GRAB_QUERY) == SDL_GRAB_ON
_USEROBJECT_CHECK1 = int(0xdeadbeef) # signed
_USEROBJECT_CHECK2 = 0xfeedf00d
_user_event_objects = {}
_user_event_nextid = 1
def post(event):
'''Place a new event on the queue.
This places a new event at the end of the event queue. These Events will
later be retrieved from the other queue functions.
This is usually used for placing pygame.USEREVENT events on the queue.
    Although any type of event can be placed, if using the system event types
your program should be sure to create the standard attributes with
appropriate values.
:Parameters:
`event` : Event
Event to add to the queue.
'''
global _user_event_nextid
pygame.display._video_init_check()
sdl_event = SDL_Event(event.type)
sdl_event.user.code = _USEROBJECT_CHECK1
sdl_event.user.data1 = c_void_p(_USEROBJECT_CHECK2)
sdl_event.user.data2 = c_void_p(_user_event_nextid)
_user_event_objects[_user_event_nextid] = event
_user_event_nextid += 1
SDL_PushEvent(sdl_event)
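# Illustrative sketch (hypothetical attribute names): posting a custom event
# and reading it back from the queue:
#   my_event = Event(pygame.locals.USEREVENT, code='tick', payload=42)
#   pygame.event.post(my_event)
#   for ev in pygame.event.get(pygame.locals.USEREVENT):
#       print ev.payload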
class Event:
def __init__(self, event_type, event_dict=None, sdl_event=None,
keep_userdata=False, **attributes):
'''Create a new event object.
Creates a new event with the given type. The event is created with the
given attributes and values. The attributes can come from a dictionary
        argument, or be given as keyword arguments.
The given attributes will be readonly attributes on the new event
object itself. These are the only attributes on the Event object,
there are no methods attached to Event objects.
:Parameters:
`event_type` : int
Event type to create
`event_dict` : dict
Dictionary of attributes to assign.
`sdl_event` : `SDL_Event`
Construct a Pygame event from the given SDL_Event; used
internally.
`keep_userdata` : bool
Used internally.
`attributes` : additional keyword arguments
Additional attributes to assign to the event.
'''
if sdl_event:
uevent = cast(pointer(sdl_event), POINTER(SDL_UserEvent)).contents
if uevent.code == _USEROBJECT_CHECK1 and \
uevent.data1 == _USEROBJECT_CHECK2 and \
uevent.data2 in _user_event_objects:
# An event that was posted; grab dict from local store.
id = sdl_event.data2
for key, value in _user_event_objects[id].__dict__.items():
setattr(self, key, value)
# Free memory unless just peeking
if not keep_userdata:
del _user_event_objects[id]
else:
# Standard SDL event
self.type = sdl_event.type
if self.type == SDL_QUIT:
pass
elif self.type == SDL_ACTIVEEVENT:
self.gain = sdl_event.gain
self.state = sdl_event.state
elif self.type == SDL_KEYDOWN:
self.unicode = sdl_event.keysym.unicode
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_KEYUP:
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_MOUSEMOTION:
self.pos = (sdl_event.x, sdl_event.y)
self.rel = (sdl_event.xrel, sdl_event.yrel)
self.buttons = (sdl_event.state & SDL_BUTTON(1) != 0,
sdl_event.state & SDL_BUTTON(2) != 0,
sdl_event.state & SDL_BUTTON(3) != 0)
elif self.type in (SDL_MOUSEBUTTONDOWN, SDL_MOUSEBUTTONUP):
self.pos = (sdl_event.x, sdl_event.y)
self.button = sdl_event.button
elif self.type == SDL_JOYAXISMOTION:
self.joy = sdl_event.which
self.axis = sdl_event.axis
self.value = sdl_event.value / 32767.0
elif self.type == SDL_JOYBALLMOTION:
self.joy = sdl_event.which
self.ball = sdl_event.ball
self.rel = (sdl_event.xrel, sdl_event.yrel)
elif self.type == SDL_JOYHATMOTION:
self.joy = sdl_event.which
self.hat = sdl_event.hat
hx = hy = 0
if sdl_event.value & SDL_HAT_UP:
hy = 1
if sdl_event.value & SDL_HAT_DOWN:
hy = -1
if sdl_event.value & SDL_HAT_RIGHT:
hx = 1
if sdl_event.value & SDL_HAT_LEFT:
hx = -1
self.value = (hx, hy)
elif self.type in (SDL_JOYBUTTONUP, SDL_JOYBUTTONDOWN):
self.joy = sdl_event.which
self.button = sdl_event.button
elif self.type == SDL_VIDEORESIZE:
self.size = (sdl_event.w, sdl_event.h)
self.w = sdl_event.w
self.h = sdl_event.h
elif self.type == SDL_VIDEOEXPOSE:
pass
elif self.type == SDL_SYSWMEVENT:
pass ### XXX: not implemented
elif self.type >= SDL_USEREVENT and self.type < SDL_NUMEVENTS:
self.code = sdl_event.code
else:
# Create an event (not from event queue)
self.type = event_type
if event_dict:
for key, value in event_dict.items():
setattr(self, key, value)
for key, value in attributes.items():
setattr(self, key, value)
        # Bizarre and undocumented, but used by some people.
self.dict = self.__dict__
def __repr__(self):
d = copy(self.__dict__)
del d['type']
return '<Event(%d-%s %r)>' % \
(self.type, event_name(self.type), d)
def __nonzero__(self):
return self.type != SDL_NOEVENT
EventType = Event
|
torchingloom/edx-platform | refs/heads/select/release | common/lib/xmodule/xmodule/tests/test_stringify.py | 68 | from nose.tools import assert_equals # pylint: disable=E0611
from lxml import etree
from xmodule.stringify import stringify_children
def test_stringify():
text = 'Hi <div x="foo">there <span>Bruce</span><b>!</b></div>'
html = '''<html a="b" foo="bar">{0}</html>'''.format(text)
xml = etree.fromstring(html)
out = stringify_children(xml)
assert_equals(out, text)
def test_stringify_again():
html = r"""<html name="Voltage Source Answer" >A voltage source is non-linear!
<div align="center">
<img src="/static/images/circuits/voltage-source.png"/>
\(V=V_C\)
</div>
But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
which means linear except for an offset.
</html>
"""
html = """<html>A voltage source is non-linear!
<div align="center">
</div>
But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
which means linear except for an offset.
</html>
"""
xml = etree.fromstring(html)
out = stringify_children(xml)
print "output:"
print out
# Tracking strange content repeating bug
# Should appear once
assert_equals(out.count("But it is "), 1)
|
todaychi/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/forms_tests/tests/test_media.py | 131 | # -*- coding: utf-8 -*-
from django.forms import TextInput, Media, CharField, Form, MultiWidget
from django.template import Template, Context
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
STATIC_URL=None,
MEDIA_URL='http://media.example.com/media/',
)
class FormsMediaTestCase(TestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
# If a widget extends another but defines media, it extends the parents widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1','/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1','/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
@override_settings(
STATIC_URL='http://media.example.com/static/',
MEDIA_URL='http://media.example.com/media/',
)
class StaticFormsMediaTestCase(TestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
        # Any absolute path will be preserved; relative paths are combined
        # with the value of settings.STATIC_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the parent widget's media,
        # even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1','/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1','/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
|
hthompson6/contrail-controller | refs/heads/master | src/config/utils/service_appliance.py | 13 | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import time
import argparse
import ConfigParser
import json
from vnc_api.vnc_api import *
from cfgm_common.exceptions import *
class SAProvisioner(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = '|'.join(sys.argv[1:])
self._parse_args(args_str)
connected = False
tries = 0
while not connected:
try:
self._vnc_lib = VncApi(
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/',
auth_host=self._args.openstack_ip)
connected = True
except ResourceExhaustionError: # haproxy throws 503
if tries < 10:
tries += 1
time.sleep(3)
else:
raise
if self._args.oper == 'add':
self.add_sa()
elif self._args.oper == 'del':
self.del_sa()
else:
print "Unknown operation %s. Only 'add' and 'del' supported"\
% (self._args.oper)
# end __init__
def _parse_args(self, args_str):
'''
Eg. python service_appliance.py --name bigip --device_ip <ip>
--user_credential {"user": "root", "password": "c0ntrail123"}
--api_server_ip 127.0.0.1
--api_server_port 8082
--oper <add | del>
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split('|'))
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'oper': 'add',
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--name", help="name of service appliance", required=True)
parser.add_argument(
"--service_appliance_set", help="name of service appliance set",
required=True)
parser.add_argument("--device_ip", help="Address of the loadbalancer device")
parser.add_argument("--properties",
help="JSON dictionary of config params for the lbaas device",
type=json.loads, default=json.loads("{}"))
parser.add_argument("--user_credential",
help="JSON dictionary of login details to the lbaas device",
type=json.loads, default=json.loads("{}"))
parser.add_argument(
"--api_server_ip", help="IP address of api server", required=True)
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument(
"--oper", default='add',
help="Provision operation to be done(add or del)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenamt name for keystone admin user")
parser.add_argument(
"--openstack_ip", help="IP address of openstack node")
self._args = parser.parse_args(remaining_argv)
# end _parse_args
def add_sa(self):
default_gsc_name = "default-global-system-config"
sa_set_fq_name = [default_gsc_name, self._args.service_appliance_set]
sa_fq_name = [default_gsc_name, self._args.service_appliance_set, self._args.name]
try:
sa_set_obj = self._vnc_lib.service_appliance_set_read(fq_name=sa_set_fq_name)
except NoIdError as e:
print str(e)
return
sa_obj = ServiceAppliance(self._args.name, sa_set_obj)
try:
sa_obj = self._vnc_lib.service_appliance_read(fq_name=sa_fq_name)
return
except NoIdError:
pass
sa_obj.set_service_appliance_ip_address(self._args.device_ip)
uci = UserCredentials(self._args.user_credential['user'],
self._args.user_credential['password'])
sa_obj.set_service_appliance_user_credentials(uci)
kvp_array = []
for r,c in self._args.properties.iteritems():
kvp = KeyValuePair(r,c)
kvp_array.append(kvp)
kvps = KeyValuePairs()
if kvp_array:
kvps.set_key_value_pair(kvp_array)
sa_obj.set_service_appliance_properties(kvps)
sa_uuid = self._vnc_lib.service_appliance_create(sa_obj)
# end add_sa
def del_sa(self):
default_gsc_name = "default-global-system-config"
sa_fq_name = [default_gsc_name, self._args.service_appliance_set, self._args.name]
self._vnc_lib.service_appliance_delete(fq_name=sa_fq_name)
# end del_sa
# end class SAProvisioner
def main(args_str=None):
SAProvisioner(args_str)
# end main
if __name__ == "__main__":
main()
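# Example invocation (all values below are illustrative, not from a real
# deployment):
#
#   python service_appliance.py --name bigip-1 \
#       --service_appliance_set f5 \
#       --device_ip 10.84.13.45 \
#       --user_credential '{"user": "admin", "password": "secret"}' \
#       --api_server_ip 127.0.0.1 --api_server_port 8082 --oper add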
|
2013Commons/HUE-SHARK | refs/heads/master | desktop/core/ext-py/Django-1.2.3/django/contrib/gis/geos/point.py | 403 | from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, float, long)) and isinstance(y, (int, float, long)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, (int, float, long)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, i.next())
capi.cs_sety(cs, 0, i.next())
if ndim == 3: capi.cs_setz(cs, 0, i.next())
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty: return 0
if self.hasz: return 3
else: return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
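# Minimal usage sketch (coordinates are illustrative):
#
#   >>> p = Point(5, 23)
#   >>> p.x, p.y
#   (5.0, 23.0)
#   >>> len(p)         # number of dimensions
#   2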
|
dialogtekgeek/DSTC6-End-to-End-Conversation-Modeling | refs/heads/master | ChatbotBaseline/tools/do_conversation.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Interactive neural conversation demo
Copyright (c) 2017 Takaaki Hori (thori@merl.com)
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
"""
import argparse
import sys
import os
import pickle
import re
import six
import numpy as np
import chainer
from chainer import cuda
from nltk.tokenize import casual_tokenize
##################################
# main
if __name__ =="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=0, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--beam', '-b', default=5, type=int,
help='set beam width')
parser.add_argument('--penalty', '-p', default=1., type=float,
help='set insertion penalty')
parser.add_argument('--nbest', '-n', default=1, type=int,
help='generate n-best sentences')
parser.add_argument('--maxlen', default=20, type=int,
help='set maximum sequence length in beam search')
parser.add_argument('model', nargs=1,
help='conversation model file')
args = parser.parse_args()
if args.gpu >= 0:
cuda.check_cuda_available()
cuda.get_device(args.gpu).use()
xp = cuda.cupy
else:
xp = np
# use chainer in testing mode
chainer.config.train = False
# Prepare RNN model and load data
print("--- do neural conversations ------")
print('Loading model params from ' + args.model[0])
with open(args.model[0], 'rb') as f:
vocab, model, train_args = pickle.load(f)
if args.gpu >= 0:
model.to_gpu()
# report data summary
print('vocabulary size = %d' % len(vocab))
vocablist = sorted(vocab.keys(), key=lambda s:vocab[s])
# generate sentences
print("--- start conversation [push Cntl-D to exit] ------")
unk = vocab['<unk>']
eos = vocab['<eos>']
state = None
while True:
try:
input_str = six.moves.input('U: ')
except EOFError:
break
if input_str:
if input_str=='exit' or input_str=='quit':
break
sentence = []
for token in casual_tokenize(input_str, preserve_case=False, reduce_len=True):
# make a space before apostrophe
token = re.sub(r'^([a-z]+)\'([a-z]+)$','\\1 \'\\2',token)
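                # e.g. "don't" -> "don 't", so the contraction is looked up
                # as two separate vocabulary entries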
for w in token.split():
sentence.append(vocab[w] if w in vocab else unk)
x_data = np.array(sentence, dtype=np.int32)
x = chainer.Variable(xp.asarray(x_data))
besthyps,state = model.generate(state, x, eos, eos, unk=unk,
maxlen=args.maxlen,
beam=args.beam,
penalty=args.penalty,
nbest=args.nbest)
## print sentence
if args.nbest == 1:
sys.stdout.write('S:')
for w in besthyps[0][0]:
if w != eos:
sys.stdout.write(' ' + vocablist[w])
sys.stdout.write('\n')
else:
for n,s in enumerate(besthyps):
sys.stdout.write('S%d:' % n)
for w in s[0]:
if w != eos:
sys.stdout.write(' ' + vocablist[w])
                    sys.stdout.write(' (%f)\n' % s[1])
else:
print("--- start conversation [push Cntl-D to exit] ------")
state = None
print('done')
|
nthien/docker-registry | refs/heads/master | docker_registry/lib/rqueue.py | 35 | # -*- coding: utf-8 -*-
# this module is a slight modification of Ted Nyman's QR
# https://raw.github.com/tnm/qr/master/qr.py
import logging
from docker_registry.core import compat
json = compat.json
class NullHandler(logging.Handler):
"""A logging handler that discards all logging records."""
def emit(self, record):
pass
# Clients can add handlers if they are interested.
log = logging.getLogger('qr')
log.addHandler(NullHandler())
class worker(object):
def __init__(self, q, *args, **kwargs):
self.q = q
self.err = kwargs.get('err', None)
self.args = args
self.kwargs = kwargs
def __call__(self, f):
def wrapped():
while True:
# Blocking pop
next = self.q.pop(block=True)
if not next:
continue
try:
# Try to execute the user's callback.
f(next, *self.args, **self.kwargs)
except Exception as e:
try:
# Failing that, let's call the user's
# err-back, which we should keep from
# ever throwing an exception
self.err(e, *self.args, **self.kwargs)
except Exception:
pass
return wrapped
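# Usage sketch (the queue object and callbacks are illustrative):
#
#   @worker(my_queue, err=lambda e: log.error(repr(e)))
#   def handle(item):
#       do_something(item)      # invoked once per popped element
#
#   handle()                    # blocks forever, popping and dispatching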
class BaseQueue(object):
"""Base functionality common to queues."""
def __init__(self, r_conn, key, **kwargs):
self.serializer = json
self.redis = r_conn
self.key = key
def __len__(self):
"""Return the length of the queue."""
return self.redis.llen(self.key)
def __getitem__(self, val):
"""Get a slice or a particular index."""
try:
            items = self.redis.lrange(self.key, val.start, val.stop - 1)
            return [self._unpack(i) for i in items]
except AttributeError:
return self._unpack(self.redis.lindex(self.key, val))
except Exception as e:
log.error('Get item failed ** %s' % repr(e))
return None
def _pack(self, val):
"""Prepares a message to go into Redis."""
return self.serializer.dumps(val, 1)
def _unpack(self, val):
"""Unpacks a message stored in Redis."""
try:
return self.serializer.loads(val)
except TypeError:
return None
def dump(self, fobj):
"""Destructively dump the contents of the queue into fp."""
next = self.redis.rpop(self.key)
while next:
fobj.write(next)
next = self.redis.rpop(self.key)
def load(self, fobj):
"""Load the contents of the provided fobj into the queue."""
try:
while True:
val = self._pack(self.serializer.load(fobj))
self.redis.lpush(self.key, val)
except Exception:
return
def dumpfname(self, fname, truncate=False):
"""Destructively dump the contents of the queue into fname."""
if truncate:
            with open(fname, 'w+') as f:
self.dump(f)
else:
            with open(fname, 'a+') as f:
self.dump(f)
def loadfname(self, fname):
"""Load the contents of the contents of fname into the queue."""
        with open(fname) as f:
self.load(f)
def extend(self, vals):
"""Extends the elements in the queue."""
with self.redis.pipeline(transaction=False) as pipe:
for val in vals:
pipe.lpush(self.key, self._pack(val))
pipe.execute()
def peek(self):
"""Look at the next item in the queue."""
return self[-1]
def elements(self):
"""Return all elements as a Python list."""
return [self._unpack(o) for o in self.redis.lrange(self.key, 0, -1)]
def elements_as_json(self):
"""Return all elements as JSON object."""
        return json.dumps(self.elements())
def clear(self):
"""Removes all the elements in the queue."""
self.redis.delete(self.key)
class CappedCollection(BaseQueue):
"""a bounded queue
Implements a capped collection (the collection never
gets larger than the specified size).
"""
def __init__(self, r_conn, key, size, **kwargs):
BaseQueue.__init__(self, r_conn, key, **kwargs)
self.size = size
def push(self, element):
size = self.size
with self.redis.pipeline() as pipe:
# ltrim is zero-indexed
val = self._pack(element)
pipe = pipe.lpush(self.key, val).ltrim(self.key, 0, size - 1)
pipe.execute()
def extend(self, vals):
"""Extends the elements in the queue."""
with self.redis.pipeline() as pipe:
for val in vals:
pipe.lpush(self.key, self._pack(val))
pipe.ltrim(self.key, 0, self.size - 1)
pipe.execute()
def pop(self, block=False):
if not block:
popped = self.redis.rpop(self.key)
else:
queue, popped = self.redis.brpop(self.key)
log.debug('Popped ** %s ** from key ** %s **' % (popped, self.key))
return self._unpack(popped)
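# Usage sketch (connection details and size are illustrative):
#
#   import redis
#   events = CappedCollection(redis.StrictRedis(), 'events', size=100)
#   events.push({'id': 1})           # entries beyond `size` are trimmed
#   item = events.pop(block=False)   # -> {'id': 1}, or None when empty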
|
public-ink/public-ink | refs/heads/master | server/appengine/lib/graphql/utils/build_ast_schema.py | 3 | from ..execution.values import get_argument_values
from ..language import ast
from ..pyutils.ordereddict import OrderedDict
from ..type import (GraphQLArgument, GraphQLBoolean,
GraphQLDeprecatedDirective, GraphQLDirective,
GraphQLEnumType, GraphQLEnumValue, GraphQLField,
GraphQLFloat, GraphQLID, GraphQLIncludeDirective,
GraphQLInputObjectField, GraphQLInputObjectType,
GraphQLInt, GraphQLInterfaceType, GraphQLList,
GraphQLNonNull, GraphQLObjectType, GraphQLScalarType,
GraphQLSchema, GraphQLSkipDirective, GraphQLString,
GraphQLUnionType)
from ..type.introspection import (__Directive, __DirectiveLocation,
__EnumValue, __Field, __InputValue, __Schema,
__Type, __TypeKind)
from ..utils.value_from_ast import value_from_ast
def _build_wrapped_type(inner_type, input_type_ast):
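    # Recursively re-applies the AST's list / non-null wrappers to the named
    # inner type; e.g. the AST for "[Int!]" yields
    # GraphQLList(GraphQLNonNull(GraphQLInt)).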
if isinstance(input_type_ast, ast.ListType):
return GraphQLList(_build_wrapped_type(inner_type, input_type_ast.type))
if isinstance(input_type_ast, ast.NonNullType):
return GraphQLNonNull(_build_wrapped_type(inner_type, input_type_ast.type))
return inner_type
def _get_inner_type_name(type_ast):
if isinstance(type_ast, (ast.ListType, ast.NonNullType)):
return _get_inner_type_name(type_ast.type)
return type_ast.name.value
def _get_named_type_ast(type_ast):
named_type = type_ast
while isinstance(named_type, (ast.ListType, ast.NonNullType)):
named_type = named_type.type
return named_type
def _false(*_):
return False
def _none(*_):
return None
def build_ast_schema(document):
assert isinstance(document, ast.Document), 'must pass in Document ast.'
schema_def = None
type_asts = (
ast.ScalarTypeDefinition,
ast.ObjectTypeDefinition,
ast.InterfaceTypeDefinition,
ast.EnumTypeDefinition,
ast.UnionTypeDefinition,
ast.InputObjectTypeDefinition,
)
type_defs = []
directive_defs = []
for d in document.definitions:
if isinstance(d, ast.SchemaDefinition):
if schema_def:
raise Exception('Must provide only one schema definition.')
schema_def = d
if isinstance(d, type_asts):
type_defs.append(d)
elif isinstance(d, ast.DirectiveDefinition):
directive_defs.append(d)
if not schema_def:
raise Exception('Must provide a schema definition.')
query_type_name = None
mutation_type_name = None
subscription_type_name = None
for operation_type in schema_def.operation_types:
type_name = operation_type.type.name.value
if operation_type.operation == 'query':
if query_type_name:
raise Exception('Must provide only one query type in schema.')
query_type_name = type_name
elif operation_type.operation == 'mutation':
if mutation_type_name:
raise Exception('Must provide only one mutation type in schema.')
mutation_type_name = type_name
elif operation_type.operation == 'subscription':
if subscription_type_name:
raise Exception('Must provide only one subscription type in schema.')
subscription_type_name = type_name
if not query_type_name:
raise Exception('Must provide schema definition with query type.')
ast_map = {d.name.value: d for d in type_defs}
if query_type_name not in ast_map:
raise Exception('Specified query type "{}" not found in document.'.format(query_type_name))
if mutation_type_name and mutation_type_name not in ast_map:
raise Exception('Specified mutation type "{}" not found in document.'.format(mutation_type_name))
if subscription_type_name and subscription_type_name not in ast_map:
raise Exception('Specified subscription type "{}" not found in document.'.format(subscription_type_name))
inner_type_map = OrderedDict([
('String', GraphQLString),
('Int', GraphQLInt),
('Float', GraphQLFloat),
('Boolean', GraphQLBoolean),
('ID', GraphQLID),
('__Schema', __Schema),
('__Directive', __Directive),
('__DirectiveLocation', __DirectiveLocation),
('__Type', __Type),
('__Field', __Field),
('__InputValue', __InputValue),
('__EnumValue', __EnumValue),
('__TypeKind', __TypeKind),
])
def get_directive(directive_ast):
return GraphQLDirective(
name=directive_ast.name.value,
locations=[node.value for node in directive_ast.locations],
args=make_input_values(directive_ast.arguments, GraphQLArgument),
)
def get_object_type(type_ast):
type = type_def_named(type_ast.name.value)
assert isinstance(type, GraphQLObjectType), 'AST must provide object type'
return type
def produce_type_def(type_ast):
type_name = _get_named_type_ast(type_ast).name.value
type_def = type_def_named(type_name)
return _build_wrapped_type(type_def, type_ast)
def type_def_named(type_name):
if type_name in inner_type_map:
return inner_type_map[type_name]
if type_name not in ast_map:
raise Exception('Type "{}" not found in document'.format(type_name))
inner_type_def = make_schema_def(ast_map[type_name])
if not inner_type_def:
raise Exception('Nothing constructed for "{}".'.format(type_name))
inner_type_map[type_name] = inner_type_def
return inner_type_def
def make_schema_def(definition):
if not definition:
raise Exception('def must be defined.')
handler = _schema_def_handlers.get(type(definition))
if not handler:
raise Exception('Type kind "{}" not supported.'.format(type(definition).__name__))
return handler(definition)
def make_type_def(definition):
return GraphQLObjectType(
name=definition.name.value,
fields=lambda: make_field_def_map(definition),
interfaces=make_implemented_interfaces(definition)
)
def make_field_def_map(definition):
return OrderedDict(
(f.name.value, GraphQLField(
type=produce_type_def(f.type),
args=make_input_values(f.arguments, GraphQLArgument),
deprecation_reason=get_deprecation_reason(f.directives),
))
for f in definition.fields
)
def make_implemented_interfaces(definition):
return [produce_type_def(i) for i in definition.interfaces]
def make_input_values(values, cls):
return OrderedDict(
(value.name.value, cls(
type=produce_type_def(value.type),
default_value=value_from_ast(value.default_value, produce_type_def(value.type))
))
for value in values
)
def make_interface_def(definition):
return GraphQLInterfaceType(
name=definition.name.value,
resolve_type=_none,
fields=lambda: make_field_def_map(definition)
)
def make_enum_def(definition):
values = OrderedDict((v.name.value, GraphQLEnumValue(deprecation_reason=get_deprecation_reason(v.directives)))
for v in definition.values)
return GraphQLEnumType(
name=definition.name.value,
values=values
)
def make_union_def(definition):
return GraphQLUnionType(
name=definition.name.value,
resolve_type=_none,
types=[produce_type_def(t) for t in definition.types]
)
def make_scalar_def(definition):
return GraphQLScalarType(
name=definition.name.value,
serialize=_none,
# Validation calls the parse functions to determine if a literal value is correct.
            # Returning None, however, would cause the scalar to fail validation;
            # returning False causes it to pass.
parse_literal=_false,
parse_value=_false
)
def make_input_object_def(definition):
return GraphQLInputObjectType(
name=definition.name.value,
fields=make_input_values(definition.fields, GraphQLInputObjectField)
)
_schema_def_handlers = {
ast.ObjectTypeDefinition: make_type_def,
ast.InterfaceTypeDefinition: make_interface_def,
ast.EnumTypeDefinition: make_enum_def,
ast.UnionTypeDefinition: make_union_def,
ast.ScalarTypeDefinition: make_scalar_def,
ast.InputObjectTypeDefinition: make_input_object_def
}
types = [type_def_named(definition.name.value) for definition in type_defs]
directives = [get_directive(d) for d in directive_defs]
    # If specified directives were not explicitly declared, add them.
find_skip_directive = (directive.name for directive in directives if directive.name == 'skip')
find_include_directive = (directive.name for directive in directives if directive.name == 'include')
find_deprecated_directive = (directive.name for directive in directives if directive.name == 'deprecated')
if not next(find_skip_directive, None):
directives.append(GraphQLSkipDirective)
if not next(find_include_directive, None):
directives.append(GraphQLIncludeDirective)
if not next(find_deprecated_directive, None):
directives.append(GraphQLDeprecatedDirective)
schema_kwargs = {'query': get_object_type(ast_map[query_type_name])}
if mutation_type_name:
schema_kwargs['mutation'] = get_object_type(ast_map[mutation_type_name])
if subscription_type_name:
schema_kwargs['subscription'] = get_object_type(ast_map[subscription_type_name])
if directive_defs:
schema_kwargs['directives'] = directives
if types:
schema_kwargs['types'] = types
return GraphQLSchema(**schema_kwargs)
def get_deprecation_reason(directives):
deprecated_ast = next((directive for directive in directives
if directive.name.value == GraphQLDeprecatedDirective.name),
None)
if deprecated_ast:
args = get_argument_values(GraphQLDeprecatedDirective.args, deprecated_ast.arguments)
return args['reason']
else:
return None
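# Usage sketch, assuming graphql-core's parser is importable (the schema text
# is illustrative):
#
#   from graphql import parse
#   schema = build_ast_schema(parse('''
#   schema { query: Query }
#   type Query { hello: String }
#   '''))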
|
mingdachen/cuda-convnet2 | refs/heads/master | layer.py | 162 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import exp
import sys
import ConfigParser as cfg
import os
import numpy as n
import numpy.random as nr
from math import ceil, floor
from collections import OrderedDict
from os import linesep as NL
from python_util.options import OptionsParser
import re
class LayerParsingError(Exception):
pass
# A neuron that doesn't take parameters
class NeuronParser:
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
self.type = type
self.func_str = func_str
self.uses_acts = uses_acts
self.uses_inputs = uses_inputs
def parse(self, type):
if type == self.type:
return {'type': self.type,
'params': {},
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
return None
# A neuron that takes parameters
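# e.g. with a declared form 'tanh[a,b]', a spec such as 'tanh[1.7159,0.6666]'
# parses to params {'a': 1.7159, 'b': 0.6666} (values here are illustrative).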
class ParamNeuronParser(NeuronParser):
neuron_regex = re.compile(r'^\s*(\w+)\s*\[\s*(\w+(\s*,\w+)*)\s*\]\s*$')
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
NeuronParser.__init__(self, type, func_str, uses_acts, uses_inputs)
m = self.neuron_regex.match(type)
self.base_type = m.group(1)
self.param_names = m.group(2).split(',')
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(r'^%s\s*\[([\d,\.\s\-]*)\]\s*$' % self.base_type, type)
if m:
try:
param_vals = [float(v.strip()) for v in m.group(1).split(',')]
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals)),
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
except TypeError:
pass
return None
class AbsTanhNeuronParser(ParamNeuronParser):
def __init__(self):
ParamNeuronParser.__init__(self, 'abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')
def parse(self, type):
dic = ParamNeuronParser.parse(self, type)
# Make b positive, since abs(tanh(bx)) = abs(tanh(-bx)) and the C++ code
# assumes b is positive.
if dic:
dic['params']['b'] = abs(dic['params']['b'])
return dic
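# Parses parameterized strings such as learning-rate schedules.  Given a
# declared form like 'linear[ftgtFactor;fnoiseStdev]' (the leading 'f' or 'i'
# marks the parameter type), it accepts specs such as
# 'linear[tgtFactor=10;noiseStdev=0.01]'.  The schedule name and parameters
# here are illustrative.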
class ParamParser:
lrs_regex = re.compile(r'^\s*(\w+)\s*(?:\[\s*(\w+(\s*;\w+)*)\s*\])?\s*$')
param_converters = {'i': int,
'f': float}
def __init__(self, type):
m = self.lrs_regex.match(type)
self.base_type = m.group(1)
param_names_with_type = m.group(2).split(';') if m.group(2) is not None else []
self.param_names = [p[1:] for p in param_names_with_type]
self.param_types = [self.param_converters[p[0]] for p in param_names_with_type]
self.param_regex_inner = ";".join([('\s*%s\s*=\s*[^;,\s=]+\s*' % p) for p in self.param_names])
self.regex_str = ('^%s\s*(?:\[(%s)\])?\s*$') % (self.base_type, self.param_regex_inner)
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(self.regex_str, type, flags=re.IGNORECASE)
if m:
try:
param_vals = [ptype(v.split('=')[1].strip()) for ptype,v in zip(self.param_types, m.group(1).split(';'))] if m.group(1) is not None else []
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals))}
except TypeError:
pass
return None
# Subclass that throws more convnet-specific exceptions than the default
class MyConfigParser(cfg.SafeConfigParser):
def safe_get(self, section, option, f=cfg.SafeConfigParser.get, typestr=None, default=None):
try:
return f(self, section, option)
except cfg.NoOptionError, e:
if default is not None:
return default
raise LayerParsingError("Layer '%s': required parameter '%s' missing" % (section, option))
except ValueError, e:
if typestr is None:
raise e
raise LayerParsingError("Layer '%s': parameter '%s' must be %s" % (section, option, typestr))
def safe_get_list(self, section, option, f=str, typestr='strings', default=None):
v = self.safe_get(section, option, default=default)
if type(v) == list:
return v
try:
return [f(x.strip()) for x in v.split(',')]
except:
raise LayerParsingError("Layer '%s': parameter '%s' must be ','-delimited list of %s" % (section, option, typestr))
def safe_get_int(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getint, typestr='int', default=default)
def safe_get_float(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getfloat, typestr='float', default=default)
def safe_get_bool(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getboolean, typestr='bool', default=default)
def safe_get_float_list(self, section, option, default=None):
return self.safe_get_list(section, option, float, typestr='floats', default=default)
def safe_get_int_list(self, section, option, default=None):
return self.safe_get_list(section, option, int, typestr='ints', default=default)
def safe_get_bool_list(self, section, option, default=None):
return self.safe_get_list(section, option, lambda x: x.lower() in ('true', '1'), typestr='bools', default=default)
# A class that implements part of the interface of MyConfigParser
class FakeConfigParser(object):
def __init__(self, dic):
self.dic = dic
def safe_get(self, section, option, default=None):
if option in self.dic:
return self.dic[option]
return default
def safe_get_int(self, section, option, default=None):
return int(self.safe_get(section, option, default))
def safe_get_int_list(self, section, option, default=None):
return list(self.safe_get(section, option, default))
class LayerParser:
def __init__(self):
self.dic = {}
self.set_defaults()
# Post-processing step -- this is called after all layers have been initialized
def optimize(self, layers):
self.dic['actsTarget'] = -1
self.dic['actsGradTarget'] = -1
if len(set(len(l['gpu']) for l in layers.values() if 'inputs' in l and self.dic['name'] in l['inputs'])) > 1:
# print set(len(l['gpu']) for l in layers.values())
raise LayerParsingError("Layer '%s': all next layers must have equal number of replicas." % (self.dic['name']))
def parse_params(self, vals, parsers, param_name, human_name, num_params=1):
dic, name = self.dic, self.dic['name']
# print vals
if len(vals) != num_params and len(vals) != 1:
raise LayerParsingError("Layer '%s': expected list of length %d for %s but got list of length %d."% (name, num_params, param_name, len(vals)))
parsed = []
# print vals
for v in vals:
for p in parsers:
parsedv = p.parse(v)
if parsedv:
parsed += [parsedv]
break
if len(parsed) == 1 and num_params > 1:
parsed = parsed * num_params
if len(parsed) == num_params:
return parsed
# print parsed, vals
raise LayerParsingError("Layer '%s': unable to parse %s %s=%s." % (name, human_name, param_name, ",".join(vals)))
# Add parameters from layer parameter file
def add_params(self, mcp):
pass
# self.dic['conserveMem'] = mcp.convnet.op.get_value('conserve_mem') if mcp.convnet is not None else 0
def init(self, dic):
self.dic = dic
return self
def set_defaults(self):
self.dic['outputs'] = 0
self.dic['parser'] = self
self.dic['requiresParams'] = False
# Does this layer use its own activity matrix
# for some purpose other than computing its output?
# Usually, this will only be true for layers that require their
# own activity matrix for gradient computations. For example, layers
# with logistic units must compute the gradient y * (1 - y), where y is
# the activity matrix.
#
        # Layers that do not use their own activity matrix should advertise
# this, since this will enable memory-saving matrix re-use optimizations.
#
# The default value of this property is True, for safety purposes.
# If a layer advertises that it does not use its own activity matrix when
# in fact it does, bad things will happen.
self.dic['usesActs'] = True
# Does this layer use the activity matrices of its input layers
# for some purpose other than computing its output?
#
# Again true by default for safety
self.dic['usesInputs'] = True
# Force this layer to use its own activity gradient matrix,
# instead of borrowing one from one of its inputs.
#
# This should be true for layers where the mapping from output
# gradient to input gradient is non-elementwise.
self.dic['forceOwnActs'] = True
# Does this layer need the gradient at all?
# Should only be true for layers with parameters (weights).
self.dic['gradConsumer'] = False
# The gpu indices on which this layer runs
self.dic['gpu'] = [-1]
def parse(self, name, mcp, prev_layers, model=None):
self.prev_layers = prev_layers
self.dic['name'] = name
self.dic['type'] = mcp.safe_get(name, 'type')
self.dic['id'] = len(prev_layers)
return self.dic
def verify_float_range(self, v, param_name, _min, _max):
self.verify_num_range(v, param_name, _min, _max, strconv=lambda x: '%.3f' % x)
def verify_num_range(self, v, param_name, _min, _max, strconv=lambda x:'%d' % x):
if type(v) == list:
for i,vv in enumerate(v):
self._verify_num_range(vv, param_name, _min, _max, i, strconv=strconv)
else:
self._verify_num_range(v, param_name, _min, _max, strconv=strconv)
def _verify_num_range(self, v, param_name, _min, _max, input=-1, strconv=lambda x:'%d' % x):
layer_name = self.dic['name'] if input < 0 else '%s[%d]' % (self.dic['name'], input)
if _min is not None and _max is not None and (v < _min or v > _max):
raise LayerParsingError("Layer '%s': parameter '%s' must be in the range %s-%s" % (layer_name, param_name, strconv(_min), strconv(_max)))
elif _min is not None and v < _min:
raise LayerParsingError("Layer '%s': parameter '%s' must be greater than or equal to %s" % (layer_name, param_name, strconv(_min)))
elif _max is not None and v > _max:
raise LayerParsingError("Layer '%s': parameter '%s' must be smaller than or equal to %s" % (layer_name, param_name, strconv(_max)))
def verify_divisible(self, value, div, value_name, div_name=None, input_idx=0):
layer_name = self.dic['name'] if len(self.dic['inputs']) == 0 else '%s[%d]' % (self.dic['name'], input_idx)
if value % div != 0:
raise LayerParsingError("Layer '%s': parameter '%s' must be divisible by %s" % (layer_name, value_name, str(div) if div_name is None else "'%s'" % div_name))
def verify_str_in(self, value, param_name, lst, input_idx=-1):
lname = self.dic['name'] if input_idx == -1 else ('%s[%d]' % (self.dic['name'], input_idx))
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (lname, param_name, ", ".join("'%s'" % s for s in lst)))
def verify_int_in(self, value, param_name, lst):
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_all_ints_in(self, values, param_name, lst):
if len([v for v in values if v not in lst]) > 0:
raise LayerParsingError("Layer '%s': all parameters to '%s' must be among %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_input_dims(self, dims):
for i,d in enumerate(dims):
if d is not None and self.dic['numInputs'][i] != d: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of input %d must be %d" % (self.dic['name'], i, d))
# This looks for neuron=x arguments in various layers, and creates
# separate layer definitions for them.
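    # e.g. a layer declaring neuron=relu is split into the layer itself plus an
    # auto-generated neuron layer stacked on top of it (the neuron type here is
    # illustrative).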
@staticmethod
def detach_neuron_layers(layers):
for name,l in layers.items():
if l['type'] != 'neuron' and 'neuron' in l and l['neuron']:
NeuronLayerParser().detach_neuron_layer(name, layers)
@staticmethod
def parse_layers(layer_cfg_path, param_cfg_path, model, layers={}):
try:
if not os.path.exists(layer_cfg_path):
raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
if not os.path.exists(param_cfg_path):
raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
if len(layers) == 0:
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(layer_cfg_path))
for name in mcp.sections():
if not mcp.has_option(name, 'type'):
raise LayerParsingError("Layer '%s': no type given" % name)
ltype = mcp.safe_get(name, 'type')
if ltype not in layer_parsers:
raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
layers[name] = layer_parsers[ltype]().parse(name, mcp, layers, model)
LayerParser.detach_neuron_layers(layers)
for l in layers.values():
l['parser'].optimize(layers)
del l['parser']
for name,l in layers.items():
if not l['type'].startswith('cost.'):
found = max(name in l2['inputs'] for l2 in layers.values() if 'inputs' in l2)
if not found:
raise LayerParsingError("Layer '%s' of type '%s' is unused" % (name, l['type']))
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(param_cfg_path))
# mcp.convnet = model
for name,l in layers.items():
if not mcp.has_section(name) and l['requiresParams']:
raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (name, l['type'], param_cfg_path))
lp = layer_parsers[l['type']]().init(l)
lp.add_params(mcp)
except LayerParsingError, e:
print e
sys.exit(1)
return layers
@staticmethod
def register_layer_parser(ltype, cls):
if ltype in layer_parsers:
raise LayerParsingError("Layer type '%s' already registered" % ltype)
layer_parsers[ltype] = cls
# Any layer that takes an input (i.e. non-data layer)
class LayerWithInputParser(LayerParser):
def __init__(self, num_inputs=-1):
LayerParser.__init__(self)
self.num_inputs = num_inputs
def verify_num_params(self, params, auto_expand=True):
for param in params:
if len(self.dic[param]) != len(self.dic['inputs']):
if auto_expand and len(self.dic[param]) == 1:
self.dic[param] *= len(self.dic['inputs'])
else:
raise LayerParsingError("Layer '%s': %s list length does not match number of inputs" % (self.dic['name'], param))
# layers: dictionary: name -> layer
def optimize(self, layers):
LayerParser.optimize(self, layers)
dic = self.dic
# Check if I have an input that no one else uses.
#print "Layer %s optimizing" % dic['name']
if not dic['forceOwnActs']:
for i, inp in enumerate(dic['inputLayers']):
if inp['outputs'] == dic['outputs'] and sum(('inputs' in ll) and (inp['name'] in ll['inputs']) for ll in layers.itervalues()) == 1:
# I can share my activity matrix with this layer
# if it does not use its activity matrix, and I
# do not need to remember my inputs.
# TODO: a dropout layer should always be able to overwrite
# its input. Make it so.
# print "Layer %s(uses inputs=%d), input %s(uses acts = %d)" % (dic['name'], dic['usesInputs'], inp['name'], inp['usesActs'])
if not inp['usesActs'] and not dic['usesInputs']:
dic['actsTarget'] = i
print "Layer %s using acts from layer %s" % (dic['name'], inp['name'])
# print "Layer '%s' sharing activity matrix with layer '%s'" % (dic['name'], l['name'])
# I can share my gradient matrix with this layer if we're on the same GPU.
# This is different from the logic for actsTarget because this guy doesn't
# have an actsGrad matrix on my GPU if our GPUs are different, so there's
# nothing to share.
if dic['gpu'] == inp['gpu']:
dic['actsGradTarget'] = i
# print "Layer '%s' sharing activity gradient matrix with layer '%s'" % (dic['name'], l['name'])
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['inputs'] = [inp.strip() for inp in mcp.safe_get(name, 'inputs').split(',')]
for inp in dic['inputs']:
if inp not in prev_layers:
raise LayerParsingError("Layer '%s': input layer '%s' not defined" % (name, inp))
dic['inputLayers'] = [prev_layers[inp] for inp in dic['inputs']]
dic['gpu'] = mcp.safe_get_int_list(name, 'gpu', default=dic['inputLayers'][0]['gpu'])
dic['gpus'] = ", ".join('%s' % d for d in dic['gpu'])
dic['numReplicas'] = len(dic['gpu'])
if len(set(dic['gpu'])) != len(dic['gpu']):
raise LayerParsingError("Layer '%s': all replicas must run on different GPUs." % (name))
for inp in dic['inputs']:
# Data layers do not explicitly define how many replicas they have.
# The number of replicas for a data layer is given by the number of replicas
# in the next layer(s). So we set that here.
inpl = prev_layers[inp]
if inpl['type'] == 'data':
inpl['numReplicas'] = dic['numReplicas']
if inpl['numReplicas'] % dic['numReplicas'] != 0:
raise LayerParsingError("Layer '%s': number of replicas (%d) must divide number of replicas in all input layers (input %s has %d replicas)." % (name, dic['numReplicas'], inpl['name'], inpl['numReplicas']))
if len(set(inp['numReplicas'] for inp in dic['inputLayers'])) != 1:
raise LayerParsingError("Layer '%s': all input layers must have equal numbers of replicas." % (name))
# Need to also assert that all *next* layers have equal number of replicas but this is hard so it's done in Layer.optimize
for inp in dic['inputLayers']:
if inp['outputs'] == 0:
raise LayerParsingError("Layer '%s': input layer '%s' does not produce any output" % (name, inp['name']))
dic['numInputs'] = [inp['outputs'] for inp in dic['inputLayers']]
# Layers can declare a neuron activation function to apply to their output, as a shortcut
# to avoid declaring a separate neuron layer above themselves.
dic['neuron'] = mcp.safe_get(name, 'neuron', default="")
if self.num_inputs > 0 and len(dic['numInputs']) != self.num_inputs:
raise LayerParsingError("Layer '%s': number of inputs must be %d" % (name, self.num_inputs))
if model:
self.verify_all_ints_in(dic['gpu'], 'gpu', range(len(model.op.get_value('gpu'))))
return dic
def verify_img_size(self):
dic = self.dic
if dic['numInputs'][0] % dic['imgPixels'] != 0 or dic['imgSize'] * dic['imgSize'] != dic['imgPixels']:
raise LayerParsingError("Layer '%s': has %-d dimensional input, not interpretable as %d-channel images" % (dic['name'], dic['numInputs'][0], dic['channels']))
@staticmethod
def grad_consumers_below(dic):
if dic['gradConsumer']:
return True
        if 'inputLayers' in dic:
            return any(LayerWithInputParser.grad_consumers_below(l) for l in dic['inputLayers'])
        return False
def verify_no_grads(self):
if LayerWithInputParser.grad_consumers_below(self.dic):
raise LayerParsingError("Layer '%s': layers of type '%s' cannot propagate gradient and must not be placed over layers with parameters." % (self.dic['name'], self.dic['type']))
class NailbedLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['stride'] = mcp.safe_get_int(name, 'stride')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputsX'] = (dic['imgSize'] + dic['stride'] - 1) / dic['stride']
dic['start'] = (dic['imgSize'] - dic['stride'] * (dic['outputsX'] - 1)) / 2
dic['outputs'] = dic['channels'] * dic['outputsX']**2
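        # Worked example (hypothetical): imgSize=32, stride=4 gives
        # outputsX = (32 + 4 - 1) / 4 = 8 and start = (32 - 4*7) / 2 = 2,
        # i.e. an 8x8 grid of nails with outputs = channels * 64.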
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_img_size()
print "Initialized bed-of-nails layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class GaussianBlurLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['filterSize'] = mcp.safe_get_int(name, 'filterSize')
dic['stdev'] = mcp.safe_get_float(name, 'stdev')
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_int_in(dic['filterSize'], 'filterSize', [3, 5, 7, 9])
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['filter'] = n.array([exp(-(dic['filterSize']/2 - i)**2 / float(2 * dic['stdev']**2))
for i in xrange(dic['filterSize'])], dtype=n.float32).reshape(1, dic['filterSize'])
dic['filter'] /= dic['filter'].sum()
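        # Worked example (hypothetical): filterSize=3, stdev=1.0 yields the 1x3 kernel
        # [e^-0.5, 1, e^-0.5] before normalization, i.e. roughly [0.274, 0.452, 0.274].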
self.verify_img_size()
if dic['filterSize'] > dic['imgSize']:
raise LayerParsingError("Later '%s': filter size (%d) must be smaller than image size (%d)." % (dic['name'], dic['filterSize'], dic['imgSize']))
print "Initialized Gaussian blur layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class HorizontalReflectionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, 3)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_img_size()
print "Initialized horizontal reflection layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class ResizeLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['tgtSize'] = int(floor(dic['imgSize'] / dic['scale']))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Really not recommended to use this for such severe scalings
self.verify_float_range(dic['scale'], 'scale', 0.5, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized resize layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class RandomScaleLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['maxScale'] = mcp.safe_get_float(name, 'maxScale')
dic['tgtSize'] = mcp.safe_get_int(name, 'tgtSize')
min_size = int(floor(dic['imgSize'] / dic['maxScale']))
max_size = dic['imgSize'] #int(floor(dic['imgSize'] * dic['maxScale']))
if dic['tgtSize'] < min_size:
raise LayerParsingError("Layer '%s': target size must be greater than minimum image size after rescaling (%d)" % (name, min_size))
if dic['tgtSize'] > max_size:
raise LayerParsingError("Layer '%s': target size must be smaller than maximum image size after rescaling (%d)" % (name, max_size))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_float_range(dic['maxScale'], 'maxScale', 1, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized random scale layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class CropLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
dic['startX'] = mcp.safe_get_int(name, 'startX')
dic['startY'] = mcp.safe_get_int(name, 'startY', default=dic['startX'])
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['channels'] * (dic['sizeX']**2)
self.verify_num_range(dic['startX'], 'startX', 0, dic['imgSize']-1)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['startY'], 'startY', 0, dic['imgSize']-1)
self.verify_img_size()
self.verify_no_grads()
if dic['startX'] + dic['sizeX'] > dic['imgSize']:
raise LayerParsingError("Layer '%s': startX (%d) + sizeX (%d) > imgSize (%d)" % (name, dic['startX'], dic['sizeX'], dic['imgSize']))
print "Initialized cropping layer '%s', producing %dx%d %d-channel output" % (name, dic['sizeX'], dic['sizeX'], dic['channels'])
return dic
class ColorTransformLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / 3
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['channels'] = 3
dic['outputs'] = dic['numInputs'][0]
self.verify_img_size()
self.verify_no_grads()
return dic
class RGBToYUVLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
print "Initialized RGB --> YUV layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class RGBToLABLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
dic['center'] = mcp.safe_get_bool(name, 'center', default=False)
print "Initialized RGB --> LAB layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class NeuronLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
@staticmethod
def get_unused_layer_name(layers, wish):
if wish not in layers:
return wish
for i in xrange(1, 100):
name = '%s.%d' % (wish, i)
if name not in layers:
return name
raise LayerParsingError("This is insane.")
def parse_neuron(self, neuron_str):
for n in neuron_parsers:
p = n.parse(neuron_str)
if p: # Successfully parsed neuron, return it
self.dic['neuron'] = p
self.dic['usesActs'] = self.dic['neuron']['usesActs']
self.dic['usesInputs'] = self.dic['neuron']['usesInputs']
return
# Could not parse neuron
# Print available neuron types
colnames = ['Neuron type', 'Function']
m = max(len(colnames[0]), OptionsParser._longest_value(neuron_parsers, key=lambda x:x.type)) + 2
ntypes = [OptionsParser._bold(colnames[0].ljust(m))] + [n.type.ljust(m) for n in neuron_parsers]
fnames = [OptionsParser._bold(colnames[1])] + [n.func_str for n in neuron_parsers]
usage_lines = NL.join(ntype + fname for ntype,fname in zip(ntypes, fnames))
raise LayerParsingError("Layer '%s': unable to parse neuron type '%s'. Valid neuron types: %sWhere neurons have parameters, they must be floats." % (self.dic['name'], neuron_str, NL + usage_lines + NL))
def detach_neuron_layer(self, src_name, layers):
dic = self.dic
# self.set_defaults()
dic['name'] = NeuronLayerParser.get_unused_layer_name(layers, '%s_neuron' % src_name)
dic['type'] = 'neuron'
dic['inputs'] = src_name
dic['neuron'] = layers[src_name]['neuron']
dic['gpu'] = layers[src_name]['gpu']
# Yes it's not entirely correct to pass all of layers as prev_layers, but it's harmless
dic = self.parse(dic['name'], FakeConfigParser(dic), layers)
dic['src_layer'] = src_name
# Link upper layers to this new one
for l in layers.values():
if 'inputs' in l:
l['inputs'] = [inp if inp != src_name else dic['name'] for inp in l['inputs']]
l['inputLayers'] = [inp if inp['name'] != src_name else dic for inp in l['inputLayers']]
layers[dic['name']] = dic
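    # Worked example (hypothetical): a layer 'fc1' declaring neuron=relu is split into
    # 'fc1' (activation removed) plus a new 'fc1_neuron' layer of type 'neuron' taking
    # 'fc1' as input, and every layer that consumed 'fc1' is re-pointed at 'fc1_neuron'.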
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
self.parse_neuron(dic['neuron'])
dic['forceOwnActs'] = False
print "Initialized neuron layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseSumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeffs'] = mcp.safe_get_float_list(name, 'coeffs', default=[1.0] * len(dic['inputs']))
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['requiresParams'] = True
print "Initialized elementwise sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseMaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(dic['inputs']) < 2:
raise LayerParsingError("Layer '%s': elementwise max layer must have at least 2 inputs, got %d." % (name, len(dic['inputs'])))
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
print "Initialized elementwise max layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['stride'] = mcp.safe_get_int(name, 'stride', default=1)
self.verify_divisible(dic['numInputs'][0], dic['stride'], 'input dimensionality', 'stride')
dic['outputs'] = dic['numInputs'][0] / dic['stride']
print "Initialized sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class DropoutLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['enable'] = mcp.safe_get_bool(name, 'enable', default=True)
dic['keep'] = mcp.safe_get_float(name, 'keep', default=0.5)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['outputs'] = dic['numInputs'][0]
print "Initialized %s layer '%s' on GPUs %s, producing %d outputs" % (dic['type'], name, dic['gpus'], dic['outputs'])
return dic
class Dropout2LayerParser(DropoutLayerParser):
def __init__(self):
DropoutLayerParser.__init__(self)
class WeightLayerParser(LayerWithInputParser):
LAYER_PAT = re.compile(r'^\s*([^\s\[]+)(?:\[(\d+)\])?\s*$') # matches things like layername[5], etc
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
@staticmethod
def get_layer_name(name_str):
m = WeightLayerParser.LAYER_PAT.match(name_str)
if not m:
return None
return m.group(1), m.group(2)
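    # e.g. (hypothetical) get_layer_name("conv1[2]") -> ("conv1", "2"),
    # get_layer_name("conv1") -> ("conv1", None); malformed strings yield None.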
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['momW'] = mcp.safe_get_float_list(name, 'momW')
dic['momB'] = mcp.safe_get_float(name, 'momB')
dic['superEps'] = mcp.safe_get_float(name, 'superEps', default=0.0)
dic['superMom'] = mcp.safe_get_float(name, 'superMom', default=0.0)
dic['wc'] = mcp.safe_get_float_list(name, 'wc', default=[0.0] * len(dic['inputs']))
dic['wball'] = mcp.safe_get_float_list(name, 'wball', default=[0.0] * len(dic['inputs']))
self.verify_num_params(['momW', 'wc', 'wball'])
# dic['wballNormed'] = [wball * nweights for wball,nweights in zip(dic['wball'], dic['weightsPerFilter'])]
dic['wballNormed'] = dic['wball']
# Convert from old-style 0.001,0.02 hyperparam specification to new-stye
# const[base=0.001],const[base=0.02] and so forth
def convert_scalars_to_schedules(scalars):
parts = scalars.split(',')
for i,p in enumerate(parts):
p = p.strip()
if re.match('(?:\d*\.)?\d+$', p):
parts[i] = 'const[base=%s]' % p
return parts
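        # e.g. (hypothetical) convert_scalars_to_schedules("0.001,const[base=0.02]")
        # returns ['const[base=0.001]', 'const[base=0.02]']: bare scalars get wrapped,
        # strings that already look like schedules pass through unchanged.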
dic['epsW'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsW')), lrs_parsers, 'epsW', 'learning rate schedule', num_params=len(dic['inputs']))
dic['epsB'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsB')), lrs_parsers, 'epsB', 'learning rate schedule', num_params=1)[0]
dic['updatePeriod'] = mcp.safe_get_int(name, 'updatePeriod', default=0) # 0 means update as often as possible
# TODO: assert that updatePeriod is a multiple of active pass period, which is unknown here.
# the assert has to go in some post-processing step..
dic['gradConsumer'] = dic['epsB']['params']['base'] > 0 or any(w['params']['base'] > 0 for w in dic['epsW'])
@staticmethod
def unshare_weights(layer, layers, matrix_idx=None):
        def unshare(layer, layers, indices):
            for i in indices:
                src_name = layer['weightSourceLayers'][i]
                if src_name != '': # weightSourceLayers holds layer names; '' means not shared
                    src_matrix_idx = layer['weightSourceMatrixIndices'][i]
                    layer['weightSourceLayers'][i] = ""
                    layer['weightSourceMatrixIndices'][i] = -1
                    layer['weights'][i] = layer['weights'][i].copy()
                    layer['weightsInc'][i] = n.zeros_like(layer['weights'][i])
                    print "Unshared weight matrix %s[%d] from %s[%d]." % (layer['name'], i, src_name, src_matrix_idx)
                else:
                    print "Weight matrix %s[%d] already unshared." % (layer['name'], i)
if 'weightSourceLayers' in layer:
unshare(layer, layers, range(len(layer['inputs'])) if matrix_idx is None else [matrix_idx])
# Load weight/biases initialization module
def call_init_func(self, param_name, shapes, input_idx=-1):
dic = self.dic
func_pat = re.compile('^([^\.]+)\.([^\(\)]+)\s*(?:\(([^,]+(?:,[^,]+)*)\))?$')
m = func_pat.match(dic[param_name])
if not m:
raise LayerParsingError("Layer '%s': '%s' parameter must have format 'moduleName.functionName(param1,param2,...)'; got: %s." % (dic['name'], param_name, dic['initWFunc']))
module, func = m.group(1), m.group(2)
params = m.group(3).split(',') if m.group(3) is not None else []
try:
mod = __import__(module)
return getattr(mod, func)(dic['name'], input_idx, shapes, params=params) if input_idx >= 0 else getattr(mod, func)(dic['name'], shapes, params=params)
except (ImportError, AttributeError, TypeError), e:
raise LayerParsingError("Layer '%s': %s." % (dic['name'], e))
def make_weights(self, initW, rows, cols, order='C'):
dic = self.dic
dic['weights'], dic['weightsInc'] = [], []
if dic['initWFunc']: # Initialize weights from user-supplied python function
# Initialization function is supplied in the format
# module.func
for i in xrange(len(dic['inputs'])):
dic['weights'] += [self.call_init_func('initWFunc', (rows[i], cols[i]), input_idx=i)]
if type(dic['weights'][i]) != n.ndarray:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], i, dic['initWFunc'], type(dic['weights'][i])))
if dic['weights'][i].dtype != n.float32:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must weight matrices consisting of single-precision floats. Got: %s." % (dic['name'], i, dic['initWFunc'], dic['weights'][i].dtype))
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s[%d]': weight matrix returned by weight initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], i, dic['initWFunc'], (rows[i], cols[i]), dic['weights'][i].shape))
# Convert to desired order
dic['weights'][i] = n.require(dic['weights'][i], requirements=order)
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
print "Layer '%s[%d]' initialized weight matrices from function %s" % (dic['name'], i, dic['initWFunc'])
else:
for i in xrange(len(dic['inputs'])):
if dic['weightSourceLayers'][i] != '': # Shared weight matrix
src_layer = self.prev_layers[dic['weightSourceLayers'][i]] if dic['weightSourceLayers'][i] != dic['name'] else dic
dic['weights'] += [src_layer['weights'][dic['weightSourceMatrixIndices'][i]]]
dic['weightsInc'] += [src_layer['weightsInc'][dic['weightSourceMatrixIndices'][i]]]
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s': weight sharing source matrix '%s' has shape %dx%d; should be %dx%d."
% (dic['name'], dic['weightSource'][i], dic['weights'][i].shape[0], dic['weights'][i].shape[1], rows[i], cols[i]))
print "Layer '%s' initialized weight matrix %d from %s" % (dic['name'], i, dic['weightSource'][i])
else:
dic['weights'] += [n.array(initW[i] * nr.randn(rows[i], cols[i]), dtype=n.single, order=order)]
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
def make_biases(self, rows, cols, order='C'):
dic = self.dic
if dic['initBFunc']:
dic['biases'] = self.call_init_func('initBFunc', (rows, cols))
if type(dic['biases']) != n.ndarray:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], dic['initBFunc'], type(dic['biases'])))
if dic['biases'].dtype != n.float32:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object consisting of single-precision floats. Got: %s." % (dic['name'], dic['initBFunc'], dic['biases'].dtype))
if dic['biases'].shape != (rows, cols):
raise LayerParsingError("Layer '%s': bias vector returned by bias initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], dic['initBFunc'], (rows, cols), dic['biases'].shape))
dic['biases'] = n.require(dic['biases'], requirements=order)
print "Layer '%s' initialized bias vector from function %s" % (dic['name'], dic['initBFunc'])
else:
dic['biases'] = dic['initB'] * n.ones((rows, cols), order=order, dtype=n.single)
dic['biasesInc'] = n.zeros_like(dic['biases'])
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['gradConsumer'] = True
dic['usesActs'] = False
dic['initW'] = mcp.safe_get_float_list(name, 'initW', default=0.01)
dic['initB'] = mcp.safe_get_float(name, 'initB', default=0)
dic['initWFunc'] = mcp.safe_get(name, 'initWFunc', default="")
dic['initBFunc'] = mcp.safe_get(name, 'initBFunc', default="")
# Find shared weight matrices
dic['weightSource'] = mcp.safe_get_list(name, 'weightSource', default=[''] * len(dic['inputs']))
self.verify_num_params(['initW'])
self.verify_num_params(['weightSource'], auto_expand=False)
dic['weightSourceLayers'] = []
dic['weightSourceMatrixIndices'] = []
for i, src_name in enumerate(dic['weightSource']):
src_layer_matrix_idx = -1
src_layer_name = ''
if src_name != '':
src_layer_match = WeightLayerParser.get_layer_name(src_name)
if src_layer_match is None:
raise LayerParsingError("Layer '%s': unable to parse weight sharing source '%s'. Format is layer[idx] or just layer, in which case idx=0 is used." % (name, src_name))
src_layer_name = src_layer_match[0]
src_layer_matrix_idx = int(src_layer_match[1]) if src_layer_match[1] is not None else 0
if src_layer_name not in prev_layers and src_layer_name != name:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' does not exist." % (name, src_layer_name))
# src_layer_idx = prev_names.index(src_layer_name) if src_layer_name != name else len(prev_names)
src_layer = prev_layers[src_layer_name] if src_layer_name != name else dic
if src_layer['gpu'] != dic['gpu']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' runs on GPUs %s, while '%s' runs on GPUs %s." % (name, src_layer_name, src_layer['gpu'], name, dic['gpu']))
if src_layer['type'] != dic['type']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' is of type '%s'; should be '%s'." % (name, src_layer_name, src_layer['type'], dic['type']))
if src_layer_name != name and len(src_layer['weights']) <= src_layer_matrix_idx:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' has %d weight matrices, but '%s[%d]' requested." % (name, src_layer_name, len(src_layer['weights']), src_name, src_layer_matrix_idx))
if src_layer_name == name and src_layer_matrix_idx >= i:
raise LayerParsingError("Layer '%s': weight sharing source '%s[%d]' not defined yet." % (name, name, src_layer_matrix_idx))
dic['weightSourceLayers'] += [src_layer_name]
dic['weightSourceMatrixIndices'] += [src_layer_matrix_idx]
return dic
class FCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = mcp.safe_get_int(name, 'outputs')
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['outputs'], 'outputs', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
print "Initialized fully-connected layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SplitFCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['parts'] = mcp.safe_get_int(name, 'parts')
dic['outputs'] = mcp.safe_get_int(name, 'outputs') * dic['parts']
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['parts'], 'parts', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']/dic['parts']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
for i in xrange(len(dic['numInputs'])):
self.verify_divisible(dic['numInputs'][i], dic['parts'], 'numInputs', 'parts', input_idx=i)
print "Initialized split fully-connected layer '%s' on GPUs %s, producing %d outputs in %d parts" % (name, dic['gpus'], dic['outputs'], dic['parts'])
return dic
class LocalLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
# Convert convolutional layer to unshared, locally-connected layer
@staticmethod
def conv_to_local(layers, lname):
layer = layers[lname]
if layer['type'] == 'conv':
layer['type'] = 'local'
for inp,inpname in enumerate(layer['inputs']):
src_layer_name = layer['weightSourceLayers'][inp]
if src_layer_name != '':
src_layer = layers[src_layer_name]
src_matrix_idx = layer['weightSourceMatrixIndices'][inp]
LocalLayerParser.conv_to_local(layers, src_layer_name)
for w in ('weights', 'weightsInc'):
layer[w][inp] = src_layer[w][src_matrix_idx]
else:
layer['weights'][inp] = n.require(n.reshape(n.tile(n.reshape(layer['weights'][inp], (1, n.prod(layer['weights'][inp].shape))), (layer['modules'], 1)),
(layer['modules'] * layer['filterChannels'][inp] * layer['filterPixels'][inp], layer['filters'])),
requirements='C')
layer['weightsInc'][inp] = n.zeros_like(layer['weights'][inp])
if layer['sharedBiases']:
layer['biases'] = n.require(n.repeat(layer['biases'], layer['modules'], axis=0), requirements='C')
layer['biasesInc'] = n.zeros_like(layer['biases'])
print "Converted layer '%s' from convolutional to unshared, locally-connected" % layer['name']
        # Also call this function on any layers sharing my weights.
        # layers is a name -> layer dict, so iterate its items, not its keys.
        for lname2, l in layers.items():
            if 'weightSourceLayers' in l and lname in l['weightSourceLayers']:
                LocalLayerParser.conv_to_local(layers, lname2)
return layer
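    # Shape sketch (hypothetical): a shared conv weight matrix of shape
    # (filterChannels * filterPixels, filters) = (75, 64) on a layer with 36 modules
    # is tiled row-wise into a (36 * 75, 64) unshared matrix, one copy per module.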
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesActs'] = False
# Supplied values
dic['channels'] = mcp.safe_get_int_list(name, 'channels')
dic['padding'] = mcp.safe_get_int_list(name, 'padding', default=[0]*len(dic['inputs']))
dic['stride'] = mcp.safe_get_int_list(name, 'stride', default=[1]*len(dic['inputs']))
dic['filterSize'] = mcp.safe_get_int_list(name, 'filterSize')
dic['filters'] = mcp.safe_get_int_list(name, 'filters')
dic['groups'] = mcp.safe_get_int_list(name, 'groups', default=[1]*len(dic['inputs']))
dic['initW'] = mcp.safe_get_float_list(name, 'initW')
dic['initCFunc'] = mcp.safe_get(name, 'initCFunc', default='')
dic['modulesX'] = mcp.safe_get_int(name, 'modulesX', default=0)
self.verify_num_params(['channels', 'padding', 'stride', 'filterSize', \
'filters', 'groups', 'initW'])
self.verify_num_range(dic['stride'], 'stride', 1, None)
self.verify_num_range(dic['filterSize'],'filterSize', 1, None)
self.verify_num_range(dic['padding'], 'padding', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['groups'], 'groups', 1, None)
self.verify_num_range(dic['modulesX'], 'modulesX', 0, None)
for i in xrange(len(dic['filters'])):
self.verify_divisible(dic['filters'][i], 16, 'filters', input_idx=i)
# Computed values
dic['imgPixels'] = [numInputs/channels for numInputs,channels in zip(dic['numInputs'], dic['channels'])]
dic['imgSize'] = [int(n.sqrt(imgPixels)) for imgPixels in dic['imgPixels']]
self.verify_num_range(dic['imgSize'], 'imgSize', 1, None)
dic['filters'] = [filters*groups for filters,groups in zip(dic['filters'], dic['groups'])]
dic['filterPixels'] = [filterSize**2 for filterSize in dic['filterSize']]
if dic['modulesX'] <= 0:
dic['modulesX'] = [1 + int(ceil((2*padding + imgSize - filterSize) / float(stride))) for padding,imgSize,filterSize,stride in zip(dic['padding'], dic['imgSize'], dic['filterSize'], dic['stride'])]
else:
dic['modulesX'] = [dic['modulesX']] * len(dic['inputs'])
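        # Worked example (hypothetical): padding=2, imgSize=32, filterSize=5, stride=1
        # gives modulesX = 1 + ceil((2*2 + 32 - 5) / 1.0) = 32 output positions per row.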
dic['filterChannels'] = [channels/groups for channels,groups in zip(dic['channels'], dic['groups'])]
if len(set(dic['modulesX'])) != 1 or len(set(dic['filters'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must produce equally-dimensioned output. Dimensions are: %s." % (name, ", ".join("%dx%dx%d" % (filters, modulesX, modulesX) for filters,modulesX in zip(dic['filters'], dic['modulesX']))))
dic['modulesX'] = dic['modulesX'][0]
dic['modules'] = dic['modulesX']**2
dic['filters'] = dic['filters'][0]
dic['outputs'] = dic['modules'] * dic['filters']
# dic['filterConns'] = [[]] * len(dic['inputs'])
for i in xrange(len(dic['inputs'])):
if dic['numInputs'][i] % dic['imgPixels'][i] != 0 or dic['imgSize'][i] * dic['imgSize'][i] != dic['imgPixels'][i]:
raise LayerParsingError("Layer '%s[%d]': has %-d dimensional input, not interpretable as square %d-channel images" % (name, i, dic['numInputs'][i], dic['channels'][i]))
if dic['channels'][i] > 3 and dic['channels'][i] % 4 != 0:
raise LayerParsingError("Layer '%s[%d]': number of channels must be smaller than 4 or divisible by 4" % (name, i))
# if dic['filterSize'][i] > totalPadding[i] + dic['imgSize'][i]:
# raise LayerParsingError("Layer '%s[%d]': filter size (%d) greater than image size + padding (%d)" % (name, i, dic['filterSize'][i], dic['padding'][i] + dic['imgSize'][i]))
if -dic['padding'][i] + dic['stride'][i] * (dic['modulesX'] - 1) + dic['filterSize'][i] < dic['imgSize'][i]:
raise LayerParsingError("Layer '%s[%d]': %dx%d output map with padding=%d, stride=%d does not cover entire input image." % (name, i, dic['modulesX'], dic['outputsX'], dic['padding'][i], dic['stride'][i]))
if dic['groups'][i] > 1:
self.verify_divisible(dic['channels'][i], 4*dic['groups'][i], 'channels', '4 * groups', input_idx=i)
self.verify_divisible(dic['channels'][i], dic['groups'][i], 'channels', 'groups', input_idx=i)
self.verify_divisible(dic['filters'], 16*dic['groups'][i], 'filters * groups', input_idx=i)
dic['padding'][i] = -dic['padding'][i]
# dic['overSample'] = [groups*filterChannels/channels for groups,filterChannels,channels in zip(dic['groups'], dic['filterChannels'], dic['channels'])]
dic['weightsPerFilter'] = [fc * (fz**2) for fc, fz in zip(dic['filterChannels'], dic['filterSize'])]
return dic
class ConvLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def add_params(self, mcp):
LocalLayerParser.add_params(self, mcp)
self.dic['wcNormMax'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMax', default=[0.0] * len(self.dic['inputs']))
self.dic['wcNormMin'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMin', default=[0.0] * len(self.dic['inputs']))
self.verify_num_params(['wcNormMax', 'wcNormMin'])
for min,max in zip(self.dic['wcNormMin'], self.dic['wcNormMax']):
if min > max:
raise LayerParsingError("Layer '%s': wcNormMin must be <= wcNormMax." % (self.dic['name']))
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
dic['sumWidth'] = mcp.safe_get_int(name, 'sumWidth')
dic['sharedBiases'] = mcp.safe_get_bool(name, 'sharedBiases', default=True)
num_biases = dic['filters'] if dic['sharedBiases'] else dic['modules']*dic['filters']
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
self.make_weights(dic['initW'], eltmult(dic['filterPixels'], dic['filterChannels']), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(num_biases, 1, order='C')
print "Initialized convolutional layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class LocalUnsharedLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
scmult = lambda x, lst: [x * l for l in lst]
self.make_weights(dic['initW'], scmult(dic['modules'], eltmult(dic['filterPixels'], dic['filterChannels'])), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(dic['modules'] * dic['filters'], 1, order='C')
print "Initialized locally-connected layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class DataLayerParser(LayerParser):
def __init__(self):
LayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['dataIdx'] = mcp.safe_get_int(name, 'dataIdx')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['end'] = mcp.safe_get_int(name, 'end', default=model.train_data_provider.get_data_dims(idx=dic['dataIdx']))
dic['outputs'] = dic['end'] - dic['start']
# dic['usesActs'] = False
print "Initialized data layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class SoftmaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['inputLayers'][0]['outputs']
print "Initialized softmax layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class ConcatenationLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized concatenation layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PassThroughLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
# Note: this doesn't verify all the necessary constraints. Layer construction may still fail in C++ code.
# For example, it does not verify that every layer only has one pass-through parent. Obviously having
# two such parents is incoherent.
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
# if len(dic['inputLayers']) == 1:
# raise LayerParsingError("Layer %s: pass-through layer must have more than one input." % dic['name'])
if len(dic['gpu']) != len(dic['inputLayers'][0]['gpu']):
raise LayerParsingError("Layer '%s': number of replicas in pass-through layer must be equivalent to number of replicas in input layers." % dic['name'])
for inp in dic['inputLayers']:
conflicting_layers = [l for l in prev_layers.values() if l['type'] == 'pass' and inp['name'] in l['inputs'] and len(set(dic['gpu']).intersection(set(l['gpu']))) > 0]
if len(conflicting_layers) > 0:
raise LayerParsingError("Layer '%s' conflicts with layer '%s'. Both pass-through layers take layer '%s' as input and operate on an overlapping set of GPUs." % (dic['name'], conflicting_layers[0]['name'], inp['name']))
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
# dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized pass-through layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputsX'] = mcp.safe_get_int(name, 'outputsX', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
# Avg pooler does not use its acts or inputs
dic['usesActs'] = dic['pool'] != 'avg'
dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
if dic['pool'] == 'avg':
dic['sum'] = mcp.safe_get_bool(name, 'sum', default=False)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['sizeX'])
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
if LayerWithInputParser.grad_consumers_below(dic):
self.verify_divisible(dic['channels'], 16, 'channels')
self.verify_str_in(dic['pool'], 'pool', ['max', 'maxabs', 'avg'])
self.verify_img_size()
if dic['outputsX'] <= 0:
            dic['outputsX'] = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1
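        # Worked example (hypothetical): imgSize=32, start=0, sizeX=3, stride=2 gives
        # outputsX = ceil((32 - 0 - 3) / 2.0) + 1 = 15 + 1 = 16.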
dic['outputs'] = dic['outputsX']**2 * dic['channels']
print "Initialized %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class CrossMapPoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputChannels'] = mcp.safe_get_int(name, 'outputs', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
dic['requiresParams'] = False
# Avg pooler does not use its acts or inputs
        dic['usesActs'] = dic['pool'] != 'avg'
        dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['outputChannels'] * dic['imgPixels']
self.verify_num_range(dic['size'], 'size', 1, dic['channels'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['size'])
self.verify_num_range(dic['outputChannels'], 'outputChannels', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['start'], 'start', None, 0)
self.verify_str_in(dic['pool'], 'pool', ['max'])
self.verify_img_size()
covered_chans = dic['start'] + (dic['outputChannels'] - 1) * dic['stride'] + dic['size']
if covered_chans < dic['channels']:
raise LayerParsingError("Layer '%s': cross-map pooling with start=%d, stride=%d, size=%d, outputs=%d covers only %d of %d input channels." % \
(name, dic['start'], dic['stride'], dic['size'], dic['outputChannels'], covered_chans, dic['channels']))
print "Initialized cross-map %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['outputChannels'])
return dic
class NormLayerParser(LayerWithInputParser):
RESPONSE_NORM = 'response'
CONTRAST_NORM = 'contrast'
CROSSMAP_RESPONSE_NORM = 'cross-map response'
def __init__(self, norm_type):
LayerWithInputParser.__init__(self, num_inputs=1)
self.norm_type = norm_type
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['scale'] /= dic['size'] if self.norm_type == self.CROSSMAP_RESPONSE_NORM else dic['size']**2
dic['pow'] = mcp.safe_get_float(name, 'pow')
dic['minDiv'] = mcp.safe_get_float(name, 'minDiv', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['blocked'] = mcp.safe_get_bool(name, 'blocked', default=False)
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
# Contrast normalization layer does not use its inputs
dic['usesInputs'] = self.norm_type != self.CONTRAST_NORM
self.verify_num_range(dic['channels'], 'channels', 1, None)
if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
self.verify_num_range(dic['size'], 'size', 2, dic['channels'])
if dic['channels'] % 16 != 0:
raise LayerParsingError("Layer '%s': number of channels must be divisible by 16 when using crossMap" % name)
else:
self.verify_num_range(dic['size'], 'size', 1, dic['imgSize'])
if self.norm_type != self.CROSSMAP_RESPONSE_NORM and dic['channels'] > 3 and dic['channels'] % 4 != 0:
raise LayerParsingError("Layer '%s': number of channels must be smaller than 4 or divisible by 4" % name)
self.verify_img_size()
dic['outputs'] = dic['imgPixels'] * dic['channels']
print "Initialized %s-normalization layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (self.norm_type, name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class CostParser(LayerWithInputParser):
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
# Stored as string because python can't pickle lambda functions
dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs]'
dic['children'] = mcp.safe_get_list(name, 'children', default=[])
# Aggregated costs only produce outputs which are additive.
for c in dic['children']:
if c not in prev_layers:
raise LayerParsingError("Layer '%s': child cost layer '%s' not defined" % (name, c))
if prev_layers[c]['type'] != dic['type']:
raise LayerParsingError("Layer '%s': child cost layer '%s' must have same type as parent" % (name, c))
prev_layers[c]['aggregated'] = 1
dic['aggregated'] = dic['children'] != []
del dic['neuron']
return dic
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeff'] = mcp.safe_get_float(name, 'coeff')
dic['gradConsumer'] = dic['coeff'] > 0
class CrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': Second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': Softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class LogregCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['topk'] = mcp.safe_get_int(name, 'topk', default=1)
if dic['topk'] > dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': parameter 'topk'must not have value greater than the number of classess." % (name))
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
if dic['numInputs'][0] != 1: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized logistic regression cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class BinomialCrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
self.dic['posWeight'] = mcp.safe_get_float(self.dic['name'], 'posWeight', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': both inputs must produce the same number of outputs" % (name))
if 'neuron' not in dic['inputLayers'][1] or dic['inputLayers'][1]['neuron'] != 'logistic':
print "WARNING: Layer '%s': input '%s' is not logistic, results may not be what you intend." % (dic['name'], dic['inputs'][1])
if dic['type'] == 'cost.bce':
print "Initialized binomial cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
dic['computeSoftmaxErrorRate'] = True
return dic
class DetectionCrossEntCostParser(BinomialCrossEntCostParser):
def __init__(self):
BinomialCrossEntCostParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = BinomialCrossEntCostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
dic['computeSoftmaxErrorRate'] = False
dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs[:2]] + [(class_cost[2] / class_cost[j] if class_cost[j] > 0 else n.inf) for class_cost in [costs[2:][i*3:(i+1)*3] for i in range(len(costs[2:])/3)] for j in range(2)]'
dic['outputFilterFormatter'] = 'lambda self,costs: "(crossent) %.6f, (err) %.6f, " % (costs[0], costs[1]) + ", ".join("(%s) %.6f, %.6f" % (self.train_data_provider.batch_meta["label_names"][i/2-1],costs[i],costs[i+1]) for i in xrange(2, len(costs), 2))'
print "Initialized detection cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class SumOfSquaresCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
# All the layer parsers
layer_parsers = {'data' : lambda : DataLayerParser(),
'fc': lambda : FCLayerParser(),
'sfc': lambda : SplitFCLayerParser(),
'conv': lambda : ConvLayerParser(),
'local': lambda : LocalUnsharedLayerParser(),
'softmax': lambda : SoftmaxLayerParser(),
'eltsum': lambda : EltwiseSumLayerParser(),
'eltmax': lambda : EltwiseMaxLayerParser(),
'sum': lambda : SumLayerParser(),
'neuron': lambda : NeuronLayerParser(),
'pool': lambda : PoolLayerParser(),
'cmpool': lambda : CrossMapPoolLayerParser(),
'rnorm': lambda : NormLayerParser(NormLayerParser.RESPONSE_NORM),
'cnorm': lambda : NormLayerParser(NormLayerParser.CONTRAST_NORM),
'cmrnorm': lambda : NormLayerParser(NormLayerParser.CROSSMAP_RESPONSE_NORM),
'nailbed': lambda : NailbedLayerParser(),
'blur': lambda : GaussianBlurLayerParser(),
'href': lambda : HorizontalReflectionLayerParser(),
'resize': lambda : ResizeLayerParser(),
'rgb2yuv': lambda : RGBToYUVLayerParser(),
'rgb2lab': lambda : RGBToLABLayerParser(),
'rscale': lambda : RandomScaleLayerParser(),
'crop': lambda : CropLayerParser(),
                 'concat': lambda : ConcatenationLayerParser(),
'pass': lambda : PassThroughLayerParser(),
'dropout': lambda : DropoutLayerParser(),
'dropout2': lambda : Dropout2LayerParser(),
'cost.logreg': lambda : LogregCostParser(),
'cost.crossent': lambda : CrossEntCostParser(),
'cost.bce': lambda : BinomialCrossEntCostParser(),
'cost.dce': lambda : DetectionCrossEntCostParser(),
'cost.sum2': lambda : SumOfSquaresCostParser()}
# All the neuron parsers
# Unlike the layer parsers above, this isn't a name --> parser mapping, because
# neurons don't have fixed names: a user may write tanh[0.5,0.25], etc.
neuron_parsers = sorted([NeuronParser('ident', 'f(x) = x', uses_acts=False, uses_inputs=False),
NeuronParser('logistic', 'f(x) = 1 / (1 + e^-x)', uses_acts=True, uses_inputs=False),
NeuronParser('abs', 'f(x) = |x|', uses_acts=False, uses_inputs=True),
NeuronParser('relu', 'f(x) = max(0, x)', uses_acts=True, uses_inputs=False),
NeuronParser('nrelu', 'f(x) = max(0, x) + noise', uses_acts=True, uses_inputs=False),
NeuronParser('softrelu', 'f(x) = log(1 + e^x)', uses_acts=True, uses_inputs=False),
NeuronParser('square', 'f(x) = x^2', uses_acts=False, uses_inputs=True),
NeuronParser('sqrt', 'f(x) = sqrt(x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('log[a]', 'f(x) = log(a + x)', uses_acts=False, uses_inputs=True),
ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('brelu[a]', 'f(x) = min(a, max(0, x))', uses_acts=True, uses_inputs=False),
ParamNeuronParser('linear[a,b]', 'f(x) = a * x + b', uses_acts=True, uses_inputs=False),
ParamNeuronParser('drelu[a]', 'f(x) = x - a * tanh(x / a)', uses_acts=False, uses_inputs=True)],
key=lambda x:x.type)
# Learning rate schedules
lrs_parsers = sorted([ParamParser('const[fbase]'),
ParamParser('linear[fbase;ftgtFactor]'),
ParamParser('exp[fbase;ftgtFactor]'),
ParamParser('dexp[fbase;ftgtFactor;inumSteps]')])
|
fengren/python_koans | refs/heads/master | python2/koans/about_scope.py | 100 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import jims
import joes
counter = 0 # Global
class AboutScope(Koan):
#
# NOTE:
# Look in jims.py and joes.py to see definitions of Dog used
# for this set of tests
#
def test_dog_is_not_available_in_the_current_scope(self):
try:
fido = Dog()
except Exception as ex:
self.assertMatch(__, ex[0])
def test_you_can_reference_nested_classes_using_the_scope_operator(self):
fido = jims.Dog()
# name 'jims' module name is taken from jims.py filename
rover = joes.Dog()
self.assertEqual(__, fido.identify())
self.assertEqual(__, rover.identify())
self.assertEqual(____, type(fido) == type(rover))
self.assertEqual(____, jims.Dog == joes.Dog)
# ------------------------------------------------------------------
class str(object):
pass
def test_bare_bones_class_names_do_not_assume_the_current_scope(self):
self.assertEqual(____, AboutScope.str == str)
def test_nested_string_is_not_the_same_as_the_system_string(self):
self.assertEqual(____, self.str == type("HI"))
def test_str_without_self_prefix_stays_in_the_global_scope(self):
self.assertEqual(____, str == type("HI"))
# ------------------------------------------------------------------
PI = 3.1416
def test_constants_are_defined_with_an_initial_uppercase_letter(self):
self.assertAlmostEqual(_____, self.PI)
# Note, floating point numbers in python are not precise.
# assertAlmostEqual will check that it is 'close enough'
def test_constants_are_assumed_by_convention_only(self):
self.PI = "rhubarb"
self.assertEqual(_____, self.PI)
        # There aren't any real constants in python. It's up to the developer
# to keep to the convention and not modify them.
# ------------------------------------------------------------------
def increment_using_local_counter(self, counter):
counter = counter + 1
def increment_using_global_counter(self):
global counter
counter = counter + 1
def test_incrementing_with_local_counter(self):
global counter
start = counter
self.increment_using_local_counter(start)
self.assertEqual(____, counter == start + 1)
def test_incrementing_with_global_counter(self):
global counter
start = counter
self.increment_using_global_counter()
self.assertEqual(____, counter == start + 1)
# ------------------------------------------------------------------
global deadly_bingo
deadly_bingo = [4, 8, 15, 16, 23, 42]
def test_global_attributes_can_be_created_in_the_middle_of_a_class(self):
self.assertEqual(__, deadly_bingo[5])
|
markfinger/recipe-crawler-for-beaney | refs/heads/master | requests/packages/chardet/utf8prober.py | 2918 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
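    # Editorial note (illustration, not part of the original prober): with
    # three multi-byte sequences seen, self._mNumOfMBChar == 3, so
    #     unlike = 0.99 * ONE_CHAR_PROB ** 3   # = 0.12375
    #     confidence = 1.0 - unlike            # = 0.87625
    # Six or more multi-byte characters short-circuit to a flat 0.99.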
|
akaariai/django | refs/heads/master | tests/migrations/test_migrations_squashed_erroneous/1_auto.py | 1155 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
Chilledheart/seahub | refs/heads/master | seahub/auth/management/__init__.py | 96 | """
Creates permissions for all installed apps that need permissions.
"""
from django.db.models import get_models, signals
from django.contrib.auth import models as auth_app
def _get_permission_codename(action, opts):
return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
"Returns (codename, name) for all permissions in the given opts."
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw)))
return perms + list(opts.permissions)
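# Editorial sketch (hypothetical model, not part of this module): for a
# model whose _meta has object_name 'Article' and verbose_name_raw
# 'article', _get_all_permissions(Article._meta) returns
#     [('add_article', u'Can add article'),
#      ('change_article', u'Can change article'),
#      ('delete_article', u'Can delete article')]
# plus any custom pairs declared in Meta.permissions.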
def create_permissions(app, created_models, verbosity, **kwargs):
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
app_models = get_models(app)
if not app_models:
return
for klass in app_models:
ctype = ContentType.objects.get_for_model(klass)
for codename, name in _get_all_permissions(klass._meta):
p, created = Permission.objects.get_or_create(codename=codename, content_type__pk=ctype.id,
defaults={'name': name, 'content_type': ctype})
if created and verbosity >= 2:
print "Adding permission '%s'" % p
def create_superuser(app, created_models, verbosity, **kwargs):
from django.contrib.auth.models import User
from django.core.management import call_command
if User in created_models and kwargs.get('interactive', True):
msg = "\nYou just installed Django's auth system, which means you don't have " \
"any superusers defined.\nWould you like to create one now? (yes/no): "
confirm = raw_input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = raw_input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True)
break
signals.post_syncdb.connect(create_permissions,
dispatch_uid = "django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
|
project-capo/amber-python-drivers | refs/heads/master | src/amberdriver/common/__init__.py | 47 | __author__ = 'paoolo'
|
bmotlaghFLT/FLT_PhantomJS | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/checkout.py | 119 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
from webkitpy.common.config import urls
from webkitpy.common.checkout.changelog import ChangeLog, parse_bug_id_from_changelog
from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.checkout.scm import CommitMessage
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import ScriptError
# This class represents the WebKit-specific parts of the checkout (like ChangeLogs).
# FIXME: Move a bunch of ChangeLog-specific processing from SCM to this object.
# NOTE: All paths returned from this class should be absolute.
class Checkout(object):
def __init__(self, scm, executive=None, filesystem=None):
self._scm = scm
# FIXME: We shouldn't be grabbing at private members on scm.
self._executive = executive or self._scm._executive
self._filesystem = filesystem or self._scm._filesystem
def is_path_to_changelog(self, path):
return self._filesystem.basename(path) == "ChangeLog"
def _latest_entry_for_changelog_at_revision(self, changelog_path, revision):
changelog_contents = self._scm.contents_at_revision(changelog_path, revision)
# contents_at_revision returns a byte array (str()), but we know
# that ChangeLog files are utf-8. parse_latest_entry_from_file
# expects a file-like object which vends unicode(), so we decode here.
# Old revisions of Sources/WebKit/wx/ChangeLog have some invalid utf8 characters.
changelog_file = StringIO.StringIO(changelog_contents.decode("utf-8", "ignore"))
return ChangeLog.parse_latest_entry_from_file(changelog_file)
def changelog_entries_for_revision(self, revision, changed_files=None):
if not changed_files:
changed_files = self._scm.changed_files_for_revision(revision)
# FIXME: This gets confused if ChangeLog files are moved, as
# deletes are still "changed files" per changed_files_for_revision.
# FIXME: For now we hack around this by caching any exceptions
        # which result from having deleted files included in the changed_files list.
changelog_entries = []
for path in changed_files:
if not self.is_path_to_changelog(path):
continue
try:
changelog_entries.append(self._latest_entry_for_changelog_at_revision(path, revision))
except ScriptError:
pass
return changelog_entries
def _changelog_data_for_revision(self, revision):
changed_files = self._scm.changed_files_for_revision(revision)
changelog_entries = self.changelog_entries_for_revision(revision, changed_files=changed_files)
# Assume for now that the first entry has everything we need:
        # FIXME: This silently returns None if there were no ChangeLogs.
if not len(changelog_entries):
return None
changelog_entry = changelog_entries[0]
return {
"bug_id": parse_bug_id_from_changelog(changelog_entry.contents()),
"author_name": changelog_entry.author_name(),
"author_email": changelog_entry.author_email(),
"author": changelog_entry.author(),
"reviewer_text": changelog_entry.reviewer_text(),
"reviewer": changelog_entry.reviewer(),
"contents": changelog_entry.contents(),
"changed_files": changed_files,
}
@memoized
def commit_info_for_revision(self, revision):
committer_email = self._scm.committer_email_for_revision(revision)
changelog_data = self._changelog_data_for_revision(revision)
if not changelog_data:
return None
return CommitInfo(revision, committer_email, changelog_data)
def bug_id_for_revision(self, revision):
return self.commit_info_for_revision(revision).bug_id()
def _modified_files_matching_predicate(self, git_commit, predicate, changed_files=None):
# SCM returns paths relative to scm.checkout_root
# Callers (especially those using the ChangeLog class) may
# expect absolute paths, so this method returns absolute paths.
if not changed_files:
changed_files = self._scm.changed_files(git_commit)
return filter(predicate, map(self._scm.absolute_path, changed_files))
def modified_changelogs(self, git_commit, changed_files=None):
return self._modified_files_matching_predicate(git_commit, self.is_path_to_changelog, changed_files=changed_files)
def modified_non_changelogs(self, git_commit, changed_files=None):
return self._modified_files_matching_predicate(git_commit, lambda path: not self.is_path_to_changelog(path), changed_files=changed_files)
def commit_message_for_this_commit(self, git_commit, changed_files=None, return_stderr=False):
changelog_paths = self.modified_changelogs(git_commit, changed_files)
if not len(changelog_paths):
raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n"
"All changes require a ChangeLog. See:\n %s" % urls.contribution_guidelines)
message_text = self._scm.run([self._scm.script_path('commit-log-editor'), '--print-log'] + changelog_paths, return_stderr=return_stderr)
return CommitMessage(message_text.splitlines())
def recent_commit_infos_for_files(self, paths):
revisions = set(sum(map(self._scm.revisions_changing_file, paths), []))
return set(map(self.commit_info_for_revision, revisions))
def suggested_reviewers(self, git_commit, changed_files=None):
changed_files = self.modified_non_changelogs(git_commit, changed_files)
commit_infos = sorted(self.recent_commit_infos_for_files(changed_files), key=lambda info: info.revision(), reverse=True)
reviewers = filter(lambda person: person and person.can_review, sum(map(lambda info: [info.reviewer(), info.author()], commit_infos), []))
unique_reviewers = reduce(lambda suggestions, reviewer: suggestions + [reviewer if reviewer not in suggestions else None], reviewers, [])
return filter(lambda reviewer: reviewer, unique_reviewers)
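    # Editorial note on the chain above (names hypothetical): for
    # reviewers [alice, bob, alice], the reduce step yields
    # [alice, bob, None] -- duplicates become None so ordering is kept --
    # and the final filter drops the Nones, returning [alice, bob].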
def bug_id_for_this_commit(self, git_commit, changed_files=None):
try:
return parse_bug_id_from_changelog(self.commit_message_for_this_commit(git_commit, changed_files).message())
except ScriptError, e:
pass # We might not have ChangeLogs.
def apply_patch(self, patch):
# It's possible that the patch was not made from the root directory.
# We should detect and handle that case.
# FIXME: Move _scm.script_path here once we get rid of all the dependencies.
# --force (continue after errors) is the common case, so we always use it.
args = [self._scm.script_path('svn-apply'), "--force"]
if patch.reviewer():
args += ['--reviewer', patch.reviewer().full_name]
self._executive.run_command(args, input=patch.contents(), cwd=self._scm.checkout_root)
def apply_reverse_diff(self, revision):
self._scm.apply_reverse_diff(revision)
# We revert the ChangeLogs because removing lines from a ChangeLog
# doesn't make sense. ChangeLogs are append only.
changelog_paths = self.modified_changelogs(git_commit=None)
if len(changelog_paths):
self._scm.revert_files(changelog_paths)
conflicts = self._scm.conflicted_files()
if len(conflicts):
raise ScriptError(message="Failed to apply reverse diff for revision %s because of the following conflicts:\n%s" % (revision, "\n".join(conflicts)))
def apply_reverse_diffs(self, revision_list):
for revision in sorted(revision_list, reverse=True):
self.apply_reverse_diff(revision)
|
jarus/django-registration | refs/heads/master | registration/tests/urls.py | 138 | """
URLs used in the unit tests for django-registration.
You should not attempt to use these URLs in any sort of real or
development environment; instead, use
``registration/backends/default/urls.py``. This URLconf includes those
URLs, and also adds several additional URLs which serve no purpose
other than to test that optional keyword arguments are properly
handled.
"""
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from registration.views import activate
from registration.views import register
urlpatterns = patterns('',
# Test the 'activate' view with custom template
# name.
url(r'^activate-with-template-name/(?P<activation_key>\w+)/$',
activate,
{'template_name': 'registration/test_template_name.html',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_template_name'),
# Test the 'activate' view with
# extra_context_argument.
url(r'^activate-extra-context/(?P<activation_key>\w+)/$',
activate,
{'extra_context': {'foo': 'bar', 'callable': lambda: 'called'},
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_extra_context'),
# Test the 'activate' view with success_url argument.
url(r'^activate-with-success-url/(?P<activation_key>\w+)/$',
activate,
{'success_url': 'registration_test_custom_success_url',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_success_url'),
# Test the 'register' view with custom template
# name.
url(r'^register-with-template-name/$',
register,
{'template_name': 'registration/test_template_name.html',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_template_name'),
                       # Test the 'register' view with extra_context
# argument.
url(r'^register-extra-context/$',
register,
{'extra_context': {'foo': 'bar', 'callable': lambda: 'called'},
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_extra_context'),
# Test the 'register' view with custom URL for
# closed registration.
url(r'^register-with-disallowed-url/$',
register,
{'disallowed_url': 'registration_test_custom_disallowed',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_disallowed_url'),
# Set up a pattern which will correspond to the
# custom 'disallowed_url' above.
url(r'^custom-disallowed/$',
direct_to_template,
{'template': 'registration/registration_closed.html'},
name='registration_test_custom_disallowed'),
# Test the 'register' view with custom redirect
# on successful registration.
url(r'^register-with-success_url/$',
register,
{'success_url': 'registration_test_custom_success_url',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_success_url'
),
# Pattern for custom redirect set above.
url(r'^custom-success/$',
direct_to_template,
{'template': 'registration/test_template_name.html'},
name='registration_test_custom_success_url'),
(r'', include('registration.backends.default.urls')),
)
|
nadeaud/binutils-gdb | refs/heads/master | etc/update-copyright.py | 7 | #!/usr/bin/python
#
# Copyright (C) 2013-2017 Free Software Foundation, Inc.
#
# This script is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This script adjusts the copyright notices at the top of source files
# so that they have the form:
#
# Copyright XXXX-YYYY Free Software Foundation, Inc.
#
# It doesn't change code that is known to be maintained elsewhere or
# that carries a non-FSF copyright.
#
# Pass --this-year to the script if you want it to add the current year
# to all applicable notices. Pass --quilt if you are using quilt and
# want files to be added to the quilt before being changed.
#
# By default the script will update all directories for which the
# output has been vetted. You can instead pass the names of individual
# directories, including those that haven't been approved. So:
#
#   update-copyright.py --this-year
#
# is the command that would be used at the beginning of a year to update
# all copyright notices (and possibly at other times to check whether
# new files have been added with old years). On the other hand:
#
#   update-copyright.py --this-year libjava
#
# would run the script on just libjava/.
#
# This script was copied from gcc's contrib/ and modified to suit
# binutils. In contrast to the gcc script, this one will update
# the testsuite and --version output strings too.
import os
import re
import sys
import time
import subprocess
class Errors:
def __init__ (self):
self.num_errors = 0
def report (self, filename, string):
if filename:
string = filename + ': ' + string
sys.stderr.write (string + '\n')
self.num_errors += 1
def ok (self):
return self.num_errors == 0
class GenericFilter:
def __init__ (self):
self.skip_files = set()
self.skip_dirs = set()
self.skip_extensions = set()
self.fossilised_files = set()
self.own_files = set()
self.skip_files |= set ([
# Skip licence files.
'COPYING',
'COPYING.LIB',
'COPYING3',
'COPYING3.LIB',
'COPYING.LIBGLOSS',
'COPYING.NEWLIB',
'LICENSE',
'fdl.texi',
'gpl_v3.texi',
'fdl-1.3.xml',
'gpl-3.0.xml',
# Skip auto- and libtool-related files
'aclocal.m4',
'compile',
'config.guess',
'config.sub',
'depcomp',
'install-sh',
'libtool.m4',
'ltmain.sh',
'ltoptions.m4',
'ltsugar.m4',
'ltversion.m4',
'lt~obsolete.m4',
'missing',
'mkdep',
'mkinstalldirs',
'move-if-change',
'shlibpath.m4',
'symlink-tree',
'ylwrap',
# Skip FSF mission statement, etc.
'gnu.texi',
'funding.texi',
'appendix_free.xml',
# Skip imported texinfo files.
'texinfo.tex',
])
self.skip_extensions |= set ([
# Maintained by the translation project.
'.po',
# Automatically-generated.
'.pot',
])
self.skip_dirs |= set ([
'autom4te.cache',
])
def get_line_filter (self, dir, filename):
if filename.startswith ('ChangeLog'):
# Ignore references to copyright in changelog entries.
return re.compile ('\t')
return None
def skip_file (self, dir, filename):
if filename in self.skip_files:
return True
(base, extension) = os.path.splitext (os.path.join (dir, filename))
if extension in self.skip_extensions:
return True
if extension == '.in':
# Skip .in files produced by automake.
if os.path.exists (base + '.am'):
return True
# Skip files produced by autogen
if (os.path.exists (base + '.def')
and os.path.exists (base + '.tpl')):
return True
# Skip configure files produced by autoconf
if filename == 'configure':
if os.path.exists (base + '.ac'):
return True
if os.path.exists (base + '.in'):
return True
return False
def skip_dir (self, dir, subdir):
return subdir in self.skip_dirs
def is_fossilised_file (self, dir, filename):
if filename in self.fossilised_files:
return True
        # Only touch current ChangeLogs.
if filename != 'ChangeLog' and filename.find ('ChangeLog') >= 0:
return True
return False
def by_package_author (self, dir, filename):
return filename in self.own_files
class Copyright:
def __init__ (self, errors):
self.errors = errors
# Characters in a range of years. Include '.' for typos.
ranges = '[0-9](?:[-0-9.,\s]|\s+and\s+)*[0-9]'
# Non-whitespace characters in a copyright holder's name.
name = '[\w.,-]'
# Matches one year.
self.year_re = re.compile ('[0-9]+')
# Matches part of a year or copyright holder.
self.continuation_re = re.compile (ranges + '|' + name)
# Matches a full copyright notice:
self.copyright_re = re.compile (
# 1: 'Copyright (C)', etc.
'([Cc]opyright'
'|[Cc]opyright\s+\([Cc]\)'
'|[Cc]opyright\s+%s'
'|[Cc]opyright\s+©'
'|[Cc]opyright\s+@copyright{}'
'|@set\s+copyright[\w-]+)'
# 2: the years. Include the whitespace in the year, so that
# we can remove any excess.
'(\s*(?:' + ranges + ',?'
'|@value\{[^{}]*\})\s*)'
# 3: 'by ', if used
'(by\s+)?'
# 4: the copyright holder. Don't allow multiple consecutive
# spaces, so that right-margin gloss doesn't get caught
# (e.g. gnat_ugn.texi).
'(' + name + '(?:\s?' + name + ')*)?')
# A regexp for notices that might have slipped by. Just matching
# 'copyright' is too noisy, and 'copyright.*[0-9]' falls foul of
# HTML header markers, so check for 'copyright' and two digits.
self.other_copyright_re = re.compile ('(^|[^\._])copyright[^=]*[0-9][0-9]',
re.IGNORECASE)
self.comment_re = re.compile('#+|[*]+|;+|%+|//+|@c |dnl ')
self.holders = { '@copying': '@copying' }
self.holder_prefixes = set()
# True to 'quilt add' files before changing them.
self.use_quilt = False
# If set, force all notices to include this year.
self.max_year = None
# Goes after the year(s). Could be ', '.
self.separator = ' '
def add_package_author (self, holder, canon_form = None):
if not canon_form:
canon_form = holder
self.holders[holder] = canon_form
index = holder.find (' ')
while index >= 0:
self.holder_prefixes.add (holder[:index])
index = holder.find (' ', index + 1)
def add_external_author (self, holder):
self.holders[holder] = None
class BadYear():
def __init__ (self, year):
self.year = year
def __str__ (self):
return 'unrecognised year: ' + self.year
def parse_year (self, string):
year = int (string)
if len (string) == 2:
if year > 70:
return year + 1900
elif len (string) == 4:
return year
raise self.BadYear (string)
def year_range (self, years):
year_list = [self.parse_year (year)
for year in self.year_re.findall (years)]
assert len (year_list) > 0
return (min (year_list), max (year_list))
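    # Editorial examples (not in the original script): parse_year maps
    # '87' -> 1987 and '2013' -> 2013; a two-digit year of 70 or below,
    # or any other length, raises BadYear. year_range scans every number
    # in its argument, so '1999, 2001-2003' gives (1999, 2003).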
def set_use_quilt (self, use_quilt):
self.use_quilt = use_quilt
def include_year (self, year):
assert not self.max_year
self.max_year = year
def canonicalise_years (self, dir, filename, filter, years):
# Leave texinfo variables alone.
if years.startswith ('@value'):
return years
(min_year, max_year) = self.year_range (years)
# Update the upper bound, if enabled.
if self.max_year and not filter.is_fossilised_file (dir, filename):
max_year = max (max_year, self.max_year)
# Use a range.
if min_year == max_year:
return '%d' % min_year
else:
return '%d-%d' % (min_year, max_year)
def strip_continuation (self, line):
line = line.lstrip()
match = self.comment_re.match (line)
if match:
line = line[match.end():].lstrip()
return line
def is_complete (self, match):
holder = match.group (4)
return (holder
and (holder not in self.holder_prefixes
or holder in self.holders))
def update_copyright (self, dir, filename, filter, file, line, match):
orig_line = line
next_line = None
pathname = os.path.join (dir, filename)
intro = match.group (1)
if intro.startswith ('@set'):
# Texinfo year variables should always be on one line
after_years = line[match.end (2):].strip()
if after_years != '':
self.errors.report (pathname,
'trailing characters in @set: '
+ after_years)
return (False, orig_line, next_line)
else:
# If it looks like the copyright is incomplete, add the next line.
while not self.is_complete (match):
try:
next_line = file.next()
except StopIteration:
break
# If the next line doesn't look like a proper continuation,
# assume that what we've got is complete.
continuation = self.strip_continuation (next_line)
if not self.continuation_re.match (continuation):
break
# Merge the lines for matching purposes.
orig_line += next_line
line = line.rstrip() + ' ' + continuation
next_line = None
# Rematch with the longer line, at the original position.
match = self.copyright_re.match (line, match.start())
assert match
holder = match.group (4)
# Use the filter to test cases where markup is getting in the way.
if filter.by_package_author (dir, filename):
assert holder not in self.holders
elif not holder:
self.errors.report (pathname, 'missing copyright holder')
return (False, orig_line, next_line)
elif holder not in self.holders:
self.errors.report (pathname,
'unrecognised copyright holder: ' + holder)
return (False, orig_line, next_line)
else:
# See whether the copyright is associated with the package
# author.
canon_form = self.holders[holder]
if not canon_form:
return (False, orig_line, next_line)
# Make sure the author is given in a consistent way.
line = (line[:match.start (4)]
+ canon_form
+ line[match.end (4):])
# Remove any 'by'
line = line[:match.start (3)] + line[match.end (3):]
# Update the copyright years.
years = match.group (2).strip()
if (self.max_year
and match.start(0) > 0 and line[match.start(0)-1] == '"'
and not filter.is_fossilised_file (dir, filename)):
# A printed copyright date consists of the current year
canon_form = '%d' % self.max_year
else:
try:
canon_form = self.canonicalise_years (dir, filename, filter, years)
except self.BadYear as e:
self.errors.report (pathname, str (e))
return (False, orig_line, next_line)
line = (line[:match.start (2)]
+ ' ' + canon_form + self.separator
+ line[match.end (2):])
# Use the standard (C) form.
if intro.endswith ('right'):
intro += ' (C)'
elif intro.endswith ('(c)'):
intro = intro[:-3] + '(C)'
line = line[:match.start (1)] + intro + line[match.end (1):]
# Strip trailing whitespace
line = line.rstrip() + '\n'
return (line != orig_line, line, next_line)
def process_file (self, dir, filename, filter):
pathname = os.path.join (dir, filename)
if filename.endswith ('.tmp'):
# Looks like something we tried to create before.
try:
os.remove (pathname)
except OSError:
pass
return
lines = []
changed = False
line_filter = filter.get_line_filter (dir, filename)
with open (pathname, 'r') as file:
prev = None
for line in file:
while line:
next_line = None
# Leave filtered-out lines alone.
if not (line_filter and line_filter.match (line)):
match = self.copyright_re.search (line)
if match:
res = self.update_copyright (dir, filename, filter,
file, line, match)
(this_changed, line, next_line) = res
changed = changed or this_changed
# Check for copyright lines that might have slipped by.
elif self.other_copyright_re.search (line):
self.errors.report (pathname,
'unrecognised copyright: %s'
% line.strip())
lines.append (line)
line = next_line
# If something changed, write the new file out.
if changed and self.errors.ok():
tmp_pathname = pathname + '.tmp'
with open (tmp_pathname, 'w') as file:
for line in lines:
file.write (line)
if self.use_quilt:
subprocess.call (['quilt', 'add', pathname])
os.rename (tmp_pathname, pathname)
def process_tree (self, tree, filter):
for (dir, subdirs, filenames) in os.walk (tree):
# Don't recurse through directories that should be skipped.
for i in xrange (len (subdirs) - 1, -1, -1):
if filter.skip_dir (dir, subdirs[i]):
del subdirs[i]
# Handle the files in this directory.
for filename in filenames:
if filter.skip_file (dir, filename):
sys.stdout.write ('Skipping %s\n'
% os.path.join (dir, filename))
else:
self.process_file (dir, filename, filter)
class CmdLine:
def __init__ (self, copyright = Copyright):
self.errors = Errors()
self.copyright = copyright (self.errors)
self.dirs = []
self.default_dirs = []
self.chosen_dirs = []
self.option_handlers = dict()
self.option_help = []
self.add_option ('--help', 'Print this help', self.o_help)
self.add_option ('--quilt', '"quilt add" files before changing them',
self.o_quilt)
self.add_option ('--this-year', 'Add the current year to every notice',
self.o_this_year)
def add_option (self, name, help, handler):
self.option_help.append ((name, help))
self.option_handlers[name] = handler
def add_dir (self, dir, filter = GenericFilter()):
self.dirs.append ((dir, filter))
def o_help (self, option = None):
sys.stdout.write ('Usage: %s [options] dir1 dir2...\n\n'
'Options:\n' % sys.argv[0])
format = '%-15s %s\n'
for (what, help) in self.option_help:
sys.stdout.write (format % (what, help))
sys.stdout.write ('\nDirectories:\n')
format = '%-25s'
i = 0
for (dir, filter) in self.dirs:
i += 1
if i % 3 == 0 or i == len (self.dirs):
sys.stdout.write (dir + '\n')
else:
sys.stdout.write (format % dir)
sys.exit (0)
def o_quilt (self, option):
self.copyright.set_use_quilt (True)
def o_this_year (self, option):
self.copyright.include_year (time.localtime().tm_year)
def main (self):
for arg in sys.argv[1:]:
if arg[:1] != '-':
self.chosen_dirs.append (arg)
elif arg in self.option_handlers:
self.option_handlers[arg] (arg)
else:
self.errors.report (None, 'unrecognised option: ' + arg)
if self.errors.ok():
if len (self.chosen_dirs) == 0:
self.chosen_dirs = self.default_dirs
if len (self.chosen_dirs) == 0:
self.o_help()
else:
for chosen_dir in self.chosen_dirs:
canon_dir = os.path.join (chosen_dir, '')
count = 0
for (dir, filter) in self.dirs:
if (dir + os.sep).startswith (canon_dir):
count += 1
self.copyright.process_tree (dir, filter)
if count == 0:
self.errors.report (None, 'unrecognised directory: '
+ chosen_dir)
sys.exit (0 if self.errors.ok() else 1)
#----------------------------------------------------------------------------
class TopLevelFilter (GenericFilter):
def skip_dir (self, dir, subdir):
return True
class ConfigFilter (GenericFilter):
def __init__ (self):
GenericFilter.__init__ (self)
def skip_file (self, dir, filename):
if filename.endswith ('.m4'):
pathname = os.path.join (dir, filename)
with open (pathname) as file:
# Skip files imported from gettext.
if file.readline().find ('gettext-') >= 0:
return True
return GenericFilter.skip_file (self, dir, filename)
class LdFilter (GenericFilter):
def __init__ (self):
GenericFilter.__init__ (self)
self.skip_extensions |= set ([
# ld testsuite output match files.
'.ro',
])
class BinutilsCopyright (Copyright):
def __init__ (self, errors):
Copyright.__init__ (self, errors)
canon_fsf = 'Free Software Foundation, Inc.'
self.add_package_author ('Free Software Foundation', canon_fsf)
self.add_package_author ('Free Software Foundation.', canon_fsf)
self.add_package_author ('Free Software Foundation Inc.', canon_fsf)
self.add_package_author ('Free Software Foundation, Inc', canon_fsf)
self.add_package_author ('Free Software Foundation, Inc.', canon_fsf)
self.add_package_author ('The Free Software Foundation', canon_fsf)
self.add_package_author ('The Free Software Foundation, Inc.', canon_fsf)
self.add_package_author ('Software Foundation, Inc.', canon_fsf)
self.add_external_author ('Carnegie Mellon University')
self.add_external_author ('John D. Polstra.')
self.add_external_author ('Linaro Ltd.')
self.add_external_author ('MIPS Computer Systems, Inc.')
self.add_external_author ('Red Hat Inc.')
self.add_external_author ('Regents of the University of California.')
self.add_external_author ('The Regents of the University of California.')
self.add_external_author ('Third Eye Software, Inc.')
self.add_external_author ('Ulrich Drepper')
self.add_external_author ('Synopsys Inc.')
class BinutilsCmdLine (CmdLine):
def __init__ (self):
CmdLine.__init__ (self, BinutilsCopyright)
self.add_dir ('.', TopLevelFilter())
self.add_dir ('bfd')
self.add_dir ('binutils')
self.add_dir ('config', ConfigFilter())
self.add_dir ('cpu')
self.add_dir ('elfcpp')
self.add_dir ('etc')
self.add_dir ('gas')
self.add_dir ('gdb')
self.add_dir ('gold')
self.add_dir ('gprof')
self.add_dir ('include')
self.add_dir ('ld', LdFilter())
self.add_dir ('libdecnumber')
self.add_dir ('libiberty')
self.add_dir ('opcodes')
self.add_dir ('readline')
self.add_dir ('sim')
self.default_dirs = [
'bfd',
'binutils',
'elfcpp',
'etc',
'gas',
'gold',
'gprof',
'include',
'ld',
'libiberty',
'opcodes',
]
BinutilsCmdLine().main()
|
avasilevich/spolks | refs/heads/master | p2p/utils/interfaces.py | 1 | import netifaces
def list_interfaces():
broadcast_interfaces = {}
interfaces = netifaces.interfaces()
for entry in interfaces:
try:
entry_data = netifaces.ifaddresses(entry)[netifaces.AF_INET][0]
if 'broadcast' in entry_data:
broadcast_interfaces[entry] = entry_data
except KeyError:
pass
return broadcast_interfaces
def get_iface_addr(interface):
ip = netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']
broadcast_ip = netifaces.ifaddresses(interface)[netifaces.AF_INET][0][
'broadcast']
return ip, broadcast_ip
def main():
bcast_list = list_interfaces()
print()
print(bcast_list)
if __name__ == '__main__':
main() |
retomerz/intellij-community | refs/heads/master | python/testData/copyPaste/singleLine/Indent22.after.py | 747 | class C:
def foo(self):
x = 1
y = 2
|
rbtcollins/pip | refs/heads/develop | pip/baseparser.py | 424 | """Base option parser setup"""
from __future__ import absolute_import
import sys
import optparse
import os
import re
import textwrap
from distutils.util import strtobool
from pip._vendor.six import string_types
from pip._vendor.six.moves import configparser
from pip.locations import (
legacy_config_file, config_basename, running_under_virtualenv,
site_config_files
)
from pip.utils import appdirs, get_terminal_size
_environ_prefix_re = re.compile(r"^PIP_", re.I)
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
"""A prettier/less verbose help formatter for optparse."""
def __init__(self, *args, **kwargs):
# help position must be aligned with __init__.parseopts.description
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = get_terminal_size()[0] - 2
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
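    # Editorial example (assumed option, not from the original source):
    # an option created with ('-f', '--format') that takes a value and
    # has dest 'format' is rendered as
    #     -f, --format <format>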
def format_heading(self, heading):
if heading == 'Options':
return ''
return heading + ':\n'
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
def format_description(self, description):
# leave full control over description to us
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
# some doc strings have initial newlines, some don't
description = description.lstrip('\n')
# some doc strings have final newlines and spaces, some don't
description = description.rstrip()
# dedent, then reindent
description = self.indent_lines(textwrap.dedent(description), " ")
description = '%s:\n%s\n' % (label, description)
return description
else:
return ''
def format_epilog(self, epilog):
# leave full control over epilog to us
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [indent + line for line in text.split('\n')]
return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
"""Custom help formatter for use in ConfigOptionParser.
    This updates the defaults before expanding them, allowing
them to show up correctly in the help listing.
"""
def expand_default(self, option):
if self.parser is not None:
self.parser._update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
@property
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
class ConfigOptionParser(CustomOptionParser):
"""Custom option parser which updates its defaults by checking the
configuration files and environmental variables"""
isolated = False
def __init__(self, *args, **kwargs):
self.config = configparser.RawConfigParser()
self.name = kwargs.pop('name')
self.isolated = kwargs.pop("isolated", False)
self.files = self.get_config_files()
if self.files:
self.config.read(self.files)
assert self.name
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
# the files returned by this method will be parsed in order with the
# first files listed being overridden by later files in standard
# ConfigParser fashion
config_file = os.environ.get('PIP_CONFIG_FILE', False)
if config_file == os.devnull:
return []
# at the base we have any site-wide configuration
files = list(site_config_files)
# per-user configuration next
if not self.isolated:
if config_file and os.path.exists(config_file):
files.append(config_file)
else:
# This is the legacy config file, we consider it to be a lower
# priority than the new file location.
files.append(legacy_config_file)
# This is the new config file, we consider it to be a higher
# priority than the legacy file.
files.append(
os.path.join(
appdirs.user_config_dir("pip"),
config_basename,
)
)
# finally virtualenv configuration first trumping others
if running_under_virtualenv():
venv_config_file = os.path.join(
sys.prefix,
config_basename,
)
if os.path.exists(venv_config_file):
files.append(venv_config_file)
return files
def check_default(self, option, key, val):
try:
return option.check_value(key, val)
except optparse.OptionValueError as exc:
print("An error occurred during configuration: %s" % exc)
sys.exit(3)
def _update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(
self.normalize_keys(self.get_config_section(section))
)
# 2. environmental variables
if not self.isolated:
config.update(self.normalize_keys(self.get_environ_vars()))
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in config.items():
# ignore empty values
if not val:
continue
option = self.get_option(key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
elif option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == 'callback':
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
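    # Editorial example (values assumed):
    #     normalize_keys([('timeout', '60'), ('no_cache_dir', 'false')])
    # returns {'--timeout': '60', '--no-cache-dir': 'false'}, so config
    # keys and environment-derived keys share one '--option' namespace.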
def get_config_section(self, name):
"""Get a section of a configuration"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val)
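    # Editorial example (environment assumed): with PIP_TIMEOUT=60 set,
    # get_environ_vars() yields ('timeout', '60'), which normalize_keys
    # above turns into the '--timeout' entry consumed by _update_defaults.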
def get_default_values(self):
"""Overridding to make updating the defaults after instantiation of
the option parser possible, _update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self._update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(2, "%s\n" % msg)
|
CyrusBiotechnology/gcloud-python | refs/heads/master | gcloud/datastore/helpers.py | 7 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for dealing with Cloud Datastore's Protobuf API.
The non-private functions are part of the API.
"""
import datetime
from google.protobuf.internal.type_checkers import Int64ValueChecker
import six
from gcloud._helpers import _datetime_from_microseconds
from gcloud._helpers import _microseconds_from_datetime
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
from gcloud.datastore.entity import Entity
from gcloud.datastore.key import Key
__all__ = ('entity_from_protobuf', 'key_from_protobuf')
INT_VALUE_CHECKER = Int64ValueChecker()
def find_true_dataset_id(dataset_id, connection):
"""Find the true (unaliased) dataset ID.
If the given ID already has a 's~' or 'e~' prefix, does nothing.
Otherwise, looks up a bogus Key('__MissingLookupKind', 1) and reads the
true prefixed dataset ID from the response (either from found or from
missing).
For some context, see:
github.com/GoogleCloudPlatform/gcloud-python/pull/528
github.com/GoogleCloudPlatform/google-cloud-datastore/issues/59
:type dataset_id: string
:param dataset_id: The dataset ID to un-alias / prefix.
:type connection: :class:`gcloud.datastore.connection.Connection`
:param connection: A connection provided to connection to the dataset.
:rtype: string
:returns: The true / prefixed / un-aliased dataset ID.
"""
if dataset_id.startswith('s~') or dataset_id.startswith('e~'):
return dataset_id
# Create the bogus Key protobuf to be looked up and remove
# the dataset ID so the backend won't complain.
bogus_key_pb = Key('__MissingLookupKind', 1,
dataset_id=dataset_id).to_protobuf()
bogus_key_pb.partition_id.ClearField('dataset_id')
found_pbs, missing_pbs, _ = connection.lookup(dataset_id, [bogus_key_pb])
# By not passing in `deferred`, lookup will continue until
# all results are `found` or `missing`.
all_pbs = missing_pbs + found_pbs
# We only asked for one, so should only receive one.
returned_pb, = all_pbs
return returned_pb.key.partition_id.dataset_id
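# Editorial sketch (connection object assumed, not part of this module):
#     true_id = find_true_dataset_id('foo', connection)
#     # true_id is now the prefixed form, e.g. 's~foo'
# Requests built with the prefixed ID avoid the backend mismatch the
# comments above describe.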
def entity_from_protobuf(pb):
"""Factory method for creating an entity based on a protobuf.
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`gcloud.datastore._datastore_v1_pb2.Entity`
:param pb: The Protobuf representing the entity.
:rtype: :class:`gcloud.datastore.entity.Entity`
:returns: The entity derived from the protobuf.
"""
key = None
if pb.HasField('key'):
key = key_from_protobuf(pb.key)
entity_props = {}
exclude_from_indexes = []
for property_pb in pb.property:
value = _get_value_from_property_pb(property_pb)
entity_props[property_pb.name] = value
# Check if property_pb.value was indexed. Lists need to be
# special-cased and we require all `indexed` values in a list agree.
if isinstance(value, list):
indexed_values = set(value_pb.indexed
for value_pb in property_pb.value.list_value)
if len(indexed_values) != 1:
raise ValueError('For a list_value, subvalues must either all '
'be indexed or all excluded from indexes.')
if not indexed_values.pop():
exclude_from_indexes.append(property_pb.name)
else:
if not property_pb.value.indexed:
exclude_from_indexes.append(property_pb.name)
entity = Entity(key=key, exclude_from_indexes=exclude_from_indexes)
entity.update(entity_props)
return entity
def key_from_protobuf(pb):
"""Factory method for creating a key based on a protobuf.
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param pb: The Protobuf representing the key.
:rtype: :class:`gcloud.datastore.key.Key`
:returns: a new `Key` instance
"""
path_args = []
for element in pb.path_element:
path_args.append(element.kind)
if element.HasField('id'):
path_args.append(element.id)
# This is safe: we expect proto objects returned will only have
# one of `name` or `id` set.
if element.HasField('name'):
path_args.append(element.name)
dataset_id = None
if pb.partition_id.HasField('dataset_id'):
dataset_id = pb.partition_id.dataset_id
namespace = None
if pb.partition_id.HasField('namespace'):
namespace = pb.partition_id.namespace
return Key(*path_args, namespace=namespace, dataset_id=dataset_id)
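# Editorial example (protobuf contents assumed): a Key protobuf whose
# path_element entries are (kind='Parent', id=1) and (kind='Child',
# name='alpha') reconstructs as Key('Parent', 1, 'Child', 'alpha'),
# carrying over any namespace and dataset_id set on partition_id.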
def _pb_attr_value(val):
"""Given a value, return the protobuf attribute name and proper value.
The Protobuf API uses different attribute names based on value types
rather than inferring the type. This function simply determines the
proper attribute name based on the type of the value provided and
returns the attribute name as well as a properly formatted value.
Certain value types need to be coerced into a different type (such
as a `datetime.datetime` into an integer timestamp, or a
    `gcloud.datastore.key.Key` into a Protobuf representation). This
function handles that for you.
.. note::
Values which are "text" ('unicode' in Python2, 'str' in Python3) map
to 'string_value' in the datastore; values which are "bytes"
('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
For example:
>>> _pb_attr_value(1234)
('integer_value', 1234)
>>> _pb_attr_value('my_string')
('string_value', 'my_string')
:type val: `datetime.datetime`, :class:`gcloud.datastore.key.Key`,
bool, float, integer, string
:param val: The value to be scrutinized.
:returns: A tuple of the attribute name and proper value type.
"""
if isinstance(val, datetime.datetime):
name = 'timestamp_microseconds'
value = _microseconds_from_datetime(val)
elif isinstance(val, Key):
name, value = 'key', val.to_protobuf()
elif isinstance(val, bool):
name, value = 'boolean', val
elif isinstance(val, float):
name, value = 'double', val
elif isinstance(val, six.integer_types):
INT_VALUE_CHECKER.CheckValue(val) # Raise an exception if invalid.
name, value = 'integer', int(val) # Always cast to an integer.
elif isinstance(val, six.text_type):
name, value = 'string', val
elif isinstance(val, (bytes, str)):
name, value = 'blob', val
elif isinstance(val, Entity):
name, value = 'entity', val
elif isinstance(val, list):
name, value = 'list', val
else:
raise ValueError("Unknown protobuf attr type %s" % type(val))
return name + '_value', value
def _get_value_from_value_pb(value_pb):
"""Given a protobuf for a Value, get the correct value.
The Cloud Datastore Protobuf API returns a Property Protobuf which
has one value set and the rest blank. This function retrieves the
    one value provided.
Some work is done to coerce the return value into a more useful type
(particularly in the case of a timestamp value, or a key value).
:type value_pb: :class:`gcloud.datastore._datastore_v1_pb2.Value`
:param value_pb: The Value Protobuf.
:returns: The value provided by the Protobuf.
"""
result = None
if value_pb.HasField('timestamp_microseconds_value'):
microseconds = value_pb.timestamp_microseconds_value
result = _datetime_from_microseconds(microseconds)
elif value_pb.HasField('key_value'):
result = key_from_protobuf(value_pb.key_value)
elif value_pb.HasField('boolean_value'):
result = value_pb.boolean_value
elif value_pb.HasField('double_value'):
result = value_pb.double_value
elif value_pb.HasField('integer_value'):
result = value_pb.integer_value
elif value_pb.HasField('string_value'):
result = value_pb.string_value
elif value_pb.HasField('blob_value'):
result = value_pb.blob_value
elif value_pb.HasField('entity_value'):
result = entity_from_protobuf(value_pb.entity_value)
elif value_pb.list_value:
result = [_get_value_from_value_pb(x) for x in value_pb.list_value]
return result
def _get_value_from_property_pb(property_pb):
"""Given a protobuf for a Property, get the correct value.
The Cloud Datastore Protobuf API returns a Property Protobuf which
has one value set and the rest blank. This function retrieves the
    one value provided.
Some work is done to coerce the return value into a more useful type
(particularly in the case of a timestamp value, or a key value).
:type property_pb: :class:`gcloud.datastore._datastore_v1_pb2.Property`
:param property_pb: The Property Protobuf.
:returns: The value provided by the Protobuf.
"""
return _get_value_from_value_pb(property_pb.value)
def _set_protobuf_value(value_pb, val):
"""Assign 'val' to the correct subfield of 'value_pb'.
The Protobuf API uses different attribute names based on value types
rather than inferring the type.
Some value types (entities, keys, lists) cannot be directly
assigned; this function handles them correctly.
:type value_pb: :class:`gcloud.datastore._datastore_v1_pb2.Value`
:param value_pb: The value protobuf to which the value is being assigned.
:type val: :class:`datetime.datetime`, boolean, float, integer, string,
:class:`gcloud.datastore.key.Key`,
:class:`gcloud.datastore.entity.Entity`,
:param val: The value to be assigned.
"""
if val is None:
value_pb.Clear()
return
attr, val = _pb_attr_value(val)
if attr == 'key_value':
value_pb.key_value.CopyFrom(val)
elif attr == 'entity_value':
e_pb = value_pb.entity_value
e_pb.Clear()
key = val.key
if key is not None:
e_pb.key.CopyFrom(key.to_protobuf())
for item_key, value in val.items():
p_pb = e_pb.property.add()
p_pb.name = item_key
_set_protobuf_value(p_pb.value, value)
elif attr == 'list_value':
l_pb = value_pb.list_value
for item in val:
i_pb = l_pb.add()
_set_protobuf_value(i_pb, item)
else: # scalar, just assign
setattr(value_pb, attr, val)
def _prepare_key_for_request(key_pb):
"""Add protobuf keys to a request object.
:type key_pb: :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pb: A key to be added to a request.
:rtype: :class:`gcloud.datastore._datastore_v1_pb2.Key`
:returns: A key which will be added to a request. It will be the
original if nothing needs to be changed.
"""
if key_pb.partition_id.HasField('dataset_id'):
# We remove the dataset_id from the protobuf. This is because
# the backend fails a request if the key contains un-prefixed
# dataset ID. The backend fails because requests to
# /datastore/.../datasets/foo/...
# and
# /datastore/.../datasets/s~foo/...
# both go to the datastore given by 's~foo'. So if the key
# protobuf in the request body has dataset_id='foo', the
# backend will reject since 'foo' != 's~foo'.
new_key_pb = datastore_pb.Key()
new_key_pb.CopyFrom(key_pb)
new_key_pb.partition_id.ClearField('dataset_id')
key_pb = new_key_pb
return key_pb
|
henrykironde/scikit-learn | refs/heads/master | sklearn/externals/joblib/_memory_helpers.py | 303 | try:
# Available in Python 3
from tokenize import open as open_py_source
except ImportError:
# Copied from python3 tokenize
from codecs import lookup, BOM_UTF8
import re
from io import TextIOWrapper, open
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def _detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that
should be used to decode a Python source file. It requires one
        argument, readline, in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are
present, but disagree, a SyntaxError will be raised. If the encoding
cookie is an invalid charset, raise a SyntaxError. Note that if a
utf-8 bom is found, 'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be
returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
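    # Editorial example (input assumed): for a file whose first line is
    #     # -*- coding: latin-1 -*-
    # _detect_encoding(f.readline) returns ('iso-8859-1', [that line]),
    # because _get_normal_name above canonicalises 'latin-1'.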
def open_py_source(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, 'rb')
encoding, lines = _detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text |
seojunyang/pyconkr-2015 | refs/heads/master | pyconkr/wsgi.py | 6 | """
WSGI config for pyconkr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pyconkr.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
stvstnfrd/edx-platform | refs/heads/master | import_shims/lms/third_party_auth/apps.py | 4 | """Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('third_party_auth.apps', 'common.djangoapps.third_party_auth.apps')
from common.djangoapps.third_party_auth.apps import *
|
cogeorg/black_rhino | refs/heads/master | examples/Georg2012/networkx/algorithms/link_analysis/tests/test_hits.py | 10 | #!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
from nose.plugins.attrib import attr
import networkx
# Example from
# A. Langville and C. Meyer, "A survey of eigenvector methods of web
# information retrieval." http://citeseer.ist.psu.edu/713792.html
class TestHITS:
def setUp(self):
G=networkx.DiGraph()
edges=[(1,3),(1,5),\
(2,1),\
(3,5),\
(5,4),(5,3),\
(6,5)]
G.add_edges_from(edges,weight=1)
self.G=G
self.G.a=dict(zip(G,[0.000000, 0.000000, 0.366025,
0.133975, 0.500000, 0.000000]))
self.G.h=dict(zip(G,[ 0.366025, 0.000000, 0.211325,
0.000000, 0.211325, 0.211325]))
def test_hits(self):
G=self.G
h,a=networkx.hits(G,tol=1.e-08)
for n in G:
assert_almost_equal(h[n],G.h[n],places=4)
for n in G:
assert_almost_equal(a[n],G.a[n],places=4)
def test_hits_nstart(self):
G = self.G
nstart = dict([(i, 1./2) for i in G])
h, a = networkx.hits(G, nstart = nstart)
@attr('numpy')
def test_hits_numpy(self):
try:
import numpy as np
except ImportError:
raise SkipTest('NumPy not available.')
G=self.G
h,a=networkx.hits_numpy(G)
for n in G:
assert_almost_equal(h[n],G.h[n],places=4)
for n in G:
assert_almost_equal(a[n],G.a[n],places=4)
def test_hits_scipy(self):
try:
import scipy as sp
except ImportError:
raise SkipTest('SciPy not available.')
G=self.G
h,a=networkx.hits_scipy(G,tol=1.e-08)
for n in G:
assert_almost_equal(h[n],G.h[n],places=4)
for n in G:
assert_almost_equal(a[n],G.a[n],places=4)
@attr('numpy')
def test_empty(self):
try:
import numpy
except ImportError:
raise SkipTest('numpy not available.')
G=networkx.Graph()
assert_equal(networkx.hits(G),({},{}))
assert_equal(networkx.hits_numpy(G),({},{}))
assert_equal(networkx.hits_scipy(G),({},{}))
assert_equal(networkx.authority_matrix(G).shape,(0,0))
assert_equal(networkx.hub_matrix(G).shape,(0,0))
|
evilynux/fofix | refs/heads/master | pkg/ListToNSIS.py | 15 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire X (FoFiX) #
# Copyright (C) 2009 myfingershurt #
# 2009 John Stumpo #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
'''Functions for working with WinRAR listfiles and
converting them to NSIS script instructions.'''
__version__ = '$Id$'
import os
import win32api
import fnmatch
import hashlib
class NsisScriptGenerator(object):
def __init__(self, baseFolder='.'):
self.nodeList = []
self.baseFolder = baseFolder
def readList(self, listname):
l = open(listname, 'r')
for line in l:
line = line.partition('//')[0].strip() # remove comments
if not len(line):
continue
if line[0] == '"' and line[-1] == '"':
line = line[1:-1]
oldpwd = os.getcwd()
os.chdir(self.baseFolder)
if os.path.dirname(line) == '' or os.path.isdir(os.path.dirname(line)):
for f in win32api.FindFiles(line):
path = os.path.join(os.path.dirname(line), f[8])
if os.path.isfile(path) and path.find('.svn') == -1: # omit .svn folders
self.nodeList.append(path)
os.chdir(oldpwd)
l.close()
def readExcludeList(self, listname):
patterns = []
l = open(listname, 'r')
for line in l:
line = line.partition('//')[0].strip() # remove comments
if not len(line):
continue
if line[0] == '"' and line[-1] == '"':
line = line[1:-1]
patterns.append(line)
l.close()
for p in patterns:
self.nodeList = [n for n in self.nodeList if not fnmatch.fnmatch(n.lower(), p.lower())]
def getInstallScript(self):
prevFolder = None
script = ''
for f in self.nodeList:
if os.path.dirname(f) != prevFolder:
script += 'SetOutPath "$INSTDIR\\%s"\r\n' % os.path.dirname(f).replace('..\\', '')
prevFolder = os.path.dirname(f)
script += 'File "%s"\r\n' % f
script += 'SetOutPath "$INSTDIR"\r\n'
return script
def getUninstallScript(self):
prevFolder = None
script = ''
for f in reversed(self.nodeList):
if os.path.dirname(f) != prevFolder:
if prevFolder is not None:
p = prevFolder.replace('..\\', '')
while len(p):
script += 'RmDir "$INSTDIR\\%s"\r\n' % p
p = os.path.dirname(p)
prevFolder = os.path.dirname(f)
script += 'Delete "$INSTDIR\\%s"\r\n' % f.replace('..\\', '')
if prevFolder is not None:
script += 'RmDir "$INSTDIR\\%s"\r\n' % prevFolder.replace('..\\', '')
return script
def separate(path, scriptIn):
scriptOut = ([], [])
for l in scriptIn.splitlines():
if l.lower().find(path.lower()) == -1:
scriptOut[0].append(l)
else:
scriptOut[1].append(l)
return ('\r\n'.join(x) for x in scriptOut)
class NsisScriptBuilder(object):
def __init__(self, header):
self.header = header
self.sectionScripts = []
def addSection(self, secName, secInstContent, secUninstContent, secDescription, secStart='Section', secEnd='SectionEnd'):
self.sectionScripts.append([secName, secInstContent, secUninstContent, secDescription, secStart, secEnd])
def filterSection(self, secName, secFilter, secDescription, secStart='Section', secEnd='SectionEnd', instHeader='', instFooter='', uninstHeader='', uninstFooter=''):
self.sectionScripts[0][1], instContent = separate(secFilter, self.sectionScripts[0][1])
self.sectionScripts[0][2], uninstContent = separate(secFilter, self.sectionScripts[0][2])
self.addSection(secName, instHeader+instContent+instFooter, uninstHeader+uninstContent+uninstFooter, secDescription, secStart, secEnd)
def getScript(self):
script = self.header
for name, instContent, uninstContent, desc, start, end in self.sectionScripts:
script += '''
%s "%s" SecID_%s
%s
%s
''' % (start, name, hashlib.sha1(name).hexdigest(), instContent, end)
script += '!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN\r\n'
for name, instContent, uninstContent, desc, start, end in self.sectionScripts:
script += '!insertmacro MUI_DESCRIPTION_TEXT ${SecID_%s} "%s"\r\n' % (hashlib.sha1(name).hexdigest(), desc)
script += '!insertmacro MUI_FUNCTION_DESCRIPTION_END\r\n'
for name, instContent, uninstContent, desc, start, end in reversed(self.sectionScripts):
script += '''
Section "un.%s"
%s
SectionEnd
''' % (name, uninstContent)
return script
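# Hedged usage sketch (not part of the original module): one way the two
# classes above might be combined. The listfile names, header text and
# output path are illustrative assumptions, not files shipped with FoFiX.
if __name__ == '__main__':
    gen = NsisScriptGenerator(baseFolder='..')
    gen.readList('win32_install.lst')          # hypothetical listfile
    gen.readExcludeList('win32_exclude.lst')   # hypothetical exclude list
    builder = NsisScriptBuilder('; generated header\r\n')
    builder.addSection('Core Files', gen.getInstallScript(),
                       gen.getUninstallScript(), 'Main program files.')
    open('sections.nsh', 'wb').write(builder.getScript())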
|
mhefley/hackart | refs/heads/master | api/rest_framework_config.py | 1 | from rest_framework.authentication import SessionAuthentication
class CsrfExemptSessionAuthentication (SessionAuthentication):
def enforce_csrf(self, request):
return |
flyfei/python-for-android | refs/heads/master | python-modules/twisted/twisted/test/test_socks.py | 59 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.protocols.socks}, an implementation of the SOCKSv4 and
SOCKSv4a protocols.
"""
import struct, socket
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.internet import defer, address, reactor
from twisted.internet.error import DNSLookupError
from twisted.protocols import socks
class StringTCPTransport(proto_helpers.StringTransport):
stringTCPTransport_closing = False
peer = None
def getPeer(self):
return self.peer
def getHost(self):
return address.IPv4Address('TCP', '2.3.4.5', 42)
def loseConnection(self):
self.stringTCPTransport_closing = True
class FakeResolverReactor:
"""
Bare-bones reactor with deterministic behavior for the resolve method.
"""
def __init__(self, names):
"""
@type names: C{dict} containing C{str} keys and C{str} values.
@param names: A hostname to IP address mapping. The IP addresses are
stringified dotted quads.
"""
self.names = names
def resolve(self, hostname):
"""
Resolve a hostname by looking it up in the C{names} dictionary.
"""
try:
return defer.succeed(self.names[hostname])
except KeyError:
return defer.fail(
DNSLookupError("FakeResolverReactor couldn't find " + hostname))
class SOCKSv4Driver(socks.SOCKSv4):
# last SOCKSv4Outgoing instantiated
driver_outgoing = None
# last SOCKSv4IncomingFactory instantiated
driver_listen = None
def connectClass(self, host, port, klass, *args):
# fake it
proto = klass(*args)
proto.transport = StringTCPTransport()
proto.transport.peer = address.IPv4Address('TCP', host, port)
proto.connectionMade()
self.driver_outgoing = proto
return defer.succeed(proto)
def listenClass(self, port, klass, *args):
# fake it
factory = klass(*args)
self.driver_listen = factory
if port == 0:
port = 1234
return defer.succeed(('6.7.8.9', port))
class Connect(unittest.TestCase):
"""
Tests for SOCKS and SOCKSv4a connect requests using the L{SOCKSv4} protocol.
"""
def setUp(self):
self.sock = SOCKSv4Driver()
self.sock.transport = StringTCPTransport()
self.sock.connectionMade()
self.sock.reactor = FakeResolverReactor({"localhost":"127.0.0.1"})
def tearDown(self):
outgoing = self.sock.driver_outgoing
if outgoing is not None:
self.assert_(outgoing.transport.stringTCPTransport_closing,
"Outgoing SOCKS connections need to be closed.")
def test_simple(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 34)
+ socket.inet_aton('1.2.3.4'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
self.assert_(self.sock.driver_outgoing is not None)
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(self.sock.driver_outgoing.transport.value(),
'hello, world')
# the other way around
self.sock.driver_outgoing.dataReceived('hi there')
self.assertEqual(self.sock.transport.value(), 'hi there')
self.sock.connectionLost('fake reason')
def test_socks4aSuccessfulResolution(self):
"""
If the destination IP address has zeros for the first three octets and
non-zero for the fourth octet, the client is attempting a v4a
connection. A hostname is specified after the user ID string and the
server connects to the address that hostname resolves to.
@see: U{http://en.wikipedia.org/wiki/SOCKS#SOCKS_4a_protocol}
"""
# send the domain name "localhost" to be resolved
clientRequest = (
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('0.0.0.1')
+ 'fooBAZ\0'
+ 'localhost\0')
# Deliver the bytes one by one to exercise the protocol's buffering
# logic. FakeResolverReactor's resolve method is invoked to "resolve"
# the hostname.
for byte in clientRequest:
self.sock.dataReceived(byte)
sent = self.sock.transport.value()
self.sock.transport.clear()
# Verify that the server responded with the address which will be
# connected to.
self.assertEquals(
sent,
struct.pack('!BBH', 0, 90, 34) + socket.inet_aton('127.0.0.1'))
self.assertFalse(self.sock.transport.stringTCPTransport_closing)
self.assertNotIdentical(self.sock.driver_outgoing, None)
# Pass some data through and verify it is forwarded to the outgoing
# connection.
self.sock.dataReceived('hello, world')
self.assertEquals(
self.sock.driver_outgoing.transport.value(), 'hello, world')
# Deliver some data from the output connection and verify it is
# passed along to the incoming side.
self.sock.driver_outgoing.dataReceived('hi there')
self.assertEquals(self.sock.transport.value(), 'hi there')
self.sock.connectionLost('fake reason')
def test_socks4aFailedResolution(self):
"""
Failed hostname resolution on a SOCKSv4a packet results in a 91 error
response and the connection getting closed.
"""
# send the domain name "failinghost" to be resolved
clientRequest = (
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('0.0.0.1')
+ 'fooBAZ\0'
+ 'failinghost\0')
# Deliver the bytes one by one to exercise the protocol's buffering
# logic. FakeResolverReactor's resolve method is invoked to "resolve"
# the hostname.
for byte in clientRequest:
self.sock.dataReceived(byte)
# Verify that the server responds with a 91 error.
sent = self.sock.transport.value()
self.assertEquals(
sent,
struct.pack('!BBH', 0, 91, 0) + socket.inet_aton('0.0.0.0'))
# A failed resolution causes the transport to drop the connection.
self.assertTrue(self.sock.transport.stringTCPTransport_closing)
self.assertIdentical(self.sock.driver_outgoing, None)
def test_accessDenied(self):
self.sock.authorize = lambda code, server, port, user: 0
self.sock.dataReceived(
struct.pack('!BBH', 4, 1, 4242)
+ socket.inet_aton('10.2.3.4')
+ 'fooBAR'
+ '\0')
self.assertEqual(self.sock.transport.value(),
struct.pack('!BBH', 0, 91, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(self.sock.transport.stringTCPTransport_closing)
self.assertIdentical(self.sock.driver_outgoing, None)
def test_eofRemote(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(self.sock.driver_outgoing.transport.value(),
'hello, world')
# now close it from the server side
self.sock.driver_outgoing.transport.loseConnection()
self.sock.driver_outgoing.connectionLost('fake reason')
def test_eofLocal(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(self.sock.driver_outgoing.transport.value(),
'hello, world')
# now close it from the client side
self.sock.connectionLost('fake reason')
class Bind(unittest.TestCase):
"""
Tests for SOCKS and SOCKSv4a bind requests using the L{SOCKSv4} protocol.
"""
def setUp(self):
self.sock = SOCKSv4Driver()
self.sock.transport = StringTCPTransport()
self.sock.connectionMade()
self.sock.reactor = FakeResolverReactor({"localhost":"127.0.0.1"})
## def tearDown(self):
## # TODO ensure the listen port is closed
## listen = self.sock.driver_listen
## if listen is not None:
## self.assert_(incoming.transport.stringTCPTransport_closing,
## "Incoming SOCKS connections need to be closed.")
def test_simple(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 1234)
+ socket.inet_aton('6.7.8.9'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
self.assert_(self.sock.driver_listen is not None)
# connect
incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
self.assertNotIdentical(incoming, None)
incoming.transport = StringTCPTransport()
incoming.connectionMade()
# now we should have the second reply packet
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(incoming.transport.value(),
'hello, world')
# the other way around
incoming.dataReceived('hi there')
self.assertEqual(self.sock.transport.value(), 'hi there')
self.sock.connectionLost('fake reason')
def test_socks4a(self):
"""
If the destination IP address has zeros for the first three octets and
non-zero for the fourth octet, the client is attempting a v4a
connection. A hostname is specified after the user ID string and the
server connects to the address that hostname resolves to.
@see: U{http://en.wikipedia.org/wiki/SOCKS#SOCKS_4a_protocol}
"""
# send the domain name "localhost" to be resolved
clientRequest = (
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('0.0.0.1')
+ 'fooBAZ\0'
+ 'localhost\0')
# Deliver the bytes one by one to exercise the protocol's buffering
# logic. FakeResolverReactor's resolve method is invoked to "resolve"
# the hostname.
for byte in clientRequest:
self.sock.dataReceived(byte)
sent = self.sock.transport.value()
self.sock.transport.clear()
# Verify that the server responded with the address which will be
# connected to.
self.assertEquals(
sent,
struct.pack('!BBH', 0, 90, 1234) + socket.inet_aton('6.7.8.9'))
self.assertFalse(self.sock.transport.stringTCPTransport_closing)
self.assertNotIdentical(self.sock.driver_listen, None)
# connect
incoming = self.sock.driver_listen.buildProtocol(('127.0.0.1', 5345))
self.assertNotIdentical(incoming, None)
incoming.transport = StringTCPTransport()
incoming.connectionMade()
# now we should have the second reply packet
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 0)
+ socket.inet_aton('0.0.0.0'))
self.assertNotIdentical(
self.sock.transport.stringTCPTransport_closing, None)
# Deliver some data from the output connection and verify it is
# passed along to the incoming side.
self.sock.dataReceived('hi there')
self.assertEquals(incoming.transport.value(), 'hi there')
# the other way around
incoming.dataReceived('hi there')
self.assertEqual(self.sock.transport.value(), 'hi there')
self.sock.connectionLost('fake reason')
def test_socks4aFailedResolution(self):
"""
Failed hostname resolution on a SOCKSv4a packet results in a 91 error
response and the connection getting closed.
"""
# send the domain name "failinghost" to be resolved
clientRequest = (
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('0.0.0.1')
+ 'fooBAZ\0'
+ 'failinghost\0')
# Deliver the bytes one by one to exercise the protocol's buffering
# logic. FakeResolverReactor's resolve method is invoked to "resolve"
# the hostname.
for byte in clientRequest:
self.sock.dataReceived(byte)
# Verify that the server responds with a 91 error.
sent = self.sock.transport.value()
self.assertEquals(
sent,
struct.pack('!BBH', 0, 91, 0) + socket.inet_aton('0.0.0.0'))
# A failed resolution causes the transport to drop the connection.
self.assertTrue(self.sock.transport.stringTCPTransport_closing)
self.assertIdentical(self.sock.driver_outgoing, None)
def test_accessDenied(self):
self.sock.authorize = lambda code, server, port, user: 0
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 4242)
+ socket.inet_aton('10.2.3.4')
+ 'fooBAR'
+ '\0')
self.assertEqual(self.sock.transport.value(),
struct.pack('!BBH', 0, 91, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(self.sock.transport.stringTCPTransport_closing)
self.assertIdentical(self.sock.driver_listen, None)
def test_eofRemote(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# connect
incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
self.assertNotIdentical(incoming, None)
incoming.transport = StringTCPTransport()
incoming.connectionMade()
# now we should have the second reply packet
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(incoming.transport.value(),
'hello, world')
# now close it from the server side
incoming.transport.loseConnection()
incoming.connectionLost('fake reason')
def test_eofLocal(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# connect
incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
self.assertNotIdentical(incoming, None)
incoming.transport = StringTCPTransport()
incoming.connectionMade()
# now we should have the second reply packet
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(incoming.transport.value(),
'hello, world')
# now close it from the client side
self.sock.connectionLost('fake reason')
def test_badSource(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# connect from WRONG address
incoming = self.sock.driver_listen.buildProtocol(('1.6.6.6', 666))
self.assertIdentical(incoming, None)
# Now we should have the second reply packet and it should
# be a failure. The connection should be closing.
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 91, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(self.sock.transport.stringTCPTransport_closing)
|
EricMuller/mywebmarks-backend | refs/heads/master | requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/conch/test/test_default.py | 12 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.client.default}.
"""
from __future__ import absolute_import, division
import sys
from twisted.python.reflect import requireModule
if requireModule('cryptography') and requireModule('pyasn1'):
from twisted.conch.client.agent import SSHAgentClient
from twisted.conch.client.default import SSHUserAuthClient
from twisted.conch.client.options import ConchOptions
from twisted.conch.client import default
from twisted.conch.ssh.keys import Key
skip = None
else:
skip = "cryptography and PyASN1 required for twisted.conch.client.default."
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.conch.error import ConchError
from twisted.conch.test import keydata
from twisted.test.proto_helpers import StringTransport
from twisted.python.compat import nativeString
from twisted.python.runtime import platform
if platform.isWindows():
windowsSkip = (
"genericAnswers and getPassword does not work on Windows."
" Should be fixed as part of fixing bug 6409 and 6410")
else:
windowsSkip = skip
ttySkip = None
if not sys.stdin.isatty():
ttySkip = "sys.stdin is not an interactive tty"
if not sys.stdout.isatty():
ttySkip = "sys.stdout is not an interactive tty"
class SSHUserAuthClientTests(TestCase):
"""
Tests for L{SSHUserAuthClient}.
@type rsaPublic: L{Key}
@ivar rsaPublic: A public RSA key.
"""
def setUp(self):
self.rsaPublic = Key.fromString(keydata.publicRSA_openssh)
self.tmpdir = FilePath(self.mktemp())
self.tmpdir.makedirs()
self.rsaFile = self.tmpdir.child('id_rsa')
self.rsaFile.setContent(keydata.privateRSA_openssh)
self.tmpdir.child('id_rsa.pub').setContent(keydata.publicRSA_openssh)
def test_signDataWithAgent(self):
"""
When connected to an agent, L{SSHUserAuthClient} can use it to
request signatures of particular data with a particular L{Key}.
"""
client = SSHUserAuthClient(b"user", ConchOptions(), None)
agent = SSHAgentClient()
transport = StringTransport()
agent.makeConnection(transport)
client.keyAgent = agent
cleartext = b"Sign here"
client.signData(self.rsaPublic, cleartext)
self.assertEqual(
transport.value(),
b"\x00\x00\x00\x8b\r\x00\x00\x00u" + self.rsaPublic.blob() +
b"\x00\x00\x00\t" + cleartext +
b"\x00\x00\x00\x00")
def test_agentGetPublicKey(self):
"""
L{SSHUserAuthClient} looks up public keys from the agent using the
        L{SSHAgentClient} class. L{SSHAgentClient.getPublicKey} returns a
        L{Key} object with one of the public keys in the agent. If no more
keys are present, it returns L{None}.
"""
agent = SSHAgentClient()
agent.blobs = [self.rsaPublic.blob()]
key = agent.getPublicKey()
self.assertTrue(key.isPublic())
self.assertEqual(key, self.rsaPublic)
self.assertIsNone(agent.getPublicKey())
def test_getPublicKeyFromFile(self):
"""
L{SSHUserAuthClient.getPublicKey()} is able to get a public key from
the first file described by its options' C{identitys} list, and return
the corresponding public L{Key} object.
"""
options = ConchOptions()
options.identitys = [self.rsaFile.path]
client = SSHUserAuthClient(b"user", options, None)
key = client.getPublicKey()
self.assertTrue(key.isPublic())
self.assertEqual(key, self.rsaPublic)
def test_getPublicKeyAgentFallback(self):
"""
If an agent is present, but doesn't return a key,
        L{SSHUserAuthClient.getPublicKey} continues with the normal key lookup.
"""
options = ConchOptions()
options.identitys = [self.rsaFile.path]
agent = SSHAgentClient()
client = SSHUserAuthClient(b"user", options, None)
client.keyAgent = agent
key = client.getPublicKey()
self.assertTrue(key.isPublic())
self.assertEqual(key, self.rsaPublic)
def test_getPublicKeyBadKeyError(self):
"""
If L{keys.Key.fromFile} raises a L{keys.BadKeyError}, the
L{SSHUserAuthClient.getPublicKey} tries again to get a public key by
calling itself recursively.
"""
options = ConchOptions()
self.tmpdir.child('id_dsa.pub').setContent(keydata.publicDSA_openssh)
dsaFile = self.tmpdir.child('id_dsa')
dsaFile.setContent(keydata.privateDSA_openssh)
options.identitys = [self.rsaFile.path, dsaFile.path]
self.tmpdir.child('id_rsa.pub').setContent(b'not a key!')
client = SSHUserAuthClient(b"user", options, None)
key = client.getPublicKey()
self.assertTrue(key.isPublic())
self.assertEqual(key, Key.fromString(keydata.publicDSA_openssh))
self.assertEqual(client.usedFiles, [self.rsaFile.path, dsaFile.path])
def test_getPrivateKey(self):
"""
L{SSHUserAuthClient.getPrivateKey} will load a private key from the
last used file populated by L{SSHUserAuthClient.getPublicKey}, and
return a L{Deferred} which fires with the corresponding private L{Key}.
"""
rsaPrivate = Key.fromString(keydata.privateRSA_openssh)
options = ConchOptions()
options.identitys = [self.rsaFile.path]
client = SSHUserAuthClient(b"user", options, None)
# Populate the list of used files
client.getPublicKey()
def _cbGetPrivateKey(key):
self.assertFalse(key.isPublic())
self.assertEqual(key, rsaPrivate)
return client.getPrivateKey().addCallback(_cbGetPrivateKey)
def test_getPrivateKeyPassphrase(self):
"""
L{SSHUserAuthClient} can get a private key from a file, and return a
Deferred called back with a private L{Key} object, even if the key is
encrypted.
"""
rsaPrivate = Key.fromString(keydata.privateRSA_openssh)
passphrase = b'this is the passphrase'
self.rsaFile.setContent(rsaPrivate.toString('openssh', passphrase))
options = ConchOptions()
options.identitys = [self.rsaFile.path]
client = SSHUserAuthClient(b"user", options, None)
# Populate the list of used files
client.getPublicKey()
def _getPassword(prompt):
self.assertEqual(
prompt,
"Enter passphrase for key '%s': " % (self.rsaFile.path,))
return nativeString(passphrase)
def _cbGetPrivateKey(key):
self.assertFalse(key.isPublic())
self.assertEqual(key, rsaPrivate)
self.patch(client, '_getPassword', _getPassword)
return client.getPrivateKey().addCallback(_cbGetPrivateKey)
def test_getPassword(self):
"""
Get the password using
L{twisted.conch.client.default.SSHUserAuthClient.getPassword}
"""
class FakeTransport:
def __init__(self, host):
self.transport = self
self.host = host
def getPeer(self):
return self
options = ConchOptions()
client = SSHUserAuthClient(b"user", options, None)
client.transport = FakeTransport("127.0.0.1")
def getpass(prompt):
self.assertEqual(prompt, "user@127.0.0.1's password: ")
return 'bad password'
self.patch(default.getpass, 'getpass', getpass)
d = client.getPassword()
d.addCallback(self.assertEqual, b'bad password')
return d
test_getPassword.skip = windowsSkip or ttySkip
def test_getPasswordPrompt(self):
"""
Get the password using
L{twisted.conch.client.default.SSHUserAuthClient.getPassword}
using a different prompt.
"""
options = ConchOptions()
client = SSHUserAuthClient(b"user", options, None)
prompt = b"Give up your password"
def getpass(p):
self.assertEqual(p, nativeString(prompt))
return 'bad password'
self.patch(default.getpass, 'getpass', getpass)
d = client.getPassword(prompt)
d.addCallback(self.assertEqual, b'bad password')
return d
test_getPasswordPrompt.skip = windowsSkip or ttySkip
def test_getPasswordConchError(self):
"""
Get the password using
L{twisted.conch.client.default.SSHUserAuthClient.getPassword}
        and trigger a L{twisted.conch.error.ConchError}.
"""
options = ConchOptions()
client = SSHUserAuthClient(b"user", options, None)
def getpass(prompt):
raise KeyboardInterrupt("User pressed CTRL-C")
self.patch(default.getpass, 'getpass', getpass)
stdout, stdin = sys.stdout, sys.stdin
d = client.getPassword(b'?')
@d.addErrback
def check_sys(fail):
self.assertEqual(
[stdout, stdin], [sys.stdout, sys.stdin])
return fail
self.assertFailure(d, ConchError)
test_getPasswordConchError.skip = windowsSkip or ttySkip
def test_getGenericAnswers(self):
"""
L{twisted.conch.client.default.SSHUserAuthClient.getGenericAnswers}
"""
options = ConchOptions()
client = SSHUserAuthClient(b"user", options, None)
def getpass(prompt):
self.assertEqual(prompt, "pass prompt")
return "getpass"
self.patch(default.getpass, 'getpass', getpass)
def raw_input(prompt):
self.assertEqual(prompt, "raw_input prompt")
return "raw_input"
self.patch(default, 'raw_input', raw_input)
d = client.getGenericAnswers(
b"Name", b"Instruction", [
(b"pass prompt", False), (b"raw_input prompt", True)])
d.addCallback(
self.assertListEqual, ["getpass", "raw_input"])
return d
test_getGenericAnswers.skip = windowsSkip or ttySkip
class ConchOptionsParsing(TestCase):
"""
Options parsing.
"""
def test_macs(self):
"""
Specify MAC algorithms.
"""
opts = ConchOptions()
e = self.assertRaises(SystemExit, opts.opt_macs, "invalid-mac")
self.assertIn("Unknown mac type", e.code)
opts = ConchOptions()
opts.opt_macs("hmac-sha2-512")
self.assertEqual(opts['macs'], [b"hmac-sha2-512"])
opts.opt_macs(b"hmac-sha2-512")
self.assertEqual(opts['macs'], [b"hmac-sha2-512"])
opts.opt_macs("hmac-sha2-256,hmac-sha1,hmac-md5")
self.assertEqual(opts['macs'], [b"hmac-sha2-256", b"hmac-sha1", b"hmac-md5"])
def test_host_key_algorithms(self):
"""
Specify host key algorithms.
"""
opts = ConchOptions()
e = self.assertRaises(SystemExit, opts.opt_host_key_algorithms, "invalid-key")
self.assertIn("Unknown host key type", e.code)
opts = ConchOptions()
opts.opt_host_key_algorithms("ssh-rsa")
self.assertEqual(opts['host-key-algorithms'], [b"ssh-rsa"])
opts.opt_host_key_algorithms(b"ssh-dss")
self.assertEqual(opts['host-key-algorithms'], [b"ssh-dss"])
opts.opt_host_key_algorithms("ssh-rsa,ssh-dss")
self.assertEqual(opts['host-key-algorithms'], [b"ssh-rsa", b"ssh-dss"])
|
gliderkite/ants | refs/heads/master | src/sim.py | 1 | #! /usr/bin/env python
"""Ants simulator module."""
import random
import entity
import behavior
def place_food(world, cardinality, quantity):
"""Place some food in random cells of the world."""
world.food_quantity = cardinality * quantity
width, height = world.size
i = 0
while i < cardinality:
loc = random.randrange(0, width), random.randrange(0, height)
if loc != world.nest:
world[loc].food_quantity += quantity
else:
world.nest_food_quantity += quantity
world.food_quantity -= quantity
i += 1
def birth(world, food_qty, upper_bound=None, direction=None):
"""Add a new ant if possible."""
# check if the number of current ants is lower of the upper bound
if not upper_bound or len(world.ants) < upper_bound:
# check if the nest has enough food
if world.nest_food_quantity >= food_qty:
verse = direction or random.randrange(0, 8)
world.nest_food_quantity -= food_qty
world.ants.append(entity.Ant(world, verse))
def death(world, life_expectancy):
"""Kill an ald ant."""
# get the ants too old
ancients = [a for a in world.ants if a.age > life_expectancy]
if ancients:
# random choice of the dying ant
dying = random.choice(ancients)
# drop the food of the dying ant
if dying.location == world.nest:
world.nest_food_quantity += dying.food_quantity
world.food_quantity -= dying.food_quantity
else:
world[dying.location].food_quantity += dying.food_quantity
world.ants.remove(dying)
def evaporate(world, colony_ph_factor, food_ph_factor):
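    """Evaporate colony and food pheromone in every cell of the world."""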
width, height = world.size
x = 0
while x < width:
y = 0
while y < height:
if entity.colony_lifespan(world.cells[x][y]) > 0:
world.cells[x][y].colony_ph[1] -= colony_ph_factor
if entity.food_lifespan(world.cells[x][y]) > 0:
world.cells[x][y].food_ph[1] -= food_ph_factor
y += 1
x += 1
def step(world, gsp, cphdf, fphdf):
"""Move all the ants forward to the next generation."""
for a in world.ants:
behavior.act(a, gsp)
# simulate pheromone evaporation
evaporate(world, cphdf, fphdf)
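# Hedged usage sketch (not part of the original module): a minimal driver
# loop. It assumes the caller already built an `entity.World`; the numeric
# parameters are illustrative only.
def run(world, generations=100):
    """Seed the world with food and advance it a number of generations."""
    place_food(world, cardinality=10, quantity=50)
    for _ in range(generations):
        birth(world, food_qty=5, upper_bound=100)
        death(world, life_expectancy=200)
        step(world, gsp=0.1, cphdf=0.05, fphdf=0.05)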
|
molobrakos/home-assistant | refs/heads/master | homeassistant/auth/permissions/types.py | 9 | """Common code for permissions."""
from typing import Mapping, Union
# MyPy doesn't support recursion yet. So writing it out as far as we need.
ValueType = Union[
# Example: entities.all = { read: true, control: true }
Mapping[str, bool],
bool,
None
]
# Example: entities.domains = { light: … }
SubCategoryDict = Mapping[str, ValueType]
SubCategoryType = Union[
SubCategoryDict,
bool,
None
]
CategoryType = Union[
# Example: entities.domains
Mapping[str, SubCategoryType],
# Example: entities.all
Mapping[str, ValueType],
bool,
None
]
# Example: { entities: … }
PolicyType = Mapping[str, CategoryType]
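# Illustrative value (an assumption, not part of the module): one policy
# that satisfies PolicyType, combining the shapes shown in the comments.
EXAMPLE_POLICY = {
    'entities': {
        'all': {'read': True, 'control': True},  # ValueType
        'domains': {'light': True},              # SubCategoryDict
    },
}  # conforms to PolicyType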
|
adelina-t/neutron | refs/heads/master | neutron/tests/unit/extensions/base.py | 29 | # Copyright 2014 Intel Corporation.
# Copyright 2014 Isaku Yamahata <isaku.yamahata at intel com>
# <isaku.yamahata at gmail com>
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
from webob import exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron import quota
from neutron.tests import tools
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import testlib_api
class ExtensionTestCase(testlib_api.WebTestCase):
def _setUpExtension(self, plugin, service_type,
resource_attribute_map, extension_class,
resource_prefix, plural_mappings=None,
translate_resource_name=False,
allow_pagination=False, allow_sorting=False,
supported_extension_aliases=None,
use_quota=False,
):
self._resource_prefix = resource_prefix
self._plural_mappings = plural_mappings or {}
self._translate_resource_name = translate_resource_name
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(tools.AttributeMapMemento())
# Create the default configurations
self.config_parse()
#just stubbing core plugin with plugin
self.setup_coreplugin(plugin)
cfg.CONF.set_override('core_plugin', plugin)
if service_type:
cfg.CONF.set_override('service_plugins', [plugin])
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
if service_type:
instance.get_plugin_type.return_value = service_type
if supported_extension_aliases is not None:
instance.supported_extension_aliases = supported_extension_aliases
if allow_pagination:
cfg.CONF.set_override('allow_pagination', True)
# instance.__native_pagination_support = True
native_pagination_attr_name = ("_%s__native_pagination_support"
% instance.__class__.__name__)
setattr(instance, native_pagination_attr_name, True)
if allow_sorting:
cfg.CONF.set_override('allow_sorting', True)
# instance.__native_sorting_support = True
native_sorting_attr_name = ("_%s__native_sorting_support"
% instance.__class__.__name__)
setattr(instance, native_sorting_attr_name, True)
if use_quota:
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
setattr(instance, 'path_prefix', resource_prefix)
class ExtensionTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
resource_attribute_map)
return extension_class.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
ext_mgr = ExtensionTestExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
def _test_entity_delete(self, entity):
"""Does the entity deletion based on naming convention."""
entity_id = str(uuid.uuid4())
path = self._resource_prefix + '/' if self._resource_prefix else ''
path += self._plural_mappings.get(entity, entity + 's')
if self._translate_resource_name:
path = path.replace('_', '-')
res = self.api.delete(
test_base._get_path(path, id=entity_id, fmt=self.fmt))
delete_entity = getattr(self.plugin.return_value, "delete_" + entity)
delete_entity.assert_called_with(mock.ANY, entity_id)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
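# Hedged usage sketch (not part of the original module), left as a comment
# because a runnable subclass needs a real plugin; all names below are
# illustrative assumptions:
#
#     class FooExtensionTestCase(ExtensionTestCase):
#         def setUp(self):
#             super(FooExtensionTestCase, self).setUp()
#             self._setUpExtension(
#                 'neutron.plugins.foo.FooPlugin', None,
#                 FOO_RESOURCE_ATTRIBUTE_MAP, foo_extension.Foo, 'foo')
#
#         def test_foo_delete(self):
#             # asserts that plugin.delete_foo(ctx, id) was called
#             self._test_entity_delete('foo')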
|
bdh1011/cupeye | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py | 487 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class SymlinkLockFile(LockBase):
"""Lock access to a file using symlink(2)."""
def __init__(self, path, threaded=True, timeout=None):
        # super(SymlinkLockFile, self).__init__(...)
LockBase.__init__(self, path, threaded, timeout)
# split it back!
self.unique_name = os.path.split(self.unique_name)[1]
def acquire(self, timeout=None):
# Hopefully unnecessary for symlink.
#try:
# open(self.unique_name, "wb").close()
#except IOError:
# raise LockFailed("failed to create %s" % self.unique_name)
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a symbolic link to it.
try:
os.symlink(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
if self.i_am_locking():
                    # Linked to our unique name. Proceed.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout/10 if timeout is not None else 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.islink(self.lock_file)
def i_am_locking(self):
return os.path.islink(self.lock_file) and \
os.readlink(self.lock_file) == self.unique_name
def break_lock(self):
if os.path.islink(self.lock_file): # exists && link
os.unlink(self.lock_file)
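# Hedged usage sketch (not part of the original module): the typical
# acquire/release cycle. The path is illustrative.
def _example_usage(path='/tmp/example.txt'):
    lock = SymlinkLockFile(path, timeout=10)
    lock.acquire()         # waits up to 10 seconds, then raises LockTimeout
    try:
        pass               # the protected critical section goes here
    finally:
        lock.release()     # removes the lock symlink we created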
|
zbyufei/open-budgets | refs/heads/develop | openbudget/apps/contexts/factories.py | 2 | import datetime
import factory
from django.utils.timezone import utc
from openbudget.apps.entities.factories import EntityFactory
from openbudget.apps.contexts.models import Context
class ContextFactory(factory.DjangoModelFactory):
FACTORY_FOR = Context
entity = factory.SubFactory(EntityFactory)
data = '{"population":0,"ground_surface":0;"high_schools":0}'
period_start = factory.Sequence(
lambda n: datetime.datetime.utcnow().replace(tzinfo=utc)
)
period_end = factory.Sequence(
lambda n: datetime.datetime.utcnow().replace(tzinfo=utc)
)
|
tsunli/shadowsocks | refs/heads/master | shadowsocks/udprelay.py | 924 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
def client_key(source_addr, server_af):
# notice this is server af, not dest af
return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af)
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
if not data:
            logging.debug('UDP handle_server: data is empty')
            return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
frag = common.ord(data[2])
if frag != 0:
logging.warn('drop a message since frag is not 0')
return
else:
data = data[3:]
else:
data = encrypt.encrypt_all(self._password, self._method, 0, data)
# decrypt data
if not data:
logging.debug('UDP handle_server: data is empty after decrypt')
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
data = encrypt.encrypt_all(self._password, self._method, 1, data)
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except IOError as e:
err = eventloop.errno_from_exception(e)
if err in (errno.EINPROGRESS, errno.EAGAIN):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
response = encrypt.encrypt_all(self._password, self._method, 1,
data)
if not response:
return
else:
data = encrypt.encrypt_all(self._password, self._method, 0,
data)
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
# addrtype, dest_addr, dest_port, header_length = header_result
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
self._server_socket.sendto(response, client_addr)
else:
# this packet is from somewhere else we know
# simply drop that packet
pass
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
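# Hedged illustration (not part of the original module): building the
# plaintext shadowsocks UDP request shown in the tables at the top of this
# file -- ATYP + DST.ADDR from pack_addr(), big-endian DST.PORT, then the
# payload. The address, port and payload values are illustrative.
def _example_request(dest_addr='8.8.8.8', dest_port=53, payload=b'data'):
    return pack_addr(dest_addr) + struct.pack('>H', dest_port) + payload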
|
pferreir/indico-backup | refs/heads/master | bin/utils/proposeAbstractsToAccept.py | 2 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from indico.core.db import DBMgr
from MaKaC.user import AvatarHolder
from MaKaC.conference import ConferenceHolder
from MaKaC.review import AbstractStatusSubmitted
"""
Change all submitted abstracts to "propose to be accepted" status
"""
DBMgr.getInstance().startRequest()
confId = '149557'
trackId = '3'
userId = '27108'
contribTypeId = '2'
conf = ConferenceHolder().getById(confId)
track = conf.getTrackById(trackId)
contribType = conf.getContribTypeById(contribTypeId)
user = AvatarHolder().getById(userId)
for abstract in track.getAbstractList():
if isinstance(abstract.getCurrentStatus(), AbstractStatusSubmitted):
abstract.proposeToAccept(user, track, contribType)
DBMgr.getInstance().endRequest()
|
openstack/congress | refs/heads/master | thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t031emptyAlt.py | 22 | import antlr3
import testbase
import unittest
class t031emptyAlt(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def testValid1(self):
cStream = antlr3.StringStream('foo')
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
events = parser.r()
if __name__ == '__main__':
unittest.main()
|
ThCC/postman-client | refs/heads/master | .eggs/requests-2.11.0-py2.7.egg/requests/packages/urllib3/filepost.py | 713 | from __future__ import absolute_import
import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
    Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = str('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
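# Hedged usage example (an assumption, not part of the module): encoding two
# simple fields. The field names, values and boundary are illustrative.
if __name__ == '__main__':
    body, content_type = encode_multipart_formdata(
        {'name': 'urllib3', 'kind': 'library'},
        boundary='demo-boundary')
    print(content_type)  # multipart/form-data; boundary=demo-boundary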
|