repo_name (string, 5-100) | ref (string, 12-67) | path (string, 4-244) | copies (string, 1-8) | content (string, 0-1.05M, nullable)
---|---|---|---|---|
santoshphilip/pyclearsky | refs/heads/master | original_code/pytest_helpers.py | 1 | """helpers for pytest"""
# taken from python's unit test
# may be covered by Python's license
def almostequal(first, second, places=7, printit=True):
"""docstring for almostequal"""
if round(abs(second-first), places) != 0:
if printit:
print round(abs(second-first), places)
print "notalmost: %s != %s" % (first, second)
return False
else:
return True |
ojake/django | refs/heads/master | django/views/generic/__init__.py | 597 | from django.views.generic.base import RedirectView, TemplateView, View
from django.views.generic.dates import (
ArchiveIndexView, DateDetailView, DayArchiveView, MonthArchiveView,
TodayArchiveView, WeekArchiveView, YearArchiveView,
)
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView, DeleteView, FormView, UpdateView,
)
from django.views.generic.list import ListView
__all__ = [
'View', 'TemplateView', 'RedirectView', 'ArchiveIndexView',
'YearArchiveView', 'MonthArchiveView', 'WeekArchiveView', 'DayArchiveView',
'TodayArchiveView', 'DateDetailView', 'DetailView', 'FormView',
'CreateView', 'UpdateView', 'DeleteView', 'ListView', 'GenericViewError',
]
class GenericViewError(Exception):
"""A problem in a generic view."""
pass
|
google/google-ctf | refs/heads/master | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/mimetools.py | 71 | """Various tools used by MIME-reading or MIME-writing programs."""
import os
import sys
import tempfile
from warnings import filterwarnings, catch_warnings
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*rfc822 has been removed", DeprecationWarning)
import rfc822
from warnings import warnpy3k
warnpy3k("in 3.x, mimetools has been removed in favor of the email package",
stacklevel=2)
__all__ = ["Message","choose_boundary","encode","decode","copyliteral",
"copybinary"]
class Message(rfc822.Message):
"""A derived class of rfc822.Message that knows about MIME headers and
contains some hooks for decoding encoded and multipart messages."""
def __init__(self, fp, seekable = 1):
rfc822.Message.__init__(self, fp, seekable)
self.encodingheader = \
self.getheader('content-transfer-encoding')
self.typeheader = \
self.getheader('content-type')
self.parsetype()
self.parseplist()
def parsetype(self):
str = self.typeheader
if str is None:
str = 'text/plain'
if ';' in str:
i = str.index(';')
self.plisttext = str[i:]
str = str[:i]
else:
self.plisttext = ''
fields = str.split('/')
for i in range(len(fields)):
fields[i] = fields[i].strip().lower()
self.type = '/'.join(fields)
self.maintype = fields[0]
self.subtype = '/'.join(fields[1:])
def parseplist(self):
str = self.plisttext
self.plist = []
while str[:1] == ';':
str = str[1:]
if ';' in str:
# XXX Should parse quotes!
end = str.index(';')
else:
end = len(str)
f = str[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + \
'=' + f[i+1:].strip()
self.plist.append(f.strip())
str = str[end:]
def getplist(self):
return self.plist
def getparam(self, name):
name = name.lower() + '='
n = len(name)
for p in self.plist:
if p[:n] == name:
return rfc822.unquote(p[n:])
return None
def getparamnames(self):
result = []
for p in self.plist:
i = p.find('=')
if i >= 0:
result.append(p[:i].lower())
return result
def getencoding(self):
if self.encodingheader is None:
return '7bit'
return self.encodingheader.lower()
def gettype(self):
return self.type
def getmaintype(self):
return self.maintype
def getsubtype(self):
return self.subtype
# Utility functions
# -----------------
try:
import thread
except ImportError:
import dummy_thread as thread
_counter_lock = thread.allocate_lock()
del thread
_counter = 0
def _get_next_counter():
global _counter
_counter_lock.acquire()
_counter += 1
result = _counter
_counter_lock.release()
return result
_prefix = None
def choose_boundary():
"""Return a string usable as a multipart boundary.
The string chosen is unique within a single program run, and
incorporates the user id (if available), process id (if available),
and current time. So it's very unlikely the returned string appears
in message text, but there's no guarantee.
The boundary contains dots so you have to quote it in the header."""
global _prefix
import time
if _prefix is None:
import socket
try:
hostid = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
hostid = '127.0.0.1'
try:
uid = repr(os.getuid())
except AttributeError:
uid = '1'
try:
pid = repr(os.getpid())
except AttributeError:
pid = '1'
_prefix = hostid + '.' + uid + '.' + pid
return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter())
# Subroutines for decoding some common content-transfer-types
def decode(input, output, encoding):
"""Decode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.decode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.decode(input, output)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.decode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if encoding in decodetab:
pipethrough(input, decodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
def encode(input, output, encoding):
"""Encode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.encode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.encode(input, output, 0)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.encode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if encoding in encodetab:
pipethrough(input, encodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding
# The following is no longer used for standard encodings
# XXX This requires that uudecode and mmencode are in $PATH
uudecode_pipe = '''(
TEMP=/tmp/@uu.$$
sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
cat $TEMP
rm $TEMP
)'''
decodetab = {
'uuencode': uudecode_pipe,
'x-uuencode': uudecode_pipe,
'uue': uudecode_pipe,
'x-uue': uudecode_pipe,
'quoted-printable': 'mmencode -u -q',
'base64': 'mmencode -u -b',
}
encodetab = {
'x-uuencode': 'uuencode tempfile',
'uuencode': 'uuencode tempfile',
'x-uue': 'uuencode tempfile',
'uue': 'uuencode tempfile',
'quoted-printable': 'mmencode -q',
'base64': 'mmencode -b',
}
def pipeto(input, command):
pipe = os.popen(command, 'w')
copyliteral(input, pipe)
pipe.close()
def pipethrough(input, command, output):
(fd, tempname) = tempfile.mkstemp()
temp = os.fdopen(fd, 'w')
copyliteral(input, temp)
temp.close()
pipe = os.popen(command + ' <' + tempname, 'r')
copybinary(pipe, output)
pipe.close()
os.unlink(tempname)
def copyliteral(input, output):
while 1:
line = input.readline()
if not line: break
output.write(line)
def copybinary(input, output):
BUFSIZE = 8192
while 1:
line = input.read(BUFSIZE)
if not line: break
output.write(line)
|
yitian134/chromium | refs/heads/master | tools/json_schema_compiler/schema_bundle_generator.py | 6 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import code
import cpp_util
import json
import os
import re
# TODO(miket/asargent) - parameterize this.
SOURCE_BASE_PATH = 'chrome/common/extensions/api'
class SchemaBundleGenerator(object):
"""This class contains methods to generate code based on multiple schemas.
"""
def __init__(self, model, api_defs, cpp_type_generator):
self._model = model
self._api_defs = api_defs
self._cpp_type_generator = cpp_type_generator
def GenerateHeader(self, file_base, body_code):
"""Generates a code.Code object for a header file
Parameters:
- |file_base| - the base of the filename, e.g. 'foo' (for 'foo.h')
- |body_code| - the code to put in between the multiple inclusion guards"""
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append(cpp_util.GENERATED_BUNDLE_FILE_MESSAGE % SOURCE_BASE_PATH)
ifndef_name = cpp_util.GenerateIfndefName(SOURCE_BASE_PATH, file_base)
c.Append()
c.Append('#ifndef %s' % ifndef_name)
c.Append('#define %s' % ifndef_name)
c.Append('#pragma once')
c.Append()
c.Concat(body_code)
c.Append()
c.Append('#endif // %s' % ifndef_name)
c.Append()
return c
def GenerateAPIHeader(self):
"""Generates the header for API registration / declaration"""
c = code.Code()
c.Append('#include <string>')
c.Append()
c.Append('#include "base/basictypes.h"')
for namespace in self._model.namespaces.values():
namespace_name = namespace.unix_name.replace("experimental_", "")
c.Append('#include "chrome/browser/extensions/api/%s/%s_api.h"' % (
namespace_name, namespace_name))
c.Append()
c.Append("class ExtensionFunctionRegistry;")
c.Append()
c.Concat(self._cpp_type_generator.GetRootNamespaceStart())
for namespace in self._model.namespaces.values():
c.Append("// TODO(miket): emit code for %s" % (namespace.unix_name))
c.Append()
c.Concat(self.GenerateFunctionRegistry())
c.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
c.Append()
return self.GenerateHeader('generated_api', c)
def CapitalizeFirstLetter(self, value):
return value[0].capitalize() + value[1:]
def GenerateFunctionRegistry(self):
c = code.Code()
c.Sblock("class GeneratedFunctionRegistry {")
c.Append("public:")
c.Sblock("static void RegisterAll(ExtensionFunctionRegistry* registry) {")
for namespace in self._model.namespaces.values():
for function in namespace.functions.values():
namespace_name = self.CapitalizeFirstLetter(namespace.name.replace(
"experimental.", ""))
function_name = namespace_name + self.CapitalizeFirstLetter(
function.name)
c.Append("registry->RegisterFunction<%sFunction>();" % (
function_name))
c.Eblock("}")
c.Eblock("};")
c.Append()
return c
def GenerateSchemasHeader(self):
"""Generates a code.Code object for the generated schemas .h file"""
c = code.Code()
c.Append('#include <map>')
c.Append('#include <string>')
c.Append();
c.Append('#include "base/string_piece.h"')
c.Append()
c.Concat(self._cpp_type_generator.GetRootNamespaceStart())
c.Append()
c.Sblock('class GeneratedSchemas {')
c.Append('public:')
c.Append('// Puts all API schemas in |schemas|.')
c.Append('static void Get('
'std::map<std::string, base::StringPiece>* schemas);')
c.Eblock('};');
c.Append()
c.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
c.Append()
return self.GenerateHeader('generated_schemas', c)
def GenerateSchemasCC(self):
"""Generates a code.Code object for the generated schemas .cc file"""
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append('#include "%s"' % (os.path.join(SOURCE_BASE_PATH,
'generated_schemas.h')))
c.Append()
c.Concat(self._cpp_type_generator.GetRootNamespaceStart())
c.Append()
c.Append('// static')
c.Sblock('void GeneratedSchemas::Get('
'std::map<std::string, base::StringPiece>* schemas) {')
for api in self._api_defs:
namespace = self._model.namespaces[api.get('namespace')]
# JSON parsing code expects lists of schemas, so dump a singleton list.
json_content = json.dumps([api], indent=2)
# Escape all double-quotes. Ignore already-escaped double-quotes.
json_content = re.sub('(?<!\\\\)"', '\\"', json_content)
lines = json_content.split('\n')
c.Append('(*schemas)["%s"] = ' % namespace.name)
for index, line in enumerate(lines):
line = ' "%s"' % line
if index == len(lines) - 1:
line += ';'
c.Append(line)
c.Eblock('}')
c.Append()
c.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
c.Append()
return c
|
carvalhomb/tsmells | refs/heads/master | guess/src/Lib/xml/dom/html/HTMLBRElement.py | 2 | ########################################################################
#
# File Name: HTMLBRElement
#
# Documentation: http://docs.4suite.com/4DOM/HTMLBRElement.html
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLBRElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="BR"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_clear(self):
return string.capitalize(self.getAttribute("CLEAR"))
def _set_clear(self, value):
self.setAttribute("CLEAR", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"clear" : _get_clear
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"clear" : _set_clear
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
|
levilucio/SyVOLT | refs/heads/master | UMLRT2Kiltera_MM/MT_post__FIXED0.py | 1 | """
__MT_post__FIXED0.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sun Feb 15 10:31:26 2015
_________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3Text import *
from ATOM3String import *
from graph_MT_post__FIXED0 import *
class MT_post__FIXED0(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['MT_post__RoleType', 'MT_post__MetaModelElement_S']
self.graphClass_ = graph_MT_post__FIXED0
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.MT_post__cardinality=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__cardinality=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__classtype=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__classtype=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__name=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__name=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_label__=ATOM3String('', 20)
self.MT_pivotOut__=ATOM3String('', 20)
self.generatedAttributes = {'MT_post__cardinality': ('ATOM3Text', ),
'MT_post__cardinality': ('ATOM3Text', ),
'MT_post__classtype': ('ATOM3Text', ),
'MT_post__classtype': ('ATOM3Text', ),
'MT_post__name': ('ATOM3Text', ),
'MT_post__name': ('ATOM3Text', ),
'MT_label__': ('ATOM3String', ),
'MT_pivotOut__': ('ATOM3String', ) }
self.realOrder = ['MT_post__cardinality','MT_post__cardinality','MT_post__classtype','MT_post__classtype','MT_post__name','MT_post__name','MT_label__','MT_pivotOut__']
self.directEditing = [0,0,0,0,0,0,1,1]
def clone(self):
cloneObject = MT_post__FIXED0( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if actionID == self.CREATE:
self.autoIncrLabel(params)
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
def autoIncrLabel(self, params):
#===============================================================================
# Auto increment the label
#===============================================================================
# If there is already one, ignore
if not self.MT_label__.isNone(): return
# Get the maximum label of all MT_pre__ elements
label = 0
for nt in self.parent.ASGroot.listNodes:
if nt.startswith('MT_post__'):
for node in self.parent.ASGroot.listNodes[nt]:
currLabel = 0
try:
currLabel = int(node.MT_label__.getValue())
except:
pass
if currLabel > label:
label = currLabel
# The label of this instance will be the max label + 1
self.MT_label__.setValue(str(label + 1))
|
wangyou/XX-Net | refs/heads/master | code/default/gae_proxy/server/gae/wsgi.py | 6 | #!/usr/bin/env python
# coding=utf-8
# Contributor:
# Phus Lu <phus.lu@gmail.com>
__version__ = '3.0.7'
__password__ = ''
__hostsdeny__ = ()
#__hostsdeny__ = ('.youtube.com', '.youku.com', ".googlevideo.com")
__content_type__ = 'image/gif'
import sys
import os
import re
import time
import struct
import zlib
import base64
import logging
import httplib
import urlparse
import errno
import string
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
from google.appengine.api import urlfetch
from google.appengine.runtime import apiproxy_errors
except ImportError:
urlfetch = None
try:
import sae
except ImportError:
sae = None
try:
import bae.core.wsgi
except ImportError:
bae = None
try:
import socket
import select
except ImportError:
socket = None
try:
import OpenSSL
except ImportError:
OpenSSL = None
URLFETCH_MAX = 2
URLFETCH_MAXSIZE = 4*1024*1024
URLFETCH_DEFLATE_MAXSIZE = 4*1024*1024
URLFETCH_TIMEOUT = 60
def message_html(title, banner, detail=''):
MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
return string.Template(MESSAGE_TEMPLATE).substitute(title=title, banner=banner, detail=detail)
try:
from Crypto.Cipher.ARC4 import new as _Crypto_Cipher_ARC4_new
except ImportError:
logging.warn('Load Crypto.Cipher.ARC4 Failed, Use Pure Python Instead.')
class _Crypto_Cipher_ARC4_new(object):
def __init__(self, key):
x = 0
box = range(256)
for i, y in enumerate(box):
x = (x + y + ord(key[i % len(key)])) & 0xff
box[i], box[x] = box[x], y
self.__box = box
self.__x = 0
self.__y = 0
def encrypt(self, data):
out = []
out_append = out.append
x = self.__x
y = self.__y
box = self.__box
for char in data:
x = (x + 1) & 0xff
y = (y + box[x]) & 0xff
box[x], box[y] = box[y], box[x]
out_append(chr(ord(char) ^ box[(box[x] + box[y]) & 0xff]))
self.__x = x
self.__y = y
return ''.join(out)
def rc4crypt(data, key):
return _Crypto_Cipher_ARC4_new(key).encrypt(data) if key else data
class RC4FileObject(object):
"""fileobj for rc4"""
def __init__(self, stream, key):
self.__stream = stream
self.__cipher = _Crypto_Cipher_ARC4_new(key) if key else lambda x:x
def __getattr__(self, attr):
if attr not in ('__stream', '__cipher'):
return getattr(self.__stream, attr)
def read(self, size=-1):
return self.__cipher.encrypt(self.__stream.read(size))
def gae_application(environ, start_response):
cookie = environ.get('HTTP_COOKIE', '')
options = environ.get('HTTP_X_GOA_OPTIONS', '')
if environ['REQUEST_METHOD'] == 'GET' and not cookie:
if '204' in environ['QUERY_STRING']:
start_response('204 No Content', [])
yield ''
else:
timestamp = long(os.environ['CURRENT_VERSION_ID'].split('.')[1])/2**28
ctime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(timestamp+8*3600))
html = u'GoAgent Python Server %s \u5df2\u7ecf\u5728\u5de5\u4f5c\u4e86\uff0c\u90e8\u7f72\u65f6\u95f4 %s\n' % (__version__, ctime)
start_response('200 OK', [('Content-Type', 'text/plain; charset=utf-8')])
yield html.encode('utf8')
raise StopIteration
# inflate = lambda x:zlib.decompress(x, -zlib.MAX_WBITS)
wsgi_input = environ['wsgi.input']
input_data = wsgi_input.read()
try:
if cookie:
if 'rc4' not in options:
metadata = zlib.decompress(base64.b64decode(cookie), -zlib.MAX_WBITS)
payload = input_data or ''
else:
metadata = zlib.decompress(rc4crypt(base64.b64decode(cookie), __password__), -zlib.MAX_WBITS)
payload = rc4crypt(input_data, __password__) if input_data else ''
else:
if 'rc4' in options:
input_data = rc4crypt(input_data, __password__)
metadata_length, = struct.unpack('!h', input_data[:2])
metadata = zlib.decompress(input_data[2:2+metadata_length], -zlib.MAX_WBITS)
payload = input_data[2+metadata_length:]
headers = dict(x.split(':', 1) for x in metadata.splitlines() if x)
method = headers.pop('G-Method')
url = headers.pop('G-Url')
except (zlib.error, KeyError, ValueError):
import traceback
start_response('500 Internal Server Error', [('Content-Type', 'text/html')])
yield message_html('500 Internal Server Error', 'Bad Request (metadata) - Possible Wrong Password', '<pre>%s</pre>' % traceback.format_exc())
raise StopIteration
kwargs = {}
any(kwargs.__setitem__(x[2:].lower(), headers.pop(x)) for x in headers.keys() if x.startswith('G-'))
if 'Content-Encoding' in headers:
if headers['Content-Encoding'] == 'deflate':
payload = zlib.decompress(payload, -zlib.MAX_WBITS)
headers['Content-Length'] = str(len(payload))
del headers['Content-Encoding']
logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], method, url, 'HTTP/1.1')
#logging.info('request headers=%s', headers)
if __password__ and __password__ != kwargs.get('password', ''):
start_response('403 Forbidden', [('Content-Type', 'text/html')])
yield message_html('403 Wrong password', 'Wrong password(%r)' % kwargs.get('password', ''), 'GoAgent proxy.ini password is wrong!')
raise StopIteration
netloc = urlparse.urlparse(url).netloc
if __hostsdeny__ and netloc.endswith(__hostsdeny__):
start_response('403 Forbidden', [('Content-Type', 'text/html')])
yield message_html('403 Hosts Deny', 'Hosts Deny(%r)' % netloc, detail='共用appid因为资源有限,限制观看视频和文件下载等消耗资源过多的访问,请使用自己的appid <a href=" https://github.com/XX-net/XX-Net/wiki/Register-Google-appid" target="_blank">帮助</a> ')
raise StopIteration
if netloc.startswith(('127.0.0.', '::1', 'localhost')):
start_response('400 Bad Request', [('Content-Type', 'text/html')])
html = ''.join('<a href="https://%s/">%s</a><br/>' % (x, x) for x in ('google.com', 'mail.google.com'))
yield message_html('GoAgent %s is Running' % __version__, 'Now you can visit some websites', html)
raise StopIteration
fetchmethod = getattr(urlfetch, method, None)
if not fetchmethod:
start_response('405 Method Not Allowed', [('Content-Type', 'text/html')])
yield message_html('405 Method Not Allowed', 'Method Not Allowed: %r' % method, detail='Method Not Allowed URL=%r' % url)
raise StopIteration
deadline = URLFETCH_TIMEOUT
validate_certificate = bool(int(kwargs.get('validate', 0)))
accept_encoding = headers.get('Accept-Encoding', '')
errors = []
for i in xrange(int(kwargs.get('fetchmax', URLFETCH_MAX))):
try:
response = urlfetch.fetch(url, payload, fetchmethod, headers, allow_truncated=False, follow_redirects=False, deadline=deadline, validate_certificate=validate_certificate)
break
except apiproxy_errors.OverQuotaError as e:
time.sleep(5)
except urlfetch.DeadlineExceededError as e:
errors.append('%r, deadline=%s' % (e, deadline))
logging.error('DeadlineExceededError(deadline=%s, url=%r)', deadline, url)
time.sleep(1)
deadline = URLFETCH_TIMEOUT * 2
except urlfetch.DownloadError as e:
errors.append('%r, deadline=%s' % (e, deadline))
logging.error('DownloadError(deadline=%s, url=%r)', deadline, url)
time.sleep(1)
deadline = URLFETCH_TIMEOUT * 2
except urlfetch.ResponseTooLargeError as e:
errors.append('%r, deadline=%s' % (e, deadline))
response = e.response
logging.error('ResponseTooLargeError(deadline=%s, url=%r) response(%r)', deadline, url, response)
m = re.search(r'=\s*(\d+)-', headers.get('Range') or headers.get('range') or '')
if m is None:
headers['Range'] = 'bytes=0-%d' % int(kwargs.get('fetchmaxsize', URLFETCH_MAXSIZE))
else:
headers.pop('Range', '')
headers.pop('range', '')
start = int(m.group(1))
headers['Range'] = 'bytes=%s-%d' % (start, start+int(kwargs.get('fetchmaxsize', URLFETCH_MAXSIZE)))
deadline = URLFETCH_TIMEOUT * 2
except urlfetch.SSLCertificateError as e:
errors.append('%r, should validate=0 ?' % e)
logging.error('%r, deadline=%s', e, deadline)
except Exception as e:
errors.append(str(e))
if i == 0 and method == 'GET':
deadline = URLFETCH_TIMEOUT * 2
else:
start_response('500 Internal Server Error', [('Content-Type', 'text/html')])
error_string = '<br />\n'.join(errors)
if not error_string:
logurl = 'https://appengine.google.com/logs?&app_id=%s' % os.environ['APPLICATION_ID']
error_string = 'Internal Server Error. <p/>try <a href="javascript:window.location.reload(true);">refresh</a> or goto <a href="%s" target="_blank">appengine.google.com</a> for details' % logurl
yield message_html('502 Urlfetch Error', 'Python Urlfetch Error: %r' % method, error_string)
raise StopIteration
#logging.debug('url=%r response.status_code=%r response.headers=%r response.content[:1024]=%r', url, response.status_code, dict(response.headers), response.content[:1024])
data = response.content
response_headers = response.headers
if 'content-encoding' not in response_headers and len(response.content) < URLFETCH_DEFLATE_MAXSIZE and response_headers.get('content-type', '').startswith(('text/', 'application/json', 'application/javascript')):
if 'gzip' in accept_encoding:
response_headers['Content-Encoding'] = 'gzip'
compressobj = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
dataio = BytesIO()
dataio.write('\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff')
dataio.write(compressobj.compress(data))
dataio.write(compressobj.flush())
dataio.write(struct.pack('<LL', zlib.crc32(data) & 0xFFFFFFFFL, len(data) & 0xFFFFFFFFL))
data = dataio.getvalue()
elif 'deflate' in accept_encoding:
response_headers['Content-Encoding'] = 'deflate'
data = zlib.compress(data)[2:-4]
if data:
response_headers['Content-Length'] = str(len(data))
response_headers_data = zlib.compress('\n'.join('%s:%s' % (k.title(), v) for k, v in response_headers.items() if not k.startswith('x-google-')))[2:-4]
if 'rc4' not in options:
start_response('200 OK', [('Content-Type', __content_type__)])
yield struct.pack('!hh', int(response.status_code), len(response_headers_data))+response_headers_data
yield data
else:
start_response('200 OK', [('Content-Type', __content_type__), ('X-GOA-Options', 'rc4')])
yield struct.pack('!hh', int(response.status_code), len(response_headers_data))
yield rc4crypt(response_headers_data, __password__)
yield rc4crypt(data, __password__)
|
wangyum/tensorflow | refs/heads/master | tensorflow/python/platform/flags.py | 85 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the flags interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse as _argparse
from tensorflow.python.util.all_util import remove_undocumented
_global_parser = _argparse.ArgumentParser()
# pylint: disable=invalid-name
class _FlagValues(object):
"""Global container and accessor for flags and their values."""
def __init__(self):
self.__dict__['__flags'] = {}
self.__dict__['__parsed'] = False
def _parse_flags(self, args=None):
result, unparsed = _global_parser.parse_known_args(args=args)
for flag_name, val in vars(result).items():
self.__dict__['__flags'][flag_name] = val
self.__dict__['__parsed'] = True
return unparsed
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
if name not in self.__dict__['__flags']:
raise AttributeError(name)
return self.__dict__['__flags'][name]
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
self.__dict__['__flags'][name] = value
def _define_helper(flag_name, default_value, docstring, flagtype):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
_global_parser.add_argument('--' + flag_name,
default=default_value,
help=docstring,
type=flagtype)
# Provides the global object that can be used to access flags.
FLAGS = _FlagValues()
def DEFINE_string(flag_name, default_value, docstring):
"""Defines a flag of type 'string'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a string.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, str)
def DEFINE_integer(flag_name, default_value, docstring):
"""Defines a flag of type 'int'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as an int.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, int)
def DEFINE_boolean(flag_name, default_value, docstring):
"""Defines a flag of type 'boolean'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a boolean.
docstring: A helpful message explaining the use of the flag.
"""
# Register a custom function for 'bool' so --flag=True works.
def str2bool(v):
return v.lower() in ('true', 't', '1')
_global_parser.add_argument('--' + flag_name,
nargs='?',
const=True,
help=docstring,
default=default_value,
type=str2bool)
# Add negated version, stay consistent with argparse with regard to
# dashes in flag names.
_global_parser.add_argument('--no' + flag_name,
action='store_false',
dest=flag_name.replace('-', '_'))
# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name
def DEFINE_float(flag_name, default_value, docstring):
"""Defines a flag of type 'float'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a float.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, float)
_allowed_symbols = [
# We rely on gflags documentation.
'DEFINE_bool',
'DEFINE_boolean',
'DEFINE_float',
'DEFINE_integer',
'DEFINE_string',
'FLAGS',
]
remove_undocumented(__name__, _allowed_symbols)
|
RustoriaRu/hipster_api | refs/heads/master | dj_test/dj_test/wsgi.py | 1 | """
WSGI config for dj_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_test.settings")
application = get_wsgi_application()
|
nismod/energy_demand | refs/heads/master | tests/scripts/test_s_rs_raw_shapes.py | 1 | """testing
"""
from energy_demand.scripts import s_rs_raw_shapes
import numpy as np
def test_assign_hes_data_to_year():
hes_data = {
'working_day': {
0: np.zeros((24, 2)) + 10,
1: np.zeros((24, 2)) + 20,
2: np.zeros((24, 2)) + 30,
3: np.zeros((24, 2)) + 40,
4: np.zeros((24, 2)) + 50,
5: np.zeros((24, 2)) + 60,
6: np.zeros((24, 2)) + 70,
7: np.zeros((24, 2)) + 80,
8: np.zeros((24, 2)) + 90,
9: np.zeros((24, 2)) + 100,
10: np.zeros((24, 2)) + 110,
11: np.zeros((24, 2)) + 120},
'holiday' : {
0: np.zeros((24, 2)) + 1,
1: np.zeros((24, 2)) + 2,
2: np.zeros((24, 2)) + 3,
3: np.zeros((24, 2)) + 4,
4: np.zeros((24, 2)) + 5,
5: np.zeros((24, 2)) + 6,
6: np.zeros((24, 2)) + 7,
7: np.zeros((24, 2)) + 8,
8: np.zeros((24, 2)) + 9,
9: np.zeros((24, 2)) + 10,
10: np.zeros((24, 2)) + 11,
11: np.zeros((24, 2)) + 12}
}
result = s_rs_raw_shapes.assign_hes_data_to_year(
nr_of_appliances=2,
hes_data=hes_data,
base_yr=2015)
#daytype, month_python, appliances
assert result[10][0][1] == 1 # yearday, hour, appliance_nr--> sun
assert result[11][0][1] == 10 # yearday, hour, appliance_nr--> mon
|
FireWRT/OpenWrt-Firefly-Libraries | refs/heads/master | staging_dir/host/lib/python2.7/test/test_cd.py | 45 | """Whimpy test script for the cd module
Roger E. Masse
"""
from test.test_support import verbose, import_module
cd = import_module('cd')
cdattrs = ['BLOCKSIZE', 'CDROM', 'DATASIZE', 'ERROR', 'NODISC', 'PAUSED', 'PLAYING', 'READY',
'STILL', '__doc__', '__name__', 'atime', 'audio', 'catalog', 'control', 'createparser', 'error',
'ident', 'index', 'msftoframe', 'open', 'pnum', 'ptime']
# This is a very inobtrusive test for the existence of the cd module and all its
# attributes. More comprehensive examples can be found in Demo/cd and
# require that you have a CD and a CD ROM drive
def test_main():
# touch all the attributes of cd without doing anything
if verbose:
print 'Touching cd module attributes...'
for attr in cdattrs:
if verbose:
print 'touching: ', attr
getattr(cd, attr)
if __name__ == '__main__':
test_main()
|
rschmidtz/httpie | refs/heads/master | httpie/compat.py | 46 | """
Python 2.6, 2.7, and 3.x compatibility.
"""
import sys
is_py2 = sys.version_info[0] == 2
is_py26 = sys.version_info[:2] == (2, 6)
is_py27 = sys.version_info[:2] == (2, 7)
is_py3 = sys.version_info[0] == 3
is_pypy = 'pypy' in sys.version.lower()
is_windows = 'win32' in str(sys.platform).lower()
if is_py2:
bytes = str
str = unicode
elif is_py3:
str = str
bytes = bytes
try: # pragma: no cover
# noinspection PyUnresolvedReferences,PyCompatibility
from urllib.parse import urlsplit
except ImportError: # pragma: no cover
# noinspection PyUnresolvedReferences,PyCompatibility
from urlparse import urlsplit
try: # pragma: no cover
# noinspection PyCompatibility
from urllib.request import urlopen
except ImportError: # pragma: no cover
# noinspection PyCompatibility
from urllib2 import urlopen
try: # pragma: no cover
from collections import OrderedDict
except ImportError: # pragma: no cover
# Python 2.6 OrderedDict class, needed for headers, parameters, etc .###
# <https://pypi.python.org/pypi/ordereddict/1.1>
# noinspection PyCompatibility
from UserDict import DictMixin
# noinspection PyShadowingBuiltins
class OrderedDict(dict, DictMixin):
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# noinspection PyMissingConstructor
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d'
% len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
# noinspection PyUnusedLocal
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return self.__class__, (items,), inst_dict
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
# noinspection PyMethodOverriding
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
|
gsmartway/odoo | refs/heads/8.0 | addons/email_template/wizard/mail_compose_message.py | 197 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools
from openerp.osv import osv, fields
def _reopen(self, res_id, model):
return {'type': 'ir.actions.act_window',
'view_mode': 'form',
'view_type': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
# save original model in context, because selecting the list of available
# templates requires a model in context
'context': {
'default_model': model,
},
}
class mail_compose_message(osv.TransientModel):
_inherit = 'mail.compose.message'
def default_get(self, cr, uid, fields, context=None):
""" Override to pre-fill the data when having a template in single-email mode
and not going through the view: the on_change is not called in that case. """
if context is None:
context = {}
res = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
if res.get('composition_mode') != 'mass_mail' and context.get('default_template_id') and res.get('model') and res.get('res_id'):
res.update(
self.onchange_template_id(
cr, uid, [], context['default_template_id'], res.get('composition_mode'),
res.get('model'), res.get('res_id'), context=context
)['value']
)
if fields is not None:
[res.pop(field, None) for field in res.keys() if field not in fields]
return res
_columns = {
'template_id': fields.many2one('email.template', 'Use template', select=True),
}
def send_mail(self, cr, uid, ids, context=None):
""" Override of send_mail to duplicate attachments linked to the email.template.
Indeed, basic mail.compose.message wizard duplicates attachments in mass
mailing mode. But in 'single post' mode, attachments of an email template
also have to be duplicated to avoid changing their ownership. """
if context is None:
context = {}
wizard_context = dict(context)
for wizard in self.browse(cr, uid, ids, context=context):
if wizard.template_id:
wizard_context['mail_notify_user_signature'] = False # template user_signature is added when generating body_html
wizard_context['mail_auto_delete'] = wizard.template_id.auto_delete # mass mailing: use template auto_delete value -> note, for emails mass mailing only
wizard_context['mail_server_id'] = wizard.template_id.mail_server_id.id
if not wizard.attachment_ids or wizard.composition_mode == 'mass_mail' or not wizard.template_id:
continue
new_attachment_ids = []
for attachment in wizard.attachment_ids:
if attachment in wizard.template_id.attachment_ids:
new_attachment_ids.append(self.pool.get('ir.attachment').copy(cr, uid, attachment.id, {'res_model': 'mail.compose.message', 'res_id': wizard.id}, context=context))
else:
new_attachment_ids.append(attachment.id)
self.write(cr, uid, wizard.id, {'attachment_ids': [(6, 0, new_attachment_ids)]}, context=context)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=wizard_context)
def onchange_template_id(self, cr, uid, ids, template_id, composition_mode, model, res_id, context=None):
""" - mass_mailing: we cannot render, so return the template values
- normal mode: return rendered values """
if template_id and composition_mode == 'mass_mail':
fields = ['subject', 'body_html', 'email_from', 'reply_to', 'mail_server_id']
template = self.pool['email.template'].browse(cr, uid, template_id, context=context)
values = dict((field, getattr(template, field)) for field in fields if getattr(template, field))
if template.attachment_ids:
values['attachment_ids'] = [att.id for att in template.attachment_ids]
if template.mail_server_id:
values['mail_server_id'] = template.mail_server_id.id
if template.user_signature and 'body_html' in values:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
elif template_id:
values = self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context=context)[res_id]
# transform attachments into attachment_ids; not attached to the document because this will
# be done further in the posting process, allowing to clean database if email not send
ir_attach_obj = self.pool.get('ir.attachment')
for attach_fname, attach_datas in values.pop('attachments', []):
data_attach = {
'name': attach_fname,
'datas': attach_datas,
'datas_fname': attach_fname,
'res_model': 'mail.compose.message',
'res_id': 0,
'type': 'binary', # override default_type from context, possibly meant for another model!
}
values.setdefault('attachment_ids', list()).append(ir_attach_obj.create(cr, uid, data_attach, context=context))
else:
default_context = dict(context, default_composition_mode=composition_mode, default_model=model, default_res_id=res_id)
default_values = self.default_get(cr, uid, ['composition_mode', 'model', 'res_id', 'parent_id', 'partner_ids', 'subject', 'body', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'], context=default_context)
values = dict((key, default_values[key]) for key in ['subject', 'body', 'partner_ids', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'] if key in default_values)
if values.get('body_html'):
values['body'] = values.pop('body_html')
return {'value': values}
def save_as_template(self, cr, uid, ids, context=None):
""" hit save as template button: current form value will be a new
template attached to the current document. """
email_template = self.pool.get('email.template')
ir_model_pool = self.pool.get('ir.model')
for record in self.browse(cr, uid, ids, context=context):
model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model or 'mail.message')], context=context)
model_id = model_ids and model_ids[0] or False
model_name = ''
if model_id:
model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name
template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
values = {
'name': template_name,
'subject': record.subject or False,
'body_html': record.body or False,
'model_id': model_id or False,
'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
}
template_id = email_template.create(cr, uid, values, context=context)
# generate the saved template
template_values = record.onchange_template_id(template_id, record.composition_mode, record.model, record.res_id)['value']
template_values['template_id'] = template_id
record.write(template_values)
return _reopen(self, record.id, record.model)
#------------------------------------------------------
# Wizard validation and send
#------------------------------------------------------
def generate_email_for_composer_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
""" Call email_template.generate_email(), get fields relevant for
mail.compose.message, transform email_cc and email_to into partner_ids """
if context is None:
context = {}
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'attachment_ids', 'mail_server_id']
returned_fields = fields + ['partner_ids', 'attachments']
values = dict.fromkeys(res_ids, False)
ctx = dict(context, tpl_partners_only=True)
template_values = self.pool.get('email.template').generate_email_batch(cr, uid, template_id, res_ids, fields=fields, context=ctx)
for res_id in res_ids:
res_id_values = dict((field, template_values[res_id][field]) for field in returned_fields if template_values[res_id].get(field))
res_id_values['body'] = res_id_values.pop('body_html', '')
values[res_id] = res_id_values
return values
def render_message_batch(self, cr, uid, wizard, res_ids, context=None):
""" Override to handle templates. """
# generate composer values
composer_values = super(mail_compose_message, self).render_message_batch(cr, uid, wizard, res_ids, context)
# generate template-based values
if wizard.template_id:
template_values = self.generate_email_for_composer_batch(
cr, uid, wizard.template_id.id, res_ids,
fields=['email_to', 'partner_to', 'email_cc', 'attachment_ids', 'mail_server_id'],
context=context)
else:
template_values = {}
for res_id in res_ids:
if template_values.get(res_id):
# recipients are managed by the template
composer_values[res_id].pop('partner_ids')
composer_values[res_id].pop('email_to')
composer_values[res_id].pop('email_cc')
# remove attachments from template values as they should not be rendered
template_values[res_id].pop('attachment_ids', None)
else:
template_values[res_id] = dict()
# update template values by composer values
template_values[res_id].update(composer_values[res_id])
return template_values
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
return self.pool.get('email.template').render_template_batch(cr, uid, template, model, res_ids, context=context, post_process=post_process)
# Compatibility methods
def generate_email_for_composer(self, cr, uid, template_id, res_id, context=None):
return self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context)[res_id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ravello/ansible | refs/heads/devel | v2/ansible/plugins/connections/funcd.py | 4 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# ---
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# The func transport permit to use ansible over func. For people who have already setup
# func and that wish to play with ansible, this permit to move gradually to ansible
# without having to redo completely the setup of the network.
HAVE_FUNC=False
try:
import func.overlord.client as fc
HAVE_FUNC=True
except ImportError:
pass
import os
from ansible.callbacks import vvv
from ansible import errors
import tempfile
import shutil
class Connection(object):
''' Func-based connections '''
def __init__(self, runner, host, port, *args, **kwargs):
self.runner = runner
self.host = host
self.has_pipelining = False
# port is unused, this go on func
self.port = port
def connect(self, port=None):
if not HAVE_FUNC:
raise errors.AnsibleError("func is not installed")
self.client = fc.Client(self.host)
return self
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False,
executable='/bin/sh', in_data=None, su=None, su_user=None):
''' run a command on the remote minion '''
if su or su_user:
raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
vvv("EXEC %s" % (cmd), host=self.host)
p = self.client.command.run(cmd)[self.host]
return (p[0], '', p[1], p[2])
def _normalize_path(self, path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
out_path = self._normalize_path(out_path, '/')
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
self.client.local.copyfile.send(in_path, out_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
in_path = self._normalize_path(in_path, '/')
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
# need to use a tmp dir due to difference of semantic for getfile
# ( who take a # directory as destination) and fetch_file, who
# take a file directly
tmpdir = tempfile.mkdtemp(prefix="func_ansible")
self.client.local.getfile.get(in_path, tmpdir)
shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)),
out_path)
shutil.rmtree(tmpdir)
def close(self):
''' terminate the connection; nothing to do here '''
pass
|
ininex/geofire-python | refs/heads/master | resource/lib/python2.7/site-packages/Crypto/SelfTest/Cipher/test_ChaCha20.py | 5 | # ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import os
import re
import unittest
from binascii import unhexlify, hexlify
from Crypto.Util.py3compat import b, tobytes, bchr
from Crypto.Util.strxor import strxor_c
from Crypto.SelfTest.st_common import list_test_cases
from Crypto.Cipher import ChaCha20
class ChaCha20Test(unittest.TestCase):
def test_new_positive(self):
cipher = ChaCha20.new(key=b("0")*32, nonce=b("0")*8)
self.assertEqual(cipher.nonce, b("0") * 8)
def test_new_negative(self):
new = ChaCha20.new
self.assertRaises(TypeError, new)
self.assertRaises(TypeError, new, nonce=b("0"))
self.assertRaises(ValueError, new, nonce=b("0")*8, key=b("0"))
self.assertRaises(ValueError, new, nonce=b("0"), key=b("0")*32)
def test_default_nonce(self):
cipher1 = ChaCha20.new(key=bchr(1) * 32)
cipher2 = ChaCha20.new(key=bchr(1) * 32)
self.assertEquals(len(cipher1.nonce), 8)
self.assertNotEqual(cipher1.nonce, cipher2.nonce)
    def test_either_encrypt_or_decrypt(self):
"""Verify that a cipher cannot be used for both decrypting and encrypting"""
c1 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
c1.encrypt(b("8"))
self.assertRaises(TypeError, c1.decrypt, b("9"))
c2 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
c2.decrypt(b("8"))
self.assertRaises(TypeError, c2.encrypt, b("9"))
def test_round_trip(self):
pt = b("A") * 1024
c1 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
c2 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
ct = c1.encrypt(pt)
self.assertEqual(c2.decrypt(ct), pt)
self.assertEqual(c1.encrypt(b("")), b(""))
self.assertEqual(c2.decrypt(b("")), b(""))
def test_streaming(self):
"""Verify that an arbitrary number of bytes can be encrypted/decrypted"""
from Crypto.Hash import SHA1
segments = (1, 3, 5, 7, 11, 17, 23)
total = sum(segments)
pt = b("")
while len(pt) < total:
pt += SHA1.new(pt).digest()
cipher1 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
ct = cipher1.encrypt(pt)
cipher2 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
cipher3 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
idx = 0
for segment in segments:
self.assertEqual(cipher2.decrypt(ct[idx:idx+segment]), pt[idx:idx+segment])
self.assertEqual(cipher3.encrypt(pt[idx:idx+segment]), ct[idx:idx+segment])
idx += segment
def test_seek(self):
cipher1 = ChaCha20.new(key=b("9") * 32, nonce=b("e") * 8)
offset = 64 * 900 + 7
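        # 900 full 64-byte ChaCha20 blocks plus 7 bytes into the next block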
pt = b("1") * 64
cipher1.encrypt(b("0") * offset)
ct1 = cipher1.encrypt(pt)
cipher2 = ChaCha20.new(key=b("9") * 32, nonce=b("e") * 8)
cipher2.seek(offset)
ct2 = cipher2.encrypt(pt)
self.assertEquals(ct1, ct2)
def test_seek_tv(self):
# Test Vector #4, A.1 from
# http://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04
key = bchr(0) + bchr(255) + bchr(0) * 30
nonce = bchr(0) * 8
cipher = ChaCha20.new(key=key, nonce=nonce)
cipher.seek(64 * 2)
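        # skip two 64-byte blocks so encryption starts at keystream block 2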
expected_key_stream = unhexlify(b(
"72d54dfbf12ec44b362692df94137f32"
"8fea8da73990265ec1bbbea1ae9af0ca"
"13b25aa26cb4a648cb9b9d1be65b2c09"
"24a66c54d545ec1b7374f4872e99f096"
))
ct = cipher.encrypt(bchr(0) * len(expected_key_stream))
self.assertEqual(expected_key_stream, ct)
class ChaCha20_AGL_NIR(unittest.TestCase):
# From http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
# and http://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04
tv = [
( "00" * 32,
"00" * 8,
"76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc"
"8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11c"
"c387b669b2ee6586"
"9f07e7be5551387a98ba977c732d080d"
"cb0f29a048e3656912c6533e32ee7aed"
"29b721769ce64e43d57133b074d839d5"
"31ed1f28510afb45ace10a1f4b794d6f"
),
( "00" * 31 + "01",
"00" * 8,
"4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952"
"ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea81"
"7e9ad275ae546963"
"3aeb5224ecf849929b9d828db1ced4dd"
"832025e8018b8160b82284f3c949aa5a"
"8eca00bbb4a73bdad192b5c42f73f2fd"
"4e273644c8b36125a64addeb006c13a0"
),
( "00" * 32,
"00" * 7 + "01",
"de9cba7bf3d69ef5e786dc63973f653a0b49e015adbff7134fcb7df1"
"37821031e85a050278a7084527214f73efc7fa5b5277062eb7a0433e"
"445f41e3"
),
( "00" * 32,
"01" + "00" * 7,
"ef3fdfd6c61578fbf5cf35bd3dd33b8009631634d21e42ac33960bd1"
"38e50d32111e4caf237ee53ca8ad6426194a88545ddc497a0b466e7d"
"6bbdb0041b2f586b"
),
( "000102030405060708090a0b0c0d0e0f101112131415161718191a1b"
"1c1d1e1f",
"0001020304050607",
"f798a189f195e66982105ffb640bb7757f579da31602fc93ec01ac56"
"f85ac3c134a4547b733b46413042c9440049176905d3be59ea1c53f1"
"5916155c2be8241a38008b9a26bc35941e2444177c8ade6689de9526"
"4986d95889fb60e84629c9bd9a5acb1cc118be563eb9b3a4a472f82e"
"09a7e778492b562ef7130e88dfe031c79db9d4f7c7a899151b9a4750"
"32b63fc385245fe054e3dd5a97a5f576fe064025d3ce042c566ab2c5"
"07b138db853e3d6959660996546cc9c4a6eafdc777c040d70eaf46f7"
"6dad3979e5c5360c3317166a1c894c94a371876a94df7628fe4eaaf2"
"ccb27d5aaae0ad7ad0f9d4b6ad3b54098746d4524d38407a6deb3ab7"
"8fab78c9"
),
( "00" * 32,
"00" * 7 + "02",
"c2c64d378cd536374ae204b9ef933fcd"
"1a8b2288b3dfa49672ab765b54ee27c7"
"8a970e0e955c14f3a88e741b97c286f7"
"5f8fc299e8148362fa198a39531bed6d"
),
]
def runTest(self):
for (key, nonce, stream) in self.tv:
c = ChaCha20.new(key=unhexlify(b(key)), nonce=unhexlify(b(nonce)))
ct = unhexlify(b(stream))
pt = b("\x00") * len(ct)
self.assertEqual(c.encrypt(pt), ct)
def get_tests(config={}):
tests = []
tests += list_test_cases(ChaCha20Test)
tests.append(ChaCha20_AGL_NIR())
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
|
IndraVikas/scikit-learn | refs/heads/master | sklearn/decomposition/pca.py | 192 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Michael Eickenberg <michael.eickenberg@inria.fr>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
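    # the inferred dimensionality is the rank with the largest log-likelihood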
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and to score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
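        # Woodbury identity for the probabilistic PCA covariance
        #   C = W.T * diag(lambda) * W + sigma2 * I
        # (W = components_, lambda = exp_var_diff, sigma2 = noise_variance_):
        #   C^-1 = I/sigma2 - W.T (diag(1/lambda) + W W.T/sigma2)^-1 W / sigma2**2
        # so only an (n_components x n_components) matrix has to be inverted.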
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
|
Orav/kbengine | refs/heads/master | kbe/src/lib/python/Lib/test/test_file.py | 2 | import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.support import TESTFN, run_unittest
from collections import UserList
class AutoFileTests:
# file tests for which a test file is automatically set up
def setUp(self):
self.f = self.open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(b'teststring')
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
def testReadinto(self):
# verify readinto
self.f.write(b'12')
self.f.close()
a = array('b', b'x'*10)
self.f = self.open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEqual(b'12', a.tobytes()[:n])
def testReadinto_text(self):
# verify readinto refuses text files
a = array('b', b'x'*10)
self.f.close()
self.f = self.open(TESTFN, 'r')
if hasattr(self.f, "readinto"):
self.assertRaises(TypeError, self.f.readinto, a)
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList([b'1', b'2'])
self.f.writelines(l)
self.f.close()
self.f = self.open(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, b'12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testErrors(self):
f = self.f
self.assertEqual(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
if hasattr(f, "readinto"):
self.assertRaises((OSError, TypeError), f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = [('fileno', ()),
('flush', ()),
('isatty', ()),
('__next__', ()),
('read', ()),
('write', (b"",)),
('readline', ()),
('readlines', ()),
('seek', (0,)),
('tell', ()),
('write', (b"",)),
('writelines', ([],)),
('__iter__', ()),
]
methods.append(('truncate', ()))
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname, args in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method, *args)
# file is closed, __exit__ shouldn't do anything
self.assertEqual(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1/0
except:
self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(OSError, self.f.read)
class CAutoFileTests(AutoFileTests, unittest.TestCase):
open = io.open
class PyAutoFileTests(AutoFileTests, unittest.TestCase):
open = staticmethod(pyio.open)
class OtherFileTests:
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = self.open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = self.open(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = self.open(TESTFN, 'wb', s)
f.write(str(s).encode("ascii"))
f.close()
f.close()
f = self.open(TESTFN, 'rb', s)
d = int(f.read().decode("ascii"))
f.close()
f.close()
except OSError as msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEqual(d, s)
def testTruncateOnWindows(self):
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
os.unlink(TESTFN)
f = self.open(TESTFN, 'wb')
try:
f.write(b'12345678901') # 11 bytes
f.close()
f = self.open(TESTFN,'rb+')
data = f.read(5)
if data != b'12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
finally:
f.close()
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods.
dataoffset = 16384
filler = b"ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
b"spam, spam and eggs\n",
b"eggs, spam, ham and spam\n",
b"saussages, spam, spam and eggs\n",
b"spam, ham, spam and eggs\n",
b"spam, spam, spam, spam, spam, ham, spam\n",
b"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("b", b" "*100),))]
try:
# Prepare the testfile
bag = self.open(TESTFN, "wb")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = self.open(TESTFN, 'rb')
                if next(f) != filler:
                    self.fail("Broken testfile")
meth = getattr(f, methodname)
meth(*args) # This simply shouldn't fail
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = self.open(TESTFN, 'rb')
for i in range(nchunks):
next(f)
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("b", b"\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tobytes()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
f.close()
# Reading after iteration hit EOF shouldn't hurt either
f = self.open(TESTFN, 'rb')
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class COtherFileTests(OtherFileTests, unittest.TestCase):
open = io.open
class PyOtherFileTests(OtherFileTests, unittest.TestCase):
open = staticmethod(pyio.open)
def tearDownModule():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
unittest.main()
|
MounikaVanka/bme590hrm | refs/heads/master | Code/read_input.py | 1 | def read_in():
    import numpy as np
    file_name = input('Enter the file name')
    # np.genfromtxt with named columns returns a structured array whose
    # fields can be read as data['Time'] and data['Voltage']
    data = np.genfromtxt(file_name,
                         dtype=float,
                         names=['Time', 'Voltage'],
                         delimiter=',')
    return data


read_in()
|
kishikawakatsumi/Mozc-for-iOS | refs/heads/master | src/session/gen_session_stress_test_data.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2010-2014, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
A tool to generate test sentences for stress test.
"""
__author__ = "taku"
import sys
def escape_string(s):
""" escape the string with "\\xXX" format.
We don't use encode('string_escape') because it doesn't escape ascii
characters.
Args:
s: a string to be escaped
Returns:
an escaped string.
"""
result = ''
for c in s:
hexstr = hex(ord(c))
# because hexstr contains '0x', remove the prefix and add our prefix
result += '\\x' + hexstr[2:]
return result
def GenerateHeader(file):
try:
print "const char *kTestSentences[] = {"
for line in open(file, "r"):
if line.startswith('#'):
continue
line = line.rstrip('\r\n')
if not line:
continue
print " \"%s\"," % escape_string(line)
print "};"
except:
print "cannot open %s" % (file)
sys.exit(1)
def main():
GenerateHeader(sys.argv[1])
if __name__ == "__main__":
main()
|
yuweijun/learning-programming | refs/heads/master | language-python/closure.py | 1 | #! /usr/bin/python
def outter(x, y):
def inner(a = x, b = y):
return a ** b
return inner
f = outter(2, 3)
print f
print f()
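# f() evaluates 2 ** 3 == 8: inner's default arguments captured x and y
# when outter(2, 3) ran, which is what makes this behave like a closure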
def outter1(x, y):
def inner1(a = x, b = y):
return a ** b
return inner1()
f = outter1(2, 3)
print f
def jc(x):
print x
if x: jc(x - 1)
jc(3) |
glwu/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/turtledemo/peace.py | 65 | #!/usr/bin/env python3
""" turtle-example-suite:
tdemo_peace.py
A very simple drawing suitable as a beginner's
programming example.
Uses only commands, which are also available in
old turtle.py.
Intentionally no variables are used except for the
colorloop:
"""
from turtle import *
def main():
peacecolors = ("red3", "orange", "yellow",
"seagreen4", "orchid4",
"royalblue1", "dodgerblue4")
reset()
s = Screen()
up()
goto(-320,-195)
width(70)
for pcolor in peacecolors:
color(pcolor)
down()
forward(640)
up()
backward(640)
left(90)
forward(66)
right(90)
width(25)
color("white")
goto(0,-170)
down()
circle(170)
left(90)
forward(340)
up()
left(180)
forward(170)
right(45)
down()
forward(170)
up()
backward(170)
left(90)
down()
forward(170)
up()
goto(0,300) # vanish if hideturtle() is not available ;-)
return "Done!!"
if __name__ == "__main__":
main()
mainloop()
|
baloo/shinken | refs/heads/debian/master | test/test_module_simplelog.py | 1 | #!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
import os
from shinken_test import unittest, ShinkenTest
from shinken.brok import Brok
from shinken.modules.simplelog_broker import get_instance
class TestConfig(ShinkenTest):
#setUp is in shinken_test
#Change ME :)
def test_simplelog(self):
print self.conf.modules
#get our modules
mod = None
for m in self.conf.modules:
if m.module_type == 'simple_log':
mod = m
self.assert_(mod is not None)
self.assert_(mod.path == 'tmp/nagios.log')
self.assert_(mod.module_name == 'Simple-log')
try :
os.unlink(mod.path)
except :
pass
sl = get_instance(mod)
print sl
#Hack here :(
sl.properties = {}
sl.properties['to_queue'] = None
sl.init()
b = Brok('log', {'log' : "look at my ass.\n"})
sl.manage_brok(b)
b = Brok('log', {'log' : "look at my ass again.\n"})
sl.manage_brok(b)
sl.file.close()
fd = open(mod.path)
buf = fd.readline()
self.assert_(buf == "look at my ass.\n")
buf = fd.readline()
self.assert_(buf == "look at my ass again.\n")
fd.close()
os.unlink(mod.path)
if __name__ == '__main__':
unittest.main()
|
castelao/maud | refs/heads/master | tests/test_wmean_2D.py | 1 | """ Test some fundamental results from window_mean_2D_latlon
"""
import numpy as np
from numpy import ma
from numpy.random import random
from maud import tests_support
from maud import wmean_2D, wmean_2D_serial
try:
import cython
with_cython = True
except:
with_cython = False
if with_cython:
from cmaud import wmean_2D as cwmean_2D
from cmaud import wmean_2D_serial as cwmean_2D_serial
#def random_input(N=10):
# I, J = (N*random(2)).astype('i')+1
def test_inputsizes(f=wmean_2D):
tests_support.inputsizes_f2D(f)
def test_mask(N=4):
l = 5
x = np.arange(N)
y = np.arange(N)
X, Y = np.meshgrid(x, y)
# input ndarray -> output ndarray
Z = np.ones(X.shape)
h = wmean_2D(X, Y, Z, l=l)
assert type(h) is np.ndarray
# input MA array -> output MA array
Z = ma.array(Z)
h = wmean_2D(X, Y, Z, l=l)
assert type(h) == ma.MaskedArray
# Input MA and mask==False -> Output MA and mask==False
assert ~h.mask.any()
# Only the masked inputs should return as masked.
Z.mask = ma.getmaskarray(Z)
Z.mask[0, 0] = True
h = wmean_2D(X, Y, Z, l=l)
assert h[0, 0].mask == True
assert ~h[1:, 1:].mask.any()
def test_whitenoise():
"""
Apply in a 3D array.
Need to improve this.
"""
grid = np.arange(-9, 9, 0.25)
X, Y = np.meshgrid(grid, grid)
#h = ma.array(random(X.shape)-0.5)
h = ma.array(random([3]+list(X.shape))-0.5)
smooth1 = wmean_2D(X, Y, h, l=7.8)
#y2 = cmaud.window_1Dmean(Z, l=l, axis=2, method='hamming')
# Large limits since the filter does not include too many numbers
assert abs(smooth1).mean() < 0.05
assert abs(smooth1).max() < 0.1
def test_2Dmasked_array(N=25):
l = N/2
    # Random array with the top 30% of values masked
grid = np.linspace(-10, 10, N)
X, Y = np.meshgrid(grid, grid)
data = random((N, N))
thr = np.percentile(data, 70)
data = ma.masked_greater(data, thr)
h = wmean_2D(X, Y, data, l=l)
assert h.mask.any()
def eval_ones(x, y, z, l):
h = wmean_2D(x, y, z, l=l)
assert (h == 1).all()
# Ones masked array with random masked positions
tmp = random(z.shape)
# Select the top 1 third of the positions
thr = np.percentile(tmp, 70)
z = ma.masked_array(z, tmp>=thr)
h = wmean_2D(x, y, z, l=l)
assert (h == 1).all()
# Masked values should not interfere in the filtered output.
z.data[z.mask==True] = 1e10
h = wmean_2D(x, y, z, l=l)
assert (h == 1).all()
# With interp, the energy should also be preserved
h = wmean_2D(x, y, z, l=l, interp=True)
assert (h == 1).all()
def test_ones(N=9):
""" The energy must be preserved
Therefore, an array of ones must return only ones, even if
the input has mask, and with interp.
"""
l = N/2
print("Testing 2D array")
grid = np.linspace(-10, 10, N)
X, Y = np.meshgrid(grid, grid)
data = np.ones((N, N))
eval_ones(X, Y, data, l)
print("Testing 3D array")
data = np.ones((3, N, N))
eval_ones(X, Y, data, l)
def test_mask_at_interp():
""" Test the behavior of masked points with interp on|off
As long as the filter is wide enough to capture at least
        one data point per output point, interp=True will return an
        unmasked (interpolated) value at every point.
    """
N = 25
l = N/2
grid = np.linspace(-10, 10, N)
X, Y = np.meshgrid(grid, grid)
data = np.ones((N, N))
thr = np.percentile(data, 90)
data = ma.masked_greater(data, thr)
# Equivalent to interp=False
h = wmean_2D(X, Y, data, l=l)
assert (data.mask == h.mask).all()
h = wmean_2D(X, Y, data, l=l, interp=True)
assert (~h.mask).all()
def test_Serial_x_Parallel(N=10):
"""
Improve this. Should include more possibilities like:
different arrays shapes, l, input types(array x MA)
"""
l = N/2
grid = np.linspace(-10, 10, N)
X, Y = np.meshgrid(grid, grid)
data = random(X.shape)
h_serial = wmean_2D_serial(X, Y, data, l=l)
h = wmean_2D(X, Y, data, l=l)
assert (h_serial == h).all()
def test_Python_x_Cython(N=10):
if not with_cython:
return
l = N/2
# ATENTION, in the future I should not force t to be np.float.
grid = np.linspace(-10, 10, N)
X, Y = np.meshgrid(grid, grid)
data = random(X.shape)
h = wmean_2D(X, Y, data, l=l)
ch = cwmean_2D(X, Y, data, l=l)
assert (h == ch).all()
|
aspectron/jsx | refs/heads/master | extern/boost/tools/build/test/remove_requirement.py | 44 | #!/usr/bin/python
# Copyright (C) Vladimir Prus 2006.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """
project : requirements <threading>multi <variant>debug:<link>static ;
build-project sub ;
build-project sub2 ;
build-project sub3 ;
build-project sub4 ;
""")
t.write("sub/jamfile.jam", """
exe hello : hello.cpp : -<threading>multi ;
""")
t.write("sub/hello.cpp", """
int main() {}
""")
t.write("sub2/jamfile.jam", """
project : requirements -<threading>multi ;
exe hello : hello.cpp ;
""")
t.write("sub2/hello.cpp", """
int main() {}
""")
t.write("sub3/hello.cpp", """
int main() {}
""")
t.write("sub3/jamfile.jam", """
exe hello : hello.cpp : -<variant>debug:<link>static ;
""")
t.write("sub4/hello.cpp", """
int main() {}
""")
t.write("sub4/jamfile.jam", """
project : requirements -<variant>debug:<link>static ;
exe hello : hello.cpp ;
""")
t.run_build_system()
t.expect_addition("sub/bin/$toolset/debug/link-static/hello.exe")
t.expect_addition("sub2/bin/$toolset/debug/link-static/hello.exe")
t.expect_addition("sub3/bin/$toolset/debug/threading-multi/hello.exe")
t.expect_addition("sub4/bin/$toolset/debug/threading-multi/hello.exe")
t.rm(".")
# Now test that path requirements can be removed as well.
t.write("jamroot.jam", """
build-project sub ;
""")
t.write("sub/jamfile.jam", """
project : requirements <include>broken ;
exe hello : hello.cpp : -<include>broken ;
""")
t.write("sub/hello.cpp", """
#include "math.h"
int main() {}
""")
t.write("sub/broken/math.h", """
Broken
""")
t.run_build_system()
t.expect_addition("sub/bin/$toolset/debug/hello.exe")
t.cleanup()
|
MoisesTedeschi/python | refs/heads/master | Scripts-Python/Modulos-Diversos/deteccao-de-faces-com-python-e-opencv/Lib/site-packages/pip/_vendor/cachecontrol/compat.py | 78 | try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
try:
import cPickle as pickle
except ImportError:
import pickle
# Handle the case where the requests module has been patched to not have
# urllib3 bundled as part of its source.
try:
from pip._vendor.requests.packages.urllib3.response import HTTPResponse
except ImportError:
from pip._vendor.urllib3.response import HTTPResponse
try:
from pip._vendor.requests.packages.urllib3.util import is_fp_closed
except ImportError:
from pip._vendor.urllib3.util import is_fp_closed
# Replicate some six behaviour
try:
text_type = unicode
except NameError:
text_type = str
|
libretees/libreshop | refs/heads/master | libreshop/products/migrations/0010_image_main.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-31 00:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0009_auto_20160531_0049'),
]
operations = [
migrations.AddField(
model_name='image',
name='main',
field=models.BooleanField(default=False),
),
]
|
ZihengJiang/mxnet | refs/heads/master | example/image-classification/benchmark.py | 46 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import logging
import argparse
import os
import time
import sys
import shutil
import csv
import re
import subprocess, threading
import pygal
import importlib
import collections
import copy
'''
Setup Logger and LogLevel
'''
def setup_logging(log_loc):
if os.path.exists(log_loc):
shutil.move(log_loc, log_loc + "_" + str(int(os.path.getctime(log_loc))))
os.makedirs(log_loc)
log_file = '{}/benchmark.log'.format(log_loc)
LOGGER = logging.getLogger('benchmark')
LOGGER.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s:%(name)s %(message)s')
file_handler = logging.FileHandler(log_file)
console_handler = logging.StreamHandler()
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
LOGGER.addHandler(file_handler)
LOGGER.addHandler(console_handler)
return LOGGER
'''
Runs the command given in the cmd_args for specified timeout period
and terminates after
'''
class RunCmd(threading.Thread):
def __init__(self, cmd_args, logfile):
threading.Thread.__init__(self)
self.cmd_args = cmd_args
self.logfile = logfile
self.process = None
def run(self):
LOGGER = logging.getLogger('benchmark')
LOGGER.info('started running %s', ' '.join(self.cmd_args))
log_fd = open(self.logfile, 'w')
self.process = subprocess.Popen(self.cmd_args, stdout=log_fd, stderr=subprocess.STDOUT, universal_newlines=True)
for line in self.process.communicate():
LOGGER.debug(line)
log_fd.close()
LOGGER.info('finished running %s', ' '.join(self.cmd_args))
def startCmd(self, timeout):
LOGGER.debug('Attempting to start Thread to run %s', ' '.join(self.cmd_args))
self.start()
self.join(timeout)
if self.is_alive():
LOGGER.debug('Terminating process running %s', ' '.join(self.cmd_args))
self.process.terminate()
self.join()
time.sleep(1)
return
log_loc = './benchmark'
LOGGER = setup_logging(log_loc)
class Network(object):
def __init__(self, mode, name, img_size, batch_size):
self.mode = mode
self.name = name
self.img_size = img_size
self.batch_size = batch_size
self.gpu_speedup = collections.OrderedDict()
def parse_args():
class NetworkArgumentAction(argparse.Action):
def validate(self, attrs):
args = attrs.split(':')
if len(args) != 4 or isinstance(args[0], str) == False or isinstance(args[1], str) == False:
print('expected network attributes in format mode:network_name:batch_size:image_size \
\nThe network_name is a valid model defined as network_name.py in the image-classification/symbol folder. \
\nOr a gluon vision model defined in mxnet/python/mxnet/gluon/model_zoo/model_store.py.')
sys.exit(1)
try:
# check if the network exists
if args[0] == 'native':
importlib.import_module('symbols.' + args[1])
batch_size = int(args[2])
img_size = int(args[3])
return Network(mode=args[0], name=args[1], batch_size=batch_size, img_size=img_size)
except Exception as e:
print('expected network attributes in format mode:network_name:batch_size:image_size \
\nThe network_name is a valid model defined as network_name.py in the image-classification/symbol folder. \
\nOr a gluon vision model defined in mxnet/python/mxnet/gluon/model_zoo/model_store.py.')
print(e)
sys.exit(1)
def __init__(self, *args, **kw):
kw['nargs'] = '+'
argparse.Action.__init__(self, *args, **kw)
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, list) == True:
setattr(namespace, self.dest, map(self.validate, values))
else:
setattr(namespace, self.dest, self.validate(values))
parser = argparse.ArgumentParser(description='Run Benchmark on various imagenet networks using train_imagenent.py')
parser.add_argument('--networks', dest='networks', nargs='+', type=str, help='one or more networks in the format mode:network_name:batch_size:image_size \
\nThe network_name is a valid model defined as network_name.py in the image-classification/symbol folder for native imagenet \
\n Or a gluon vision model defined in mxnet/python/mxnet/gluon/model_zoo/model_store.py.',
action=NetworkArgumentAction)
parser.add_argument('--worker_file', type=str,
help='file that contains a list of worker hostnames or list of worker ip addresses that can be sshed without a password.',
required=True)
parser.add_argument('--worker_count', type=int, help='number of workers to run benchmark on.', required=True)
parser.add_argument('--gpu_count', type=int, help='number of gpus on each worker to use.', required=True)
args = parser.parse_args()
return args
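# series(n) returns the gpu/node counts to benchmark: successive powers of two
# up to n, with n itself appended when it is not a power of two,
# e.g. series(6) -> [1, 2, 4, 6].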
def series(max_count):
i = 1
s = []
while i <= max_count:
s.append(i)
i = i * 2
if s[-1] < max_count:
s.append(max_count)
return s
'''
Choose the middle iteration to get the images processed per sec
'''
def images_processed(log_loc, mode):
f = open(log_loc)
if mode == 'native':
img_per_sec = re.findall("(?:Batch\s+\[30\]\\\\tSpeed:\s+)(\d+\.\d+)(?:\s+)", str(f.readlines()))
else:
img_per_sec = re.findall("(?:Batch\s+\[3\]\\\\tSpeed:\s+)(\d+\.\d+)(?:\s+)", str(f.readlines()))
f.close()
img_per_sec = map(float, img_per_sec)
total_img_per_sec = sum(img_per_sec)
return total_img_per_sec
def generate_hosts_file(num_nodes, workers_file, args_workers_file):
f = open(workers_file, 'w')
output = subprocess.check_output(['head', '-n', str(num_nodes), args_workers_file])
f.write(output)
f.close()
return
def stop_old_processes(hosts_file, prog_name):
stop_args = ['python', '../../tools/kill-mxnet.py', hosts_file, 'python', prog_name]
stop_args_str = ' '.join(stop_args)
LOGGER.info('killing old remote processes\n %s', stop_args_str)
stop = subprocess.check_output(stop_args, stderr=subprocess.STDOUT)
LOGGER.debug(stop)
time.sleep(1)
def run_benchmark(kv_store, data_shape, batch_size, num_gpus, num_nodes, network, args_workers_file, mode):
if mode == 'native':
benchmark_args = ['python', 'train_imagenet.py', '--gpus', ','.join(str(i) for i in range(num_gpus)), \
'--network', network, '--batch-size', str(batch_size * num_gpus), \
'--image-shape', '3,' + str(data_shape) + ',' + str(data_shape), '--num-epochs', '1',
'--kv-store', kv_store, '--benchmark', '1', '--disp-batches', '10']
else:
benchmark_args = ['python', '../gluon/image_classification.py', '--dataset', 'dummy', '--gpus', str(num_gpus), \
'--epochs', '1', '--benchmark', '--mode', mode, '--model', network, '--batch-size',
str(batch_size), \
'--log-interval', str(1), '--kvstore', kv_store]
log = log_loc + '/' + network + '_' + str(num_nodes * num_gpus) + '_log'
hosts = log_loc + '/' + network + '_' + str(num_nodes * num_gpus) + '_workers'
generate_hosts_file(num_nodes, hosts, args_workers_file)
if mode == 'native':
stop_old_processes(hosts, 'train_imagenet.py')
else:
        stop_old_processes(hosts, '../gluon/image_classification.py')
launch_args = ['../../tools/launch.py', '-n', str(num_nodes), '-s', str(num_nodes * 2), '-H', hosts,
' '.join(benchmark_args)]
# use train_imagenet/image_classification when running on a single node
if kv_store == 'device':
imagenet = RunCmd(benchmark_args, log)
imagenet.startCmd(timeout=60 * 10)
else:
launch = RunCmd(launch_args, log)
launch.startCmd(timeout=60 * 10)
if mode == 'native':
stop_old_processes(hosts, 'train_imagenet.py')
else:
        stop_old_processes(hosts, '../gluon/image_classification.py')
img_per_sec = images_processed(log, mode)
LOGGER.info('network: %s, num_gpus: %d, image/sec: %f', network, num_gpus * num_nodes, img_per_sec)
return img_per_sec
def plot_graph(args):
speedup_chart = pygal.Line(x_title='gpus', y_title='speedup', logarithmic=True)
speedup_chart.x_labels = map(str, series(args.worker_count * args.gpu_count))
speedup_chart.add('ideal speedup', series(args.worker_count * args.gpu_count))
for net in args.networks:
        image_single_gpu = net.gpu_speedup[1] if 1 in net.gpu_speedup and net.gpu_speedup[1] else 1
y_values = [each / image_single_gpu for each in net.gpu_speedup.values()]
LOGGER.info('%s: image_single_gpu:%.2f' % (net.name, image_single_gpu))
LOGGER.debug('network:%s, y_values: %s' % (net.name, ' '.join(map(str, y_values))))
speedup_chart.add(net.name, y_values \
, formatter=lambda y_val, img=copy.deepcopy(image_single_gpu), batch_size=copy.deepcopy(
net.batch_size): 'speedup:%.2f, img/sec:%.2f, batch/gpu:%d' % \
(0 if y_val is None else y_val, 0 if y_val is None else y_val * img, batch_size))
speedup_chart.render_to_file(log_loc + '/speedup.svg')
def write_csv(log_loc, args):
for net in args.networks:
with open(log_loc + '/' + net.name + '.csv', 'wb') as f:
w = csv.writer(f)
w.writerow(['num_gpus', 'img_processed_per_sec'])
w.writerows(net.gpu_speedup.items())
def main():
args = parse_args()
for net in args.networks:
# use kv_store='device' when running on 1 node
for num_gpus in series(args.gpu_count):
imgs_per_sec = run_benchmark(kv_store='device', data_shape=net.img_size, batch_size=net.batch_size, \
num_gpus=num_gpus, num_nodes=1, network=net.name,
args_workers_file=args.worker_file, mode=net.mode)
net.gpu_speedup[num_gpus] = imgs_per_sec
for num_nodes in series(args.worker_count)[1::]:
imgs_per_sec = run_benchmark(kv_store='dist_sync_device', data_shape=net.img_size,
batch_size=net.batch_size, \
num_gpus=args.gpu_count, num_nodes=num_nodes, network=net.name,
args_workers_file=args.worker_file, mode=net.mode)
net.gpu_speedup[num_nodes * args.gpu_count] = imgs_per_sec
LOGGER.info('Network: %s (num_gpus, images_processed): %s', net.name, ','.join(map(str, net.gpu_speedup.items())))
write_csv(log_loc, args)
plot_graph(args)
if __name__ == '__main__':
main()
|
rndusr/stig | refs/heads/master | stig/client/filters/tracker.py | 1 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# http://www.gnu.org/licenses/gpl-3.0.txt
"""Filtering TrackerList items by various values"""
from ..ttypes import TorrentTracker
from .base import BoolFilterSpec, CmpFilterSpec, Filter, FilterChain, FilterSpecDict
from .utils import cmp_timestamp_or_timdelta, timestamp_or_timedelta
class _BoolFilterSpec(BoolFilterSpec):
def __init__(self, *args, **kwargs):
super().__init__(*args, needed_keys=('trackers',), **kwargs)
class _CmpFilterSpec(CmpFilterSpec):
def __init__(self, *args, **kwargs):
super().__init__(*args, needed_keys=('trackers',), **kwargs)
class _SingleFilter(Filter):
DEFAULT_FILTER = 'domain'
BOOLEAN_FILTERS = FilterSpecDict({
'all' : _BoolFilterSpec(None,
aliases=('*',),
description='All trackers'),
'alive' : _BoolFilterSpec(lambda trk: trk['status'] != 'stopped',
description='Trackers we are trying to connect to'),
})
COMPARATIVE_FILTERS = FilterSpecDict({
'tier' : _CmpFilterSpec(value_getter=lambda trk: trk['tier'],
value_type=TorrentTracker.TYPES['tier'],
as_bool=lambda trk: True,
description='Match VALUE against tracker tier'),
'domain' : _CmpFilterSpec(value_getter=lambda trk: trk['domain'],
value_type=TorrentTracker.TYPES['domain'],
aliases=('dom', 'tracker'),
description='Match VALUE against domain of announce URL'),
'url-announce' : _CmpFilterSpec(value_getter=lambda trk: trk['url-announce'],
value_type=TorrentTracker.TYPES['url-announce'],
aliases=('an',),
description='Match VALUE against announce URL'),
'url-scrape' : _CmpFilterSpec(value_getter=lambda trk: trk['url-scrape'],
value_type=TorrentTracker.TYPES['url-scrape'],
aliases=('sc',),
description='Match VALUE against scrape URL'),
'status' : _CmpFilterSpec(value_getter=lambda trk: trk['status'],
value_type=TorrentTracker.TYPES['status'],
aliases=('st',),
description=('Match VALUE against tracker status '
'(stopped, idle, queued, announcing, scraping)')),
'error' : _CmpFilterSpec(value_getter=lambda trk: trk['error'],
value_type=TorrentTracker.TYPES['error'],
aliases=('err',),
description='Match VALUE against error message from tracker'),
'downloads' : _CmpFilterSpec(value_getter=lambda trk: trk['count-downloads'],
value_type=TorrentTracker.TYPES['count-downloads'],
aliases=('dns',),
description='Match VALUE against number of known downloads'),
'leeches' : _CmpFilterSpec(value_getter=lambda trk: trk['count-leeches'],
value_type=TorrentTracker.TYPES['count-leeches'],
aliases=('lcs',),
                                         description='Match VALUE against number of known leeching peers'),
'seeds' : _CmpFilterSpec(value_getter=lambda trk: trk['count-seeds'],
value_type=TorrentTracker.TYPES['count-seeds'],
aliases=('sds',),
description='Match VALUE against number of known seeding peers'),
'last-announce' : _CmpFilterSpec(value_getter=lambda trk: trk['time-last-announce'],
value_matcher=lambda trk, op, v: cmp_timestamp_or_timdelta(trk['time-last-announce'], op, v),
value_type=TorrentTracker.TYPES['time-last-announce'],
value_convert=lambda v: timestamp_or_timedelta(v, default_sign=-1),
aliases=('lan',),
description='Match VALUE against time of last announce'),
'next-announce' : _CmpFilterSpec(value_getter=lambda trk: trk['time-next-announce'],
value_matcher=lambda trk, op, v: cmp_timestamp_or_timdelta(trk['time-next-announce'], op, v),
value_type=TorrentTracker.TYPES['time-next-announce'],
value_convert=lambda v: timestamp_or_timedelta(v, default_sign=1),
aliases=('nan',),
description='Match VALUE against time of next announce'),
'last-scrape' : _CmpFilterSpec(value_getter=lambda trk: trk['time-last-scrape'],
value_matcher=lambda trk, op, v: cmp_timestamp_or_timdelta(trk['time-last-scrape'], op, v),
value_type=TorrentTracker.TYPES['time-last-scrape'],
value_convert=lambda v: timestamp_or_timedelta(v, default_sign=-1),
aliases=('lsc',),
description='Match VALUE against time of last scrape'),
'next-scrape' : _CmpFilterSpec(value_getter=lambda trk: trk['time-next-scrape'],
value_matcher=lambda trk, op, v: cmp_timestamp_or_timdelta(trk['time-next-scrape'], op, v),
value_type=TorrentTracker.TYPES['time-next-scrape'],
value_convert=lambda v: timestamp_or_timedelta(v, default_sign=1),
aliases=('nsc',),
description='Match VALUE against time of next scrape'),
})
class TrackerFilter(FilterChain):
"""One or more filters combined with & and | operators"""
filterclass = _SingleFilter
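# Usage sketch (illustrative; assumes the parsing and apply() behaviour provided
# by the shared Filter/FilterChain base classes imported above):
#
#     f = TrackerFilter('domain=example.org&alive')
#     matching = f.apply(torrent['trackers'])
#
# Because 'domain' is the DEFAULT_FILTER, TrackerFilter('example.org') filters
# by domain as well.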
|
jimcarreer/hpack | refs/heads/master | docs/source/conf.py | 1 | # -*- coding: utf-8 -*-
#
# hpack documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 12 13:14:36 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hpack'
copyright = u'2015, Cory Benfield'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.1'
# The full version, including alpha/beta/rc tags.
release = '2.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'hpackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'hpack.tex', u'hpack Documentation',
u'Cory Benfield', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hpack', u'hpack Documentation',
[u'Cory Benfield'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'hpack', u'hpack Documentation',
u'Cory Benfield', 'hpack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
Theer108/invenio | refs/heads/master | invenio/legacy/authorlist/templates.py | 13 | # This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""BibEdit Templates."""
__revision__ = "$Id$"
from invenio.config import CFG_SITE_URL
from invenio.legacy.authorlist import config as cfg
class Template:
"""Authorlist Template Class."""
def __init__(self):
"""Initialize."""
pass
def body(self):
return '<div id="authorlist"></div>'
def css(self, css):
return '@import "%s/img/%s";' % (CFG_SITE_URL, css)
def index_header(self):
return """
%s
%s
<script>
jQuery( document ).ready( function() {
var authorindex = new AuthorlistIndex( 'authorlist' );
} );
</script>
""" % (self.stylesheets(), self.scripts())
def javascript(self, js):
return '<script type="text/javascript" src="%s/js/%s"></script>' % (CFG_SITE_URL, js)
def list_header(self):
return """
%s
%s
<script>
jQuery( document ).ready( function() {
// load config variables
gAuthorlistConfig = %s;
authorlist = new Authorlist( 'authorlist' );
} );
</script>
""" % (self.stylesheets(), self.scripts(), self.config())
def scripts(self):
return '\n'.join([self.javascript(script) for script in cfg.Resources.SCRIPTS])
def stylesheets(self):
return '<style type="text/css" title="InspireTheme">\n%s</style>' % \
'\n'.join([self.css(sheet) for sheet in cfg.Resources.STYLESHEETS])
def config(self):
config_dict = {}
config_dict['IDENTIFIERS_LIST'] = cfg.OPTIONS.IDENTIFIERS_LIST
config_dict['IDENTIFIERS_MAPPING'] = cfg.OPTIONS.IDENTIFIERS_MAPPING
config_dict['AUTHOR_AFFILIATION_TYPE'] = cfg.OPTIONS.AUTHOR_AFFILIATION_TYPE
return config_dict
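    # Note: config() returns a plain dict whose repr() is interpolated into the
    # inline <script> emitted by list_header() above; this happens to work for
    # the simple string/list/dict values used here because their Python literal
    # form is also valid JavaScript (values such as None/True/False or unicode
    # literals would need json.dumps instead).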
|
vijayanandnandam/youtube-dl | refs/heads/master | youtube_dl/extractor/huajiao.py | 64 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
)
class HuajiaoIE(InfoExtractor):
IE_DESC = '花椒直播'
_VALID_URL = r'https?://(?:www\.)?huajiao\.com/l/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.huajiao.com/l/38941232',
'md5': 'd08bf9ac98787d24d1e4c0283f2d372d',
'info_dict': {
'id': '38941232',
'ext': 'mp4',
'title': '#新人求关注#',
'description': 're:.*',
'duration': 2424.0,
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1475866459,
'upload_date': '20161007',
'uploader': 'Penny_余姿昀',
'uploader_id': '75206005',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
feed_json = self._search_regex(
r'var\s+feed\s*=\s*({.+})', webpage, 'feed json')
feed = self._parse_json(feed_json, video_id)
description = self._html_search_meta(
'description', webpage, 'description', fatal=False)
def get(section, field):
return feed.get(section, {}).get(field)
return {
'id': video_id,
'title': feed['feed']['formated_title'],
'description': description,
'duration': parse_duration(get('feed', 'duration')),
'thumbnail': get('feed', 'image'),
'timestamp': parse_iso8601(feed.get('creatime'), ' '),
'uploader': get('author', 'nickname'),
'uploader_id': get('author', 'uid'),
'formats': self._extract_m3u8_formats(
feed['feed']['m3u8'], video_id, 'mp4', 'm3u8_native'),
}
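# The nested get() helper above assumes a feed payload shaped roughly like
# {'feed': {'formated_title': ..., 'duration': ..., 'image': ..., 'm3u8': ...},
#  'author': {'nickname': ..., 'uid': ...}, 'creatime': 'YYYY-MM-DD HH:MM:SS'}
# (illustrative only; the exact fields come from huajiao.com's embedded page data).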
|
LiveZenLK/CeygateERP | refs/heads/master | addons/sale/report/__init__.py | 70 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import sale_report
import invoice_report
|
fiuba08/robotframework | refs/heads/master | atest/testdata/libdoc/NewStyleNoInit.py | 38 | class NewStyleNoInit(object):
"""No inits here!"""
def keyword(self, arg1, arg2):
"""The only lonely keyword."""
|
HalCanary/skia-hc | refs/heads/master | infra/bots/recipes/test.py | 1 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe module for Skia Swarming test.
DEPS = [
'env',
'flavor',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
'run',
'vars',
]
def upload_dm_results(buildername):
skip_upload_bots = [
'ASAN',
'Coverage',
'MSAN',
'TSAN',
'Valgrind',
]
for s in skip_upload_bots:
if s in buildername:
return False
return True
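# For example, upload_dm_results('Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN')
# returns False (sanitizer results are not uploaded), while a builder name containing
# none of the substrings above returns True.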
def dm_flags(api, bot):
args = []
configs = []
blacklisted = []
def blacklist(quad):
config, src, options, name = (
quad.split(' ') if isinstance(quad, str) else quad)
if (config == '_' or
config in configs or
(config[0] == '~' and config[1:] in configs)):
blacklisted.extend([config, src, options, name])
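  # For example, blacklist('gltestthreading gm _ lcdblendmodes') extends the
  # running blacklist with ['gltestthreading', 'gm', '_', 'lcdblendmodes'], but
  # only while that config is active: the first token must be '_', a config in
  # `configs`, or a '~'-prefixed name whose remainder is in `configs`.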
# We've been spending lots of time writing out and especially uploading
# .pdfs, but not doing anything further with them. skia:6821
args.extend(['--dont_write', 'pdf'])
# This enables non-deterministic random seeding of the GPU FP optimization
# test.
# Not Android due to:
# - https://skia.googlesource.com/skia/+/
# 5910ed347a638ded8cd4c06dbfda086695df1112/BUILD.gn#160
# - https://skia.googlesource.com/skia/+/
# ce06e261e68848ae21cac1052abc16bc07b961bf/tests/ProcessorTest.cpp#307
# Not MSAN due to:
# - https://skia.googlesource.com/skia/+/
# 0ac06e47269a40c177747310a613d213c95d1d6d/infra/bots/recipe_modules/
# flavor/gn_flavor.py#80
if 'Android' not in bot and 'MSAN' not in bot:
args.append('--randomProcessorTest')
if 'Pixel3' in bot and 'Vulkan' in bot:
args.extend(['--dontReduceOpsTaskSplitting'])
thread_limit = None
MAIN_THREAD_ONLY = 0
# 32-bit desktop bots tend to run out of memory, because they have relatively
# far more cores than RAM (e.g. 32 cores, 3G RAM). Hold them back a bit.
if '-x86-' in bot:
thread_limit = 4
# These bots run out of memory easily.
if 'Chromecast' in bot or 'MotoG4' in bot or 'Nexus7' in bot:
thread_limit = MAIN_THREAD_ONLY
# Avoid issues with dynamically exceeding resource cache limits.
if 'Test' in bot and 'DISCARDABLE' in bot:
thread_limit = MAIN_THREAD_ONLY
if thread_limit is not None:
args.extend(['--threads', str(thread_limit)])
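  # The resulting flag is e.g. '--threads 4' for the 32-bit desktop case above,
  # or '--threads 0' (MAIN_THREAD_ONLY) to keep DM on the main thread for the
  # memory-constrained devices.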
# Android's kernel will occasionally attempt to kill our process, using
# SIGINT, in an effort to free up resources. If requested, that signal
# is ignored and dm will keep attempting to proceed until we actually
# exhaust the available resources.
if 'Chromecast' in bot:
args.append('--ignoreSigInt')
if 'SwiftShader' in api.vars.extra_tokens:
configs.extend(['gles', 'glesdft'])
args.append('--disableDriverCorrectnessWorkarounds')
elif api.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
args.append('--nogpu')
configs.append('8888')
if 'BonusConfigs' in bot:
configs = [
'pdf',
'g8', '565',
'pic-8888', 'serialize-8888',
'f16', 'srgb', 'esrgb', 'narrow', 'enarrow',
'p3', 'ep3', 'rec2020', 'erec2020']
elif api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
args.append('--nocpu')
# Add in either gles or gl configs to the canonical set based on OS
sample_count = '8'
gl_prefix = 'gl'
if 'Android' in bot or 'iOS' in bot:
sample_count = '4'
# We want to test the OpenGL config not the GLES config on the Shield
if 'NVIDIA_Shield' not in bot:
gl_prefix = 'gles'
# MSAA is disabled on Pixel3a (https://b.corp.google.com/issues/143074513).
if ('Pixel3a' in bot):
sample_count = ''
elif 'Intel' in bot:
# MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926
sample_count = ''
elif 'ChromeOS' in bot:
gl_prefix = 'gles'
if 'NativeFonts' in bot:
configs.append(gl_prefix)
else:
configs.extend([gl_prefix,
gl_prefix + 'dft',
gl_prefix + 'srgb'])
if sample_count:
configs.append(gl_prefix + 'msaa' + sample_count)
# The Tegra3 doesn't support MSAA
if ('Tegra3' in bot or
# We aren't interested in fixing msaa bugs on current iOS devices.
'iPad4' in bot or
'iPadPro' in bot or
'iPhone6' in bot or
'iPhone7' in bot or
# skia:5792
'IntelHD530' in bot or
'IntelIris540' in bot):
configs = [x for x in configs if 'msaa' not in x]
# We want to test both the OpenGL config and the GLES config on Linux Intel:
# GL is used by Chrome, GLES is used by ChromeOS.
# Also do the Ganesh threading verification test (render with and without
# worker threads, using only the SW path renderer, and compare the results).
if 'Intel' in bot and api.vars.is_linux:
configs.extend(['gles',
'glesdft',
'glessrgb',
'gltestthreading'])
# skbug.com/6333, skbug.com/6419, skbug.com/6702
blacklist('gltestthreading gm _ lcdblendmodes')
blacklist('gltestthreading gm _ lcdoverlap')
blacklist('gltestthreading gm _ textbloblooper')
# All of these GMs are flaky, too:
blacklist('gltestthreading gm _ bleed_alpha_bmp')
blacklist('gltestthreading gm _ bleed_alpha_bmp_shader')
blacklist('gltestthreading gm _ bleed_alpha_image')
blacklist('gltestthreading gm _ bleed_alpha_image_shader')
blacklist('gltestthreading gm _ savelayer_with_backdrop')
blacklist('gltestthreading gm _ persp_shaders_bw')
blacklist('gltestthreading gm _ dftext_blob_persp')
blacklist('gltestthreading gm _ dftext')
# skbug.com/7523 - Flaky on various GPUs
blacklist('gltestthreading gm _ orientation')
# These GMs only differ in the low bits
blacklist('gltestthreading gm _ stroketext')
blacklist('gltestthreading gm _ draw_image_set')
# CommandBuffer bot *only* runs the command_buffer config.
if 'CommandBuffer' in bot:
configs = ['commandbuffer']
# ANGLE bot *only* runs the angle configs
if 'ANGLE' in bot:
configs = ['angle_d3d11_es2',
'angle_d3d9_es2',
'angle_gl_es2',
'angle_d3d11_es3']
if sample_count:
configs.append('angle_d3d11_es2_msaa' + sample_count)
configs.append('angle_d3d11_es3_msaa' + sample_count)
if 'LenovoYogaC630' in bot:
# LenovoYogaC630 only supports D3D11, and to save time, we only test ES3
configs = ['angle_d3d11_es3',
'angle_d3d11_es3_msaa' + sample_count]
if 'GTX' in bot or 'Quadro' in bot:
# See skia:7823 and chromium:693090.
configs.append('angle_gl_es3')
if sample_count:
configs.append('angle_gl_es2_msaa' + sample_count)
configs.append('angle_gl_es3_msaa' + sample_count)
if 'NUC5i7RYH' in bot:
# skbug.com/7376
blacklist('_ test _ ProcessorCloneTest')
if 'AndroidOne' in bot or ('Nexus' in bot and 'Nexus5x' not in bot) or 'GalaxyS6' in bot:
# skbug.com/9019
blacklist('_ test _ ProcessorCloneTest')
blacklist('_ test _ Programs')
blacklist('_ test _ ProcessorOptimizationValidationTest')
if 'CommandBuffer' in bot and 'MacBook10.1-' in bot:
# skbug.com/9235
blacklist('_ test _ Programs')
# skbug.com/9033 - these devices run out of memory on this test
# when opList splitting reduction is enabled
if 'GPU' in bot and ('Nexus7' in bot or
'NVIDIA_Shield' in bot or
'Nexus5x' in bot or
('Win10' in bot and 'GTX660' in bot and 'Vulkan' in bot) or
'Chorizo' in bot):
blacklist(['_', 'gm', '_', 'savelayer_clipmask'])
# skbug.com/9123
if 'CommandBuffer' in bot and 'IntelIris5100' in bot:
blacklist(['_', 'test', '_', 'AsyncReadPixels'])
# skbug.com/9043 - these devices render this test incorrectly
# when opList splitting reduction is enabled
if 'GPU' in bot and 'Vulkan' in bot and ('RadeonR9M470X' in bot or
'RadeonHD7770' in bot):
blacklist(['_', 'tests', '_', 'VkDrawableImportTest'])
if 'Vulkan' in bot:
configs = ['vk']
if 'Android' in bot:
configs.append('vkmsaa4')
else:
# MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926, skia:9023
if 'Intel' not in bot:
configs.append('vkmsaa8')
if 'Metal' in bot:
configs = ['mtl']
if 'iOS' in bot:
configs.append('mtlmsaa4')
else:
# MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926
if 'Intel' not in bot:
configs.append('mtlmsaa8')
# Test 1010102 on our Linux/NVIDIA bots and the persistent cache config
# on the GL bots.
if ('QuadroP400' in bot and 'PreAbandonGpuContext' not in bot and
'TSAN' not in bot and api.vars.is_linux):
if 'Vulkan' in bot:
configs.append('vk1010102')
# Decoding transparent images to 1010102 just looks bad
blacklist('vk1010102 image _ _')
else:
configs.extend(['gl1010102',
'gltestpersistentcache',
'gltestglslcache',
'gltestprecompile'])
# Decoding transparent images to 1010102 just looks bad
blacklist('gl1010102 image _ _')
# These tests produce slightly different pixels run to run on NV.
blacklist('gltestpersistentcache gm _ atlastext')
blacklist('gltestpersistentcache gm _ dftext')
blacklist('gltestpersistentcache gm _ glyph_pos_h_b')
blacklist('gltestglslcache gm _ atlastext')
blacklist('gltestglslcache gm _ dftext')
blacklist('gltestglslcache gm _ glyph_pos_h_b')
blacklist('gltestprecompile gm _ atlastext')
blacklist('gltestprecompile gm _ dftext')
blacklist('gltestprecompile gm _ glyph_pos_h_b')
# Tessellation shaders do not yet participate in the persistent cache.
blacklist('gltestpersistentcache gm _ tessellation')
blacklist('gltestglslcache gm _ tessellation')
blacklist('gltestprecompile gm _ tessellation')
# We also test the SkSL precompile config on Pixel2XL as a representative
# Android device - this feature is primarily used by Flutter.
if 'Pixel2XL' in bot and 'Vulkan' not in bot:
configs.append('glestestprecompile')
# Test rendering to wrapped dsts on a few bots
# Also test 'glenarrow', which hits F16 surfaces and F16 vertex colors.
if 'BonusConfigs' in api.vars.extra_tokens:
configs = ['glbetex', 'glbert', 'glenarrow']
if 'ChromeOS' in bot:
# Just run GLES for now - maybe add gles_msaa4 in the future
configs = ['gles']
if 'Chromecast' in bot:
configs = ['gles']
# Test coverage counting path renderer.
if 'CCPR' in bot:
configs = [c for c in configs if c == 'gl' or c == 'gles']
args.extend(['--pr', 'ccpr', '--cc', 'true', '--cachePathMasks', 'false'])
# Test GPU tessellation path renderer.
if 'GpuTess' in bot:
configs = [gl_prefix + 'msaa4']
args.extend(['--pr', 'gtess'])
# Test non-nvpr on NVIDIA.
if 'NonNVPR' in bot:
configs = ['gl', 'glmsaa4']
args.extend(['--pr', '~nvpr'])
# DDL is a GPU-only feature
if 'DDL1' in bot:
# This bot generates gl and vk comparison images for the large skps
configs = [c for c in configs if c == 'gl' or c == 'vk' or c == 'mtl']
args.extend(['--skpViewportSize', "2048"])
args.extend(['--pr', '~small'])
if 'DDL3' in bot:
# This bot generates the ddl-gl and ddl-vk images for the
# large skps and the gms
ddl_configs = ['ddl-' + c for c in configs if c == 'gl' or c == 'vk' or c == 'mtl']
ddl2_configs = ['ddl2-' + c for c in configs if c == 'gl' or c == 'vk' or c == 'mtl']
configs = ddl_configs + ddl2_configs
args.extend(['--skpViewportSize', "2048"])
args.extend(['--gpuThreads', "0"])
tf = api.vars.builder_cfg.get('test_filter')
if 'All' != tf:
# Expected format: shard_XX_YY
parts = tf.split('_')
if len(parts) == 3:
args.extend(['--shard', parts[1]])
args.extend(['--shards', parts[2]])
else: # pragma: nocover
raise Exception('Invalid task name - bad shards: %s' % tf)
args.append('--config')
args.extend(configs)
# Run tests, gms, and image decoding tests everywhere.
args.extend('--src tests gm image lottie colorImage svg skp'.split(' '))
if api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
# Don't run the 'svgparse_*' svgs on GPU.
blacklist('_ svg _ svgparse_')
elif bot == 'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN':
# Only run the CPU SVGs on 8888.
blacklist('~8888 svg _ _')
else:
# On CPU SVGs we only care about parsing. Only run them on the above bot.
args.remove('svg')
# Eventually I'd like these to pass, but for now just skip 'em.
if 'SK_FORCE_RASTER_PIPELINE_BLITTER' in bot:
args.remove('tests')
if 'NativeFonts' in bot: # images won't exercise native font integration :)
args.remove('image')
args.remove('colorImage')
def remove_from_args(arg):
if arg in args:
args.remove(arg)
if 'DDL' in bot:
# The DDL bots just render the large skps and the gms
remove_from_args('tests')
remove_from_args('image')
remove_from_args('colorImage')
remove_from_args('svg')
else:
# Currently, only the DDL bots render skps
remove_from_args('skp')
if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
# Only run the lotties on Lottie bots.
remove_from_args('tests')
remove_from_args('gm')
remove_from_args('image')
remove_from_args('colorImage')
remove_from_args('svg')
remove_from_args('skp')
else:
remove_from_args('lottie')
# TODO: ???
blacklist('f16 _ _ dstreadshuffle')
blacklist('glsrgb image _ _')
blacklist('glessrgb image _ _')
# --src image --config g8 means "decode into Gray8", which isn't supported.
blacklist('g8 image _ _')
blacklist('g8 colorImage _ _')
if 'Valgrind' in bot:
# These take 18+ hours to run.
blacklist('pdf gm _ fontmgr_iter')
blacklist('pdf _ _ PANO_20121023_214540.jpg')
blacklist('pdf skp _ worldjournal')
blacklist('pdf skp _ desk_baidu.skp')
blacklist('pdf skp _ desk_wikipedia.skp')
blacklist('_ svg _ _')
# skbug.com/9171 and 8847
blacklist('_ test _ InitialTextureClear')
if 'TecnoSpark3Pro' in bot:
# skbug.com/9421
blacklist('_ test _ InitialTextureClear')
if 'iOS' in bot:
blacklist(gl_prefix + ' skp _ _')
if 'Mac' in bot or 'iOS' in bot:
# CG fails on questionable bmps
blacklist('_ image gen_platf rgba32abf.bmp')
blacklist('_ image gen_platf rgb24prof.bmp')
blacklist('_ image gen_platf rgb24lprof.bmp')
blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
# CG has unpredictable behavior on this questionable gif
# It's probably using uninitialized memory
blacklist('_ image gen_platf frame_larger_than_image.gif')
# CG has unpredictable behavior on incomplete pngs
# skbug.com/5774
blacklist('_ image gen_platf inc0.png')
blacklist('_ image gen_platf inc1.png')
blacklist('_ image gen_platf inc2.png')
blacklist('_ image gen_platf inc3.png')
blacklist('_ image gen_platf inc4.png')
blacklist('_ image gen_platf inc5.png')
blacklist('_ image gen_platf inc6.png')
blacklist('_ image gen_platf inc7.png')
blacklist('_ image gen_platf inc8.png')
blacklist('_ image gen_platf inc9.png')
blacklist('_ image gen_platf inc10.png')
blacklist('_ image gen_platf inc11.png')
blacklist('_ image gen_platf inc12.png')
blacklist('_ image gen_platf inc13.png')
blacklist('_ image gen_platf inc14.png')
blacklist('_ image gen_platf incInterlaced.png')
# These images fail after Mac 10.13.1 upgrade.
blacklist('_ image gen_platf incInterlaced.gif')
blacklist('_ image gen_platf inc1.gif')
blacklist('_ image gen_platf inc0.gif')
blacklist('_ image gen_platf butterfly.gif')
# WIC fails on questionable bmps
if 'Win' in bot:
blacklist('_ image gen_platf pal8os2v2.bmp')
blacklist('_ image gen_platf pal8os2v2-16.bmp')
blacklist('_ image gen_platf rgba32abf.bmp')
blacklist('_ image gen_platf rgb24prof.bmp')
blacklist('_ image gen_platf rgb24lprof.bmp')
blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
if 'x86_64' in bot and 'CPU' in bot:
# This GM triggers a SkSmallAllocator assert.
blacklist('_ gm _ composeshader_bitmap')
if 'Win' in bot or 'Mac' in bot:
# WIC and CG fail on arithmetic jpegs
blacklist('_ image gen_platf testimgari.jpg')
# More questionable bmps that fail on Mac, too. skbug.com/6984
blacklist('_ image gen_platf rle8-height-negative.bmp')
blacklist('_ image gen_platf rle4-height-negative.bmp')
# These PNGs have CRC errors. The platform generators seem to draw
# uninitialized memory without reporting an error, so skip them to
# avoid lots of images on Gold.
blacklist('_ image gen_platf error')
if 'Android' in bot or 'iOS' in bot or 'Chromecast' in bot:
# This test crashes the N9 (perhaps because of large malloc/frees). It also
# is fairly slow and not platform-specific. So we just disable it on all of
# Android and iOS. skia:5438
blacklist('_ test _ GrShape')
if api.vars.internal_hardware_label == '5':
# http://b/118312149#comment9
blacklist('_ test _ SRGBReadWritePixels')
# skia:4095
bad_serialize_gms = ['bleed_image',
'c_gms',
'colortype',
'colortype_xfermodes',
'drawfilter',
'fontmgr_bounds_0.75_0',
'fontmgr_bounds_1_-0.25',
'fontmgr_bounds',
'fontmgr_match',
'fontmgr_iter',
'imagemasksubset',
'wacky_yuv_formats_domain',
'imagemakewithfilter',
'imagemakewithfilter_crop',
'imagemakewithfilter_crop_ref',
'imagemakewithfilter_ref']
# skia:5589
bad_serialize_gms.extend(['bitmapfilters',
'bitmapshaders',
'bleed',
'bleed_alpha_bmp',
'bleed_alpha_bmp_shader',
'convex_poly_clip',
'extractalpha',
'filterbitmap_checkerboard_32_32_g8',
'filterbitmap_image_mandrill_64',
'shadows',
'simpleaaclip_aaclip'])
# skia:5595
bad_serialize_gms.extend(['composeshader_bitmap',
'scaled_tilemodes_npot',
'scaled_tilemodes'])
# skia:5778
bad_serialize_gms.append('typefacerendering_pfaMac')
# skia:5942
bad_serialize_gms.append('parsedpaths')
# these use a custom image generator which doesn't serialize
bad_serialize_gms.append('ImageGeneratorExternal_rect')
bad_serialize_gms.append('ImageGeneratorExternal_shader')
# skia:6189
bad_serialize_gms.append('shadow_utils')
# skia:7938
bad_serialize_gms.append('persp_images')
# Not expected to round trip encoding/decoding.
bad_serialize_gms.append('all_bitmap_configs')
bad_serialize_gms.append('makecolorspace')
bad_serialize_gms.append('readpixels')
bad_serialize_gms.append('draw_image_set_rect_to_rect')
bad_serialize_gms.append('compositor_quads_shader')
# This GM forces a path to be convex. That property doesn't survive
# serialization.
bad_serialize_gms.append('analytic_antialias_convex')
for test in bad_serialize_gms:
blacklist(['serialize-8888', 'gm', '_', test])
if 'Mac' not in bot:
for test in ['bleed_alpha_image', 'bleed_alpha_image_shader']:
blacklist(['serialize-8888', 'gm', '_', test])
# It looks like we skip these only for out-of-memory concerns.
if 'Win' in bot or 'Android' in bot:
for test in ['verylargebitmap', 'verylarge_picture_image']:
blacklist(['serialize-8888', 'gm', '_', test])
if 'Mac' in bot and 'CPU' in bot:
# skia:6992
blacklist(['pic-8888', 'gm', '_', 'encode-platform'])
blacklist(['serialize-8888', 'gm', '_', 'encode-platform'])
# skia:4769
for test in ['drawfilter']:
blacklist([ 'pic-8888', 'gm', '_', test])
# skia:4703
for test in ['image-cacherator-from-picture',
'image-cacherator-from-raster',
'image-cacherator-from-ctable']:
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist(['serialize-8888', 'gm', '_', test])
# GM that requires raster-backed canvas
for test in ['complexclip4_bw', 'complexclip4_aa', 'p3',
'async_rescale_and_read_text_up_large',
'async_rescale_and_read_text_up',
'async_rescale_and_read_text_down',
'async_rescale_and_read_dog_up',
'async_rescale_and_read_dog_down',
'async_rescale_and_read_rose',
'async_rescale_and_read_no_bleed']:
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist(['serialize-8888', 'gm', '_', test])
# GM requries canvas->makeSurface() to return a valid surface.
blacklist([ 'pic-8888', 'gm', '_', "blurrect_compare"])
blacklist(['serialize-8888', 'gm', '_', "blurrect_compare"])
# Extensions for RAW images
r = ['arw', 'cr2', 'dng', 'nef', 'nrw', 'orf', 'raf', 'rw2', 'pef', 'srw',
'ARW', 'CR2', 'DNG', 'NEF', 'NRW', 'ORF', 'RAF', 'RW2', 'PEF', 'SRW']
# skbug.com/4888
# Blacklist RAW images (and a few large PNGs) on GPU bots
# until we can resolve failures.
if 'GPU' in bot:
blacklist('_ image _ interlaced1.png')
blacklist('_ image _ interlaced2.png')
blacklist('_ image _ interlaced3.png')
for raw_ext in r:
blacklist('_ image _ .%s' % raw_ext)
# Blacklist memory intensive tests on 32-bit bots.
if 'Win8' in bot and 'x86-' in bot:
blacklist('_ image f16 _')
blacklist('_ image _ abnormal.wbmp')
blacklist('_ image _ interlaced1.png')
blacklist('_ image _ interlaced2.png')
blacklist('_ image _ interlaced3.png')
for raw_ext in r:
blacklist('_ image _ .%s' % raw_ext)
if 'Nexus5' in bot and 'GPU' in bot:
# skia:5876
blacklist(['_', 'gm', '_', 'encode-platform'])
if 'AndroidOne-GPU' in bot: # skia:4697, skia:4704, skia:4694, skia:4705
blacklist(['_', 'gm', '_', 'bigblurs'])
blacklist(['_', 'gm', '_', 'bleed'])
blacklist(['_', 'gm', '_', 'bleed_alpha_bmp'])
blacklist(['_', 'gm', '_', 'bleed_alpha_bmp_shader'])
blacklist(['_', 'gm', '_', 'bleed_alpha_image'])
blacklist(['_', 'gm', '_', 'bleed_alpha_image_shader'])
blacklist(['_', 'gm', '_', 'bleed_image'])
blacklist(['_', 'gm', '_', 'dropshadowimagefilter'])
blacklist(['_', 'gm', '_', 'filterfastbounds'])
blacklist([gl_prefix, 'gm', '_', 'imageblurtiled'])
blacklist(['_', 'gm', '_', 'imagefiltersclipped'])
blacklist(['_', 'gm', '_', 'imagefiltersscaled'])
blacklist(['_', 'gm', '_', 'imageresizetiled'])
blacklist(['_', 'gm', '_', 'matrixconvolution'])
blacklist(['_', 'gm', '_', 'strokedlines'])
if sample_count:
gl_msaa_config = gl_prefix + 'msaa' + sample_count
blacklist([gl_msaa_config, 'gm', '_', 'imageblurtiled'])
blacklist([gl_msaa_config, 'gm', '_', 'imagefiltersbase'])
match = []
if 'Valgrind' in bot: # skia:3021
match.append('~Threaded')
if 'Valgrind' in bot and 'PreAbandonGpuContext' in bot:
# skia:6575
match.append('~multipicturedraw_')
if 'AndroidOne' in bot:
match.append('~WritePixels') # skia:4711
match.append('~PremulAlphaRoundTrip_Gpu') # skia:7501
match.append('~ReimportImageTextureWithMipLevels') # skia:8090
if 'Chromecast' in bot:
if 'GPU' in bot:
# skia:6687
match.append('~animated-image-blurs')
match.append('~blur_0.01')
match.append('~blur_image_filter')
match.append('~check_small_sigma_offset')
match.append('~imageblur2')
match.append('~lighting')
match.append('~longpathdash')
match.append('~matrixconvolution')
match.append('~textblobmixedsizes_df')
match.append('~textblobrandomfont')
# Blacklisted to avoid OOM (we see DM just end with "broken pipe")
match.append('~bigbitmaprect_')
match.append('~DrawBitmapRect')
match.append('~drawbitmaprect')
match.append('~GM_animated-image-blurs')
match.append('~ImageFilterBlurLargeImage')
match.append('~savelayer_clipmask')
match.append('~TextBlobCache')
match.append('~verylarge')
if 'GalaxyS6' in bot:
match.append('~SpecialImage') # skia:6338
match.append('~skbug6653') # skia:6653
if 'MSAN' in bot:
match.extend(['~Once', '~Shared']) # Not sure what's up with these tests.
if 'TSAN' in bot:
match.extend(['~ReadWriteAlpha']) # Flaky on TSAN-covered on nvidia bots.
match.extend(['~RGBA4444TextureTest', # Flakier than they are important.
'~RGB565TextureTest'])
# By default, we test with GPU threading enabled, unless specifically
# disabled.
if 'NoGPUThreads' in bot:
args.extend(['--gpuThreads', '0'])
if 'Vulkan' in bot and 'Adreno530' in bot:
# skia:5777
match.extend(['~CopySurface'])
if 'Vulkan' in bot and 'Adreno' in bot:
# skia:7663
match.extend(['~WritePixelsNonTextureMSAA_Gpu'])
match.extend(['~WritePixelsMSAA_Gpu'])
if 'Vulkan' in bot and api.vars.is_linux and 'IntelIris640' in bot:
match.extend(['~VkHeapTests']) # skia:6245
if api.vars.is_linux and 'IntelIris640' in bot:
match.extend(['~Programs']) # skia:7849
if 'IntelIris640' in bot or 'IntelHD615' in bot or 'IntelHDGraphics615' in bot:
match.append('~^SRGBReadWritePixels$') # skia:9225
if 'Vulkan' in bot and api.vars.is_linux and 'IntelHD405' in bot:
# skia:7322
blacklist(['vk', 'gm', '_', 'skbug_257'])
blacklist(['vk', 'gm', '_', 'filltypespersp'])
match.append('~^ClearOp$')
match.append('~^CopySurface$')
match.append('~^ImageNewShader_GPU$')
match.append('~^InitialTextureClear$')
match.append('~^PinnedImageTest$')
match.append('~^ReadPixels_Gpu$')
match.append('~^ReadPixels_Texture$')
match.append('~^SRGBReadWritePixels$')
match.append('~^VkUploadPixelsTests$')
match.append('~^WritePixelsNonTexture_Gpu$')
match.append('~^WritePixelsNonTextureMSAA_Gpu$')
match.append('~^WritePixels_Gpu$')
match.append('~^WritePixelsMSAA_Gpu$')
if 'Vulkan' in bot and 'GTX660' in bot and 'Win' in bot:
# skbug.com/8047
match.append('~FloatingPointTextureTest$')
if 'Metal' in bot and 'HD8870M' in bot and 'Mac' in bot:
# skia:9255
match.append('~WritePixelsNonTextureMSAA_Gpu')
if 'ANGLE' in bot:
# skia:7835
match.append('~BlurMaskBiggerThanDest')
if 'IntelIris6100' in bot and 'ANGLE' in bot and 'Release' in bot:
# skia:7376
match.append('~^ProcessorOptimizationValidationTest$')
if ('IntelIris6100' in bot or 'IntelHD4400' in bot) and 'ANGLE' in bot:
# skia:6857
blacklist(['angle_d3d9_es2', 'gm', '_', 'lighting'])
if 'Chorizo' in bot:
# skia:8869
blacklist(['_', 'gm', '_', 'compositor_quads_filter'])
if 'PowerVRGX6250' in bot:
match.append('~gradients_view_perspective_nodither') #skia:6972
if '-arm-' in bot and 'ASAN' in bot:
# TODO: can we run with env allocator_may_return_null=1 instead?
match.append('~BadImage')
if 'Mac' in bot and 'IntelHD6000' in bot:
# skia:7574
match.append('~^ProcessorCloneTest$')
match.append('~^GrMeshTest$')
if 'Mac' in bot and 'IntelHD615' in bot:
# skia:7603
match.append('~^GrMeshTest$')
if 'LenovoYogaC630' in bot and 'ANGLE' in api.vars.extra_tokens:
# skia:9275
blacklist(['_', 'tests', '_', 'Programs'])
# skia:8976
blacklist(['_', 'tests', '_', 'GrDefaultPathRendererTest'])
# https://bugs.chromium.org/p/angleproject/issues/detail?id=3414
blacklist(['_', 'tests', '_', 'PinnedImageTest'])
if blacklisted:
args.append('--blacklist')
args.extend(blacklisted)
if match:
args.append('--match')
args.extend(match)
# These bots run out of memory running RAW codec tests. Do not run them in
# parallel
if 'Nexus5' in bot or 'Nexus9' in bot:
args.append('--noRAW_threading')
if 'FSAA' in bot:
args.extend(['--analyticAA', 'false'])
if 'FAAA' in bot:
args.extend(['--forceAnalyticAA'])
if 'NativeFonts' not in bot:
args.append('--nonativeFonts')
if 'GDI' in bot:
args.append('--gdi')
# Let's make all bots produce verbose output by default.
args.append('--verbose')
return args
def key_params(api):
"""Build a unique key from the builder name (as a list).
E.g. arch x86 gpu GeForce320M mode MacMini4.1 os Mac10.6
"""
# Don't bother to include role, which is always Test.
blacklist = ['role', 'test_filter']
flat = []
for k in sorted(api.vars.builder_cfg.keys()):
if k not in blacklist:
flat.append(k)
flat.append(api.vars.builder_cfg[k])
return flat
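# For a hypothetical builder_cfg of {'arch': 'x86', 'gpu': 'GeForce320M',
# 'model': 'MacMini4.1', 'os': 'Mac10.6'}, this returns
# ['arch', 'x86', 'gpu', 'GeForce320M', 'model', 'MacMini4.1', 'os', 'Mac10.6'],
# matching the docstring example above.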
def test_steps(api):
"""Run the DM test."""
b = api.properties['buildername']
use_hash_file = False
if upload_dm_results(b):
host_dm_dir = str(api.flavor.host_dirs.dm_dir)
api.flavor.create_clean_host_dir(api.path['start_dir'].join('test'))
device_dm_dir = str(api.flavor.device_dirs.dm_dir)
if host_dm_dir != device_dm_dir:
api.flavor.create_clean_device_dir(device_dm_dir)
# Obtain the list of already-generated hashes.
hash_filename = 'uninteresting_hashes.txt'
host_hashes_file = api.vars.tmp_dir.join(hash_filename)
hashes_file = api.flavor.device_path_join(
api.flavor.device_dirs.tmp_dir, hash_filename)
api.run(
api.python.inline,
'get uninteresting hashes',
program="""
import contextlib
import math
import socket
import sys
import time
import urllib2
HASHES_URL = sys.argv[1]
RETRIES = 5
TIMEOUT = 60
WAIT_BASE = 15
socket.setdefaulttimeout(TIMEOUT)
for retry in range(RETRIES):
try:
with contextlib.closing(
urllib2.urlopen(HASHES_URL, timeout=TIMEOUT)) as w:
hashes = w.read()
with open(sys.argv[2], 'w') as f:
f.write(hashes)
break
except Exception as e:
print 'Failed to get uninteresting hashes from %s:' % HASHES_URL
print e
          if retry == RETRIES - 1:
raise
waittime = WAIT_BASE * math.pow(2, retry)
print 'Retry in %d seconds.' % waittime
time.sleep(waittime)
""",
args=[api.properties['gold_hashes_url'], host_hashes_file],
abort_on_failure=False,
fail_build_on_failure=False,
infra_step=True)
if api.path.exists(host_hashes_file):
api.flavor.copy_file_to_device(host_hashes_file, hashes_file)
use_hash_file = True
# Run DM.
properties = [
'gitHash', api.properties['revision'],
'builder', api.vars.builder_name,
'buildbucket_build_id', api.properties.get('buildbucket_build_id', ''),
'task_id', api.properties['task_id'],
]
if api.vars.is_trybot:
properties.extend([
'issue', api.vars.issue,
'patchset', api.vars.patchset,
'patch_storage', api.vars.patch_storage,
])
properties.extend(['swarming_bot_id', api.vars.swarming_bot_id])
properties.extend(['swarming_task_id', api.vars.swarming_task_id])
if 'Chromecast' in api.vars.builder_cfg.get('os', ''):
# Due to limited disk space, we only deal with skps and one image.
args = [
'dm',
'--resourcePath', api.flavor.device_dirs.resource_dir,
'--skps', api.flavor.device_dirs.skp_dir,
'--images', api.flavor.device_path_join(
api.flavor.device_dirs.resource_dir, 'images', 'color_wheel.jpg'),
'--nameByHash',
'--dontReduceOpsTaskSplitting',
'--properties'
] + properties
else:
args = [
'dm',
'--resourcePath', api.flavor.device_dirs.resource_dir,
'--skps', api.flavor.device_dirs.skp_dir,
'--images', api.flavor.device_path_join(
api.flavor.device_dirs.images_dir, 'dm'),
'--colorImages', api.flavor.device_path_join(
api.flavor.device_dirs.images_dir, 'colorspace'),
'--nameByHash',
'--properties'
] + properties
args.extend(['--svgs', api.flavor.device_dirs.svg_dir])
if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
args.extend([
'--lotties',
api.flavor.device_path_join(
api.flavor.device_dirs.resource_dir, 'skottie'),
api.flavor.device_dirs.lotties_dir])
args.append('--key')
keys = key_params(api)
if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
keys.extend(['renderer', 'skottie'])
if 'DDL' in api.vars.builder_cfg.get('extra_config', ''):
# 'DDL' style means "--skpViewportSize 2048 --pr ~small"
keys.extend(['style', 'DDL'])
else:
keys.extend(['style', 'default'])
args.extend(keys)
if use_hash_file:
args.extend(['--uninterestingHashesFile', hashes_file])
if upload_dm_results(b):
args.extend(['--writePath', api.flavor.device_dirs.dm_dir])
args.extend(dm_flags(api, api.vars.builder_name))
# See skia:2789.
if 'AbandonGpuContext' in api.vars.extra_tokens:
args.append('--abandonGpuContext')
if 'PreAbandonGpuContext' in api.vars.extra_tokens:
args.append('--preAbandonGpuContext')
if 'ReleaseAndAbandonGpuContext' in api.vars.extra_tokens:
args.append('--releaseAndAbandonGpuContext')
api.run(api.flavor.step, 'dm', cmd=args, abort_on_failure=False)
if upload_dm_results(b):
# Copy images and JSON to host machine if needed.
api.flavor.copy_directory_contents_to_host(
api.flavor.device_dirs.dm_dir, api.flavor.host_dirs.dm_dir)
def RunSteps(api):
api.vars.setup()
api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)
api.flavor.setup()
env = {}
if 'iOS' in api.vars.builder_name:
env['IOS_BUNDLE_ID'] = 'com.google.dm'
env['IOS_MOUNT_POINT'] = api.vars.slave_dir.join('mnt_iosdevice')
with api.context(env=env):
try:
if 'Chromecast' in api.vars.builder_name:
api.flavor.install(resources=True, skps=True)
elif 'Lottie' in api.vars.builder_name:
api.flavor.install(resources=True, lotties=True)
else:
api.flavor.install(skps=True, images=True, svgs=True, resources=True)
test_steps(api)
finally:
api.flavor.cleanup_steps()
api.run.check_failure()
TEST_BUILDERS = [
'Test-Android-Clang-AndroidOne-GPU-Mali400MP2-arm-Release-All-Android',
'Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Debug-All-Android',
('Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Debug-All'
'-Android_NoGPUThreads'),
('Test-Android-Clang-GalaxyS7_G930FD-GPU-MaliT880-arm64-Release-All'
'-Android_Vulkan'),
'Test-Android-Clang-MotoG4-CPU-Snapdragon617-arm-Release-All-Android',
'Test-Android-Clang-NVIDIA_Shield-GPU-TegraX1-arm64-Debug-All-Android_CCPR',
'Test-Android-Clang-Nexus5-GPU-Adreno330-arm-Release-All-Android',
'Test-Android-Clang-Nexus7-CPU-Tegra3-arm-Release-All-Android',
'Test-Android-Clang-Pixel-GPU-Adreno530-arm64-Debug-All-Android_Vulkan',
'Test-Android-Clang-Pixel-GPU-Adreno530-arm-Debug-All-Android_ASAN',
'Test-Android-Clang-Pixel2XL-GPU-Adreno540-arm64-Debug-All-Android',
'Test-Android-Clang-Pixel3-GPU-Adreno630-arm64-Debug-All-Android_Vulkan',
'Test-Android-Clang-Pixel3a-GPU-Adreno615-arm64-Debug-All-Android',
('Test-ChromeOS-Clang-AcerChromebookR13Convertible-GPU-PowerVRGX6250-'
'arm-Debug-All'),
'Test-Chromecast-Clang-Chorizo-CPU-Cortex_A7-arm-Release-All',
'Test-Chromecast-Clang-Chorizo-GPU-Cortex_A7-arm-Release-All',
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN',
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-BonusConfigs',
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-shard_00_10-Coverage',
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-MSAN',
('Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All'
'-SK_USE_DISCARDABLE_SCALEDIMAGECACHE'),
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-Lottie',
('Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All'
'-SK_FORCE_RASTER_PIPELINE_BLITTER'),
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-TSAN',
'Test-Debian9-Clang-GCE-GPU-SwiftShader-x86_64-Release-All-SwiftShader',
'Test-Debian9-Clang-NUC5PPYH-GPU-IntelHD405-x86_64-Release-All-Vulkan',
'Test-Debian9-Clang-NUC7i5BNK-GPU-IntelIris640-x86_64-Debug-All-Vulkan',
'Test-Debian10-GCC-GCE-CPU-AVX2-x86_64-Debug-All-Docker',
'Test-iOS-Clang-iPhone6-GPU-PowerVRGX6450-arm64-Release-All-Metal',
('Test-Mac10.13-Clang-MacBook10.1-GPU-IntelHD615-x86_64-Release-All'
'-NativeFonts'),
'Test-Mac10.13-Clang-MacBookPro11.5-CPU-AVX2-x86_64-Release-All',
'Test-Mac10.13-Clang-MacBookPro11.5-GPU-RadeonHD8870M-x86_64-Debug-All-Metal',
('Test-Mac10.13-Clang-MacMini7.1-GPU-IntelIris5100-x86_64-Debug-All'
'-CommandBuffer'),
'Test-Mac10.14-Clang-MacBookAir7.2-GPU-IntelHD6000-x86_64-Debug-All',
'Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-Vulkan',
('Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Release-All'
'-Valgrind_AbandonGpuContext_SK_CPU_LIMIT_SSE41'),
('Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Release-All'
'-Valgrind_PreAbandonGpuContext_SK_CPU_LIMIT_SSE41'),
'Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-DDL1',
'Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-DDL3',
'Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-BonusConfigs',
'Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-GpuTess',
'Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-NonNVPR',
('Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Release-All'
'-ReleaseAndAbandonGpuContext'),
'Test-Win10-Clang-NUC5i7RYH-CPU-AVX2-x86_64-Debug-All-NativeFonts_GDI',
'Test-Win10-Clang-NUC5i7RYH-GPU-IntelIris6100-x86_64-Release-All-ANGLE',
'Test-Win10-Clang-NUCD34010WYKH-GPU-IntelHD4400-x86_64-Release-All-ANGLE',
'Test-Win10-Clang-ShuttleA-GPU-GTX660-x86_64-Release-All-Vulkan',
'Test-Win10-Clang-ShuttleA-GPU-RadeonHD7770-x86_64-Release-All-Vulkan',
'Test-Win10-Clang-ShuttleC-GPU-GTX960-x86_64-Debug-All-ANGLE',
'Test-Win10-MSVC-LenovoYogaC630-GPU-Adreno630-arm64-Debug-All-ANGLE',
'Test-Win2019-Clang-GCE-CPU-AVX2-x86_64-Debug-All-FAAA',
'Test-Win2019-Clang-GCE-CPU-AVX2-x86_64-Debug-All-FSAA',
'Test-iOS-Clang-iPadPro-GPU-PowerVRGT7800-arm64-Release-All',
'Test-Mac10.13-Clang-MacBook10.1-GPU-IntelHD615-x86_64-Debug-All-CommandBuffer',
'Test-Android-Clang-TecnoSpark3Pro-GPU-PowerVRGE8320-arm-Debug-All-Android',
]
def GenTests(api):
for builder in TEST_BUILDERS:
test = (
api.test(builder) +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]',
task_id='task_12345') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('get swarming bot id',
stdout=api.raw_io.output('skia-bot-123')) +
api.step_data('get swarming task id',
stdout=api.raw_io.output('123456'))
)
    if 'Win' in builder and 'LenovoYogaC630' not in builder:
test += api.platform('win', 64)
if 'Chromecast' in builder:
test += api.step_data(
'read chromecast ip',
stdout=api.raw_io.output('192.168.1.2:5555'))
yield test
builder = 'Test-Win8-Clang-Golo-CPU-AVX-x86-Debug-All'
yield (
api.test('trybot') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]',
task_id='task_12345') +
api.platform('win', 64) +
api.properties(patch_storage='gerrit') +
api.properties.tryserver(
buildername=builder,
gerrit_project='skia',
gerrit_url='https://skia-review.googlesource.com/',
)+
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
)
)
builder = 'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All'
yield (
api.test('failed_dm') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]',
task_id='task_12345') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('symbolized dm', retcode=1)
)
builder = 'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Release-All-Android'
yield (
api.test('failed_get_hashes') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]',
task_id='task_12345') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('get uninteresting hashes', retcode=1)
)
builder = 'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Debug-All-Android'
retry_step_name = ('push [START_DIR]/skia/resources/* '
'/sdcard/revenge_of_the_skiabot/resources.push '
'[START_DIR]/skia/resources/file1')
yield (
api.test('failed_push') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]',
task_id='task_12345') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('get swarming bot id',
stdout=api.raw_io.output('build123-m2--device5')) +
api.step_data(retry_step_name, retcode=1) +
api.step_data(retry_step_name + ' (attempt 2)', retcode=1) +
api.step_data(retry_step_name + ' (attempt 3)', retcode=1)
)
retry_step_name = 'adb pull.pull /sdcard/revenge_of_the_skiabot/dm_out'
yield (
api.test('failed_pull') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]',
task_id='task_12345') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('dm', retcode=1) +
api.step_data(retry_step_name, retcode=1) +
api.step_data(retry_step_name + ' (attempt 2)', retcode=1) +
api.step_data(retry_step_name + ' (attempt 3)', retcode=1)
)
yield (
api.test('internal_bot_5') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
gold_hashes_url='https://example.com/hashes.txt',
internal_hardware_label='5',
task_id='task_12345') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
)
)
|
mou4e/zirconium | refs/heads/master | build/android/adb_logcat_printer.py | 69 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shutdown adb_logcat_monitor and print accumulated logs.
To test, call './adb_logcat_printer.py <base_dir>' where
<base_dir> contains 'adb logcat -v threadtime' files named as
logcat_<deviceID>_<sequenceNum>
The script will print the files to stdout (or to --output-path, if given), and will combine multiple
logcats from a single device if there is overlap.
Additionally, if a <base_dir>/LOGCAT_MONITOR_PID exists, the script
will attempt to terminate the contained PID by sending a SIGTERM and
monitoring for the deletion of the aforementioned file.
"""
# pylint: disable=W0702
import cStringIO
import logging
import optparse
import os
import re
import signal
import sys
import time
# Set this to debug for more verbose output
LOG_LEVEL = logging.INFO
def CombineLogFiles(list_of_lists, logger):
"""Splices together multiple logcats from the same device.
Args:
list_of_lists: list of pairs (filename, list of timestamped lines)
logger: handler to log events
Returns:
list of lines with duplicates removed
"""
cur_device_log = ['']
for cur_file, cur_file_lines in list_of_lists:
# Ignore files with just the logcat header
if len(cur_file_lines) < 2:
continue
common_index = 0
# Skip this step if list just has empty string
if len(cur_device_log) > 1:
try:
line = cur_device_log[-1]
# Used to make sure we only splice on a timestamped line
if re.match(r'^\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} ', line):
common_index = cur_file_lines.index(line)
else:
logger.warning('splice error - no timestamp in "%s"?', line.strip())
except ValueError:
# The last line was valid but wasn't found in the next file
cur_device_log += ['***** POSSIBLE INCOMPLETE LOGCAT *****']
logger.info('Unable to splice %s. Incomplete logcat?', cur_file)
cur_device_log += ['*'*30 + ' %s' % cur_file]
cur_device_log.extend(cur_file_lines[common_index:])
return cur_device_log
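# Splice example (illustrative timestamps): if the previous file ends with the
# timestamped line "01-02 03:04:05.678 I/foo( 123): bar" and the next file also
# contains that line, the next file is appended starting at that line, so any
# earlier overlapping lines in it are dropped rather than printed twice.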
def FindLogFiles(base_dir):
"""Search a directory for logcat files.
Args:
base_dir: directory to search
Returns:
Mapping of device_id to a sorted list of file paths for a given device
"""
logcat_filter = re.compile(r'^logcat_(\S+)_(\d+)$')
# list of tuples (<device_id>, <seq num>, <full file path>)
filtered_list = []
for cur_file in os.listdir(base_dir):
matcher = logcat_filter.match(cur_file)
if matcher:
filtered_list += [(matcher.group(1), int(matcher.group(2)),
os.path.join(base_dir, cur_file))]
filtered_list.sort()
file_map = {}
for device_id, _, cur_file in filtered_list:
if device_id not in file_map:
file_map[device_id] = []
file_map[device_id] += [cur_file]
return file_map
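# For example (hypothetical device id): files logcat_emulator-5554_1 and
# logcat_emulator-5554_2 in base_dir would yield
#   {'emulator-5554': ['<base_dir>/logcat_emulator-5554_1',
#                      '<base_dir>/logcat_emulator-5554_2']}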
def GetDeviceLogs(log_filenames, logger):
"""Read log files, combine and format.
Args:
log_filenames: mapping of device_id to sorted list of file paths
logger: logger handle for logging events
Returns:
list of formatted device logs, one for each device.
"""
device_logs = []
for device, device_files in log_filenames.iteritems():
logger.debug('%s: %s', device, str(device_files))
device_file_lines = []
for cur_file in device_files:
with open(cur_file) as f:
device_file_lines += [(cur_file, f.read().splitlines())]
combined_lines = CombineLogFiles(device_file_lines, logger)
# Prepend each line with a short unique ID so it's easy to see
# when the device changes. We don't use the start of the device
# ID because it can be the same among devices. Example lines:
# AB324: foo
# AB324: blah
device_logs += [('\n' + device[-5:] + ': ').join(combined_lines)]
return device_logs
def ShutdownLogcatMonitor(base_dir, logger):
"""Attempts to shutdown adb_logcat_monitor and blocks while waiting."""
try:
monitor_pid_path = os.path.join(base_dir, 'LOGCAT_MONITOR_PID')
with open(monitor_pid_path) as f:
monitor_pid = int(f.readline())
logger.info('Sending SIGTERM to %d', monitor_pid)
os.kill(monitor_pid, signal.SIGTERM)
i = 0
while True:
time.sleep(.2)
if not os.path.exists(monitor_pid_path):
return
if not os.path.exists('/proc/%d' % monitor_pid):
logger.warning('Monitor (pid %d) terminated uncleanly?', monitor_pid)
return
logger.info('Waiting for logcat process to terminate.')
i += 1
if i >= 10:
logger.warning('Monitor pid did not terminate. Continuing anyway.')
return
except (ValueError, IOError, OSError):
logger.exception('Error signaling logcat monitor - continuing')
def main(argv):
parser = optparse.OptionParser(usage='Usage: %prog [options] <log dir>')
parser.add_option('--output-path',
help='Output file path (if unspecified, prints to stdout)')
options, args = parser.parse_args(argv)
if len(args) != 1:
parser.error('Wrong number of unparsed args')
base_dir = args[0]
if options.output_path:
output_file = open(options.output_path, 'w')
else:
output_file = sys.stdout
log_stringio = cStringIO.StringIO()
logger = logging.getLogger('LogcatPrinter')
logger.setLevel(LOG_LEVEL)
sh = logging.StreamHandler(log_stringio)
sh.setFormatter(logging.Formatter('%(asctime)-2s %(levelname)-8s'
' %(message)s'))
logger.addHandler(sh)
try:
# Wait at least 5 seconds after base_dir is created before printing.
#
# The idea is that 'adb logcat > file' output consists of 2 phases:
# 1 Dump all the saved logs to the file
# 2 Stream log messages as they are generated
#
# We want to give enough time for phase 1 to complete. There's no
# good method to tell how long to wait, but it usually only takes a
# second. On most bots, this code path won't occur at all, since
    # the adb_logcat_monitor.py command will have been spawned more than 5
    # seconds prior to this script being called.
try:
sleep_time = 5 - (time.time() - os.path.getctime(base_dir))
except OSError:
sleep_time = 5
if sleep_time > 0:
logger.warning('Monitor just started? Sleeping %.1fs', sleep_time)
time.sleep(sleep_time)
assert os.path.exists(base_dir), '%s does not exist' % base_dir
ShutdownLogcatMonitor(base_dir, logger)
separator = '\n' + '*' * 80 + '\n\n'
for log in GetDeviceLogs(FindLogFiles(base_dir), logger):
output_file.write(log)
output_file.write(separator)
with open(os.path.join(base_dir, 'eventlog')) as f:
output_file.write('\nLogcat Monitor Event Log\n')
output_file.write(f.read())
except:
logger.exception('Unexpected exception')
logger.info('Done.')
sh.flush()
output_file.write('\nLogcat Printer Event Log\n')
output_file.write(log_stringio.getvalue())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
jamespeterschinner/async_v20 | refs/heads/master | tests/test_interface/test_helpers.py | 1 | import inspect
import json
import re
import pandas as pd
import pytest
from async_v20 import endpoints
from async_v20.client import OandaClient
from async_v20.definitions.types import Account
from async_v20.definitions.types import DateTime
from async_v20.definitions.types import OrderRequest
from async_v20.definitions.types import StopLossOrderRequest
from async_v20.definitions.types import ArrayInstrument
from async_v20.definitions.types import MarketOrderRequest
from async_v20.endpoints import POSTOrders
from async_v20.endpoints.annotations import Bool
from async_v20.endpoints.annotations import Authorization
from async_v20.endpoints.annotations import SinceTransactionID
from async_v20.endpoints.annotations import LastTransactionID
from async_v20.exceptions import FailedToCreatePath, InvalidOrderRequest
from async_v20.interface.helpers import _create_request_params
from async_v20.interface.helpers import _format_order_request
from async_v20.interface.helpers import construct_arguments
from async_v20.interface.helpers import create_body
from async_v20.interface.helpers import create_request_kwargs
from async_v20.interface.helpers import create_url
from async_v20.interface.helpers import too_many_passed_transactions
from .helpers import order_dict
from ..data.json_data import GETAccountID_response, example_instruments
from ..fixtures.client import client
from ..fixtures.server import server
from ..test_definitions.helpers import get_valid_primitive_data
client_attrs = [getattr(OandaClient, attr) for attr in dir(OandaClient)]
client_methods = list(filter(lambda x: hasattr(x, 'endpoint'), client_attrs))
import logging
logger = logging.getLogger('async_v20')
logger.disabled = True
client = client
server = server
def test_order_dict():
first = {'a': 1, 'b': 2, 'c': {'d': 3, 'e': 4, 'f': {'e': 5, 'g': 6}}}
second = {'c': {'f': {'g': 6, 'e': 5}, 'e': 4, 'd': 3}, 'b': 2, 'a': 1}
assert order_dict(first) == order_dict(second)
@pytest.fixture
def stop_loss_order():
order = StopLossOrderRequest(instrument='AUD_USD', trade_id=1234, price=0.8)
yield order
del order
client_signatures = [inspect.signature(method) for method in client_methods]
def kwargs(sig):
args = {name: get_valid_primitive_data(param.annotation) for name, param in sig.parameters.items()
if name != 'self'}
return args
annotation_lookup_arguments = [(sig, kwargs(sig)) for sig in client_signatures]
@pytest.mark.asyncio
@pytest.mark.parametrize('signature, arguments', annotation_lookup_arguments)
async def test_construct_arguments(client, server, signature, arguments):
"""Ensure that the annotation lookup dictionary is built correctly"""
await client.initialize()
result = construct_arguments(client, signature, **arguments)
for annotation, instance in result.items():
if isinstance(instance, bool):
assert issubclass(annotation, Bool)
elif isinstance(instance, pd.Timestamp):
assert issubclass(annotation, DateTime)
else:
assert type(instance) == annotation
locations = ['header', 'path', 'query']
test_arguments_arguments = [(getattr(endpoints, cls), location)
for location in locations for cls in endpoints.__all__]
@pytest.mark.parametrize('method, signature, kwargs', zip(client_methods, *zip(*annotation_lookup_arguments)))
@pytest.mark.asyncio
async def test_create_request_params(client, method, signature, kwargs):
"""Test that all every argument supplied to an endpoint goes into the HTTP request"""
endpoint = method.endpoint
arguments = construct_arguments(client, signature, **kwargs)
total_params = []
for location in locations:
result = _create_request_params(client, endpoint, arguments, location)
total_params.extend(result)
# These parameters are set by default in the client.
    # They will appear in total_params even though they were not passed,
    # therefore we will remove them.
for default_param in ['Authorization', 'LastTransactionID', 'Accept-Datetime-Format',
'accountID']:
try:
total_params.remove(default_param)
except ValueError:
continue
assert len(total_params) == len(arguments) - len(list(endpoint.request_schema))
@pytest.mark.parametrize('endpoint', [getattr(endpoints, cls) for cls in endpoints.__all__])
def test_create_url(client, endpoint):
template = endpoint.path
arguments = [value for value in template if not isinstance(value, str)]
values = list(map(lambda x: str(x), range(len(arguments))))
arguments = dict(zip(arguments, values))
url = create_url(client, endpoint, arguments)
path = url.path
for value in values:
assert value in path
path = path[path.index(value):]
@pytest.mark.parametrize('endpoint', [getattr(endpoints, cls) for cls in endpoints.__all__])
def test_create_url_raises_error_when_missing_arguments(client, endpoint):
    if len(endpoint.path) > 3: # URL TEMPLATES with len > 3 will require additional arguments to be passed
with pytest.raises(FailedToCreatePath):
url = create_url(client, endpoint, {})
@pytest.mark.asyncio
@pytest.mark.parametrize('method, signature, kwargs', zip(client_methods, *zip(*annotation_lookup_arguments)))
async def test_create_request_kwargs(client, server, method, signature, kwargs):
await client.initialize()
client.format_order_requests = True
args = construct_arguments(client, signature, **kwargs)
if OrderRequest in args:
args.update({OrderRequest: OrderRequest(instrument='AUD_USD', units=1)})
request_kwargs = create_request_kwargs(client, method.endpoint, args)
# Make sure args are not empty
assert request_kwargs.get('method', 1)
assert request_kwargs.get('url', 1)
assert request_kwargs.get('headers', 1)
assert request_kwargs.get('params', 1)
assert request_kwargs.get('json', 1)
assert [request_kwargs['method']] in [['POST'], ['GET'], ['PUT'], ['PATCH'], ['DELETE']]
auth_in_header = 'Authorization' in request_kwargs.get('headers', '')
if Authorization in method.endpoint.parameters:
assert auth_in_header
else:
assert not auth_in_header
@pytest.mark.asyncio
async def test_request_body_is_constructed_correctly(client, server, stop_loss_order):
await client.initialize()
result = create_body(client, POSTOrders.request_schema,
{OrderRequest: stop_loss_order, 'test': Account(), 'arg': 'random_string'})
correct = {'order': {'instrument':'AUD_USD','tradeID': '1234', 'price': '0.8', 'type': 'STOP_LOSS', 'timeInForce': 'GTC',
'triggerCondition': 'DEFAULT'}}
assert result == correct
@pytest.mark.asyncio
async def test_request_body_does_not_format_order_request_with_no_instrument_parameter(client, server, stop_loss_order):
await client.initialize()
client.format_order_requests = True
create_body(client, POSTOrders.request_schema,
{OrderRequest: stop_loss_order, 'test': Account(), 'arg': 'random_string'})
@pytest.mark.asyncio
async def test_request_body_raises_error_when_cannot_format_order_request(client, server):
await client.initialize()
client.format_order_requests = True
with pytest.raises(InvalidOrderRequest):
create_body(client, POSTOrders.request_schema,
{OrderRequest: MarketOrderRequest(instrument='NOT AN INSTRUMENT', units=1)})
@pytest.mark.asyncio
async def test_request_body_formats_order_request_when_an_order_request_is_passed(client, server):
await client.initialize()
client.format_order_requests = True
with pytest.raises(InvalidOrderRequest):
create_body(client, POSTOrders.request_schema,
{OrderRequest: MarketOrderRequest(instrument='NOT AN INSTRUMENT', units=1)})
@pytest.mark.asyncio
async def test_request_body_does_not_raise_error_when_an_invalid_order_request_is_passed(client, server):
await client.initialize()
client.format_order_requests = True
body = create_body(client, POSTOrders.request_schema,
{OrderRequest: OrderRequest(instrument='AUD_USD', units=0)})
assert body['order']['units'] == '1.0'
@pytest.mark.asyncio
async def test_objects_can_be_converted_between_Model_object_and_json():
account = Account(**GETAccountID_response['account'])
response_json_account = GETAccountID_response['account']
account_to_json = account.dict(json=True, datetime_format='RFC3339')
response_json_account = order_dict(response_json_account)
account_to_json = order_dict(account_to_json)
assert response_json_account == account_to_json
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_updates_units(instrument):
order_request = OrderRequest(instrument='AUD_JPY', units=0.123456)
result = _format_order_request(order_request, instrument, clip=True)
assert result.units >= instrument.minimum_trade_size
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_raises_error_when_units_less_than_minimum(instrument):
order_request = OrderRequest(instrument='XPT_USD', units=0.123456)
with pytest.raises(InvalidOrderRequest):
_format_order_request(order_request, instrument)
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_applies_correct_precision_to_units(instrument):
order_request = OrderRequest(instrument=instrument.name, units=50.1234567891234)
result = _format_order_request(order_request, instrument)
if instrument.trade_units_precision == 0:
assert re.findall(r'(?<=\.)\d+', str(result.units))[0] == '0'
else:
assert len(re.findall(r'(?<=\.)\d+', str(result.units))[0]) == instrument.trade_units_precision
order_request = OrderRequest(instrument=instrument.name, units=0.1234567891234)
result = _format_order_request(order_request, instrument, clip=True)
if instrument.trade_units_precision == 0:
assert re.findall(r'(?<=\.)\d+', str(result.units))[0] == '0'
else:
assert len(re.findall(r'(?<=\.)\d+', str(result.units))[0]) == instrument.trade_units_precision
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_applies_correct_precision_to_price_price_bound_distance(instrument):
order_request = OrderRequest(instrument='AUD_USD', price=50.1234567891234, price_bound=1234.123456789,
distance=20.123456789)
result = _format_order_request(order_request, instrument)
for attr in (result.price, result.price_bound, result.distance):
if instrument.display_precision == 0:
assert re.findall(r'(?<=\.)\d+', str(attr))[0] == '0'
else:
assert len(re.findall(r'(?<=\.)\d+', str(attr))[0]) == instrument.display_precision
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_applies_correct_precision_to_take_profit_on_fill_stop_loss_on_fill(instrument):
order_request = OrderRequest(instrument=instrument.name, take_profit_on_fill=50.123456789,
stop_loss_on_fill=50.123456789)
result = _format_order_request(order_request, instrument)
for attr in (result.stop_loss_on_fill.price, result.take_profit_on_fill):
if instrument.display_precision == 0:
assert re.findall(r'(?<=\.)\d+', str(attr))[0] == '0'
else:
assert len(re.findall(r'(?<=\.)\d+', str(attr))[0]) == instrument.display_precision
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_applies_correct_precision_to_trailing_stop_loss_on_fill(instrument):
order_request = OrderRequest(
instrument=instrument.name,
trailing_stop_loss_on_fill=instrument.minimum_trailing_stop_distance + 0.123456789
)
result = _format_order_request(order_request, instrument)
attr = result.trailing_stop_loss_on_fill.distance
if instrument.display_precision == 0:
assert re.findall(r'(?<=\.)\d+', str(attr))[0] == '0'
else:
assert len(re.findall(r'(?<=\.)\d+', str(attr))[0]) == instrument.display_precision
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_limits_trailing_stop_loss_on_fill_to_valid_range(instrument):
order_request = OrderRequest(
instrument=instrument.name,
trailing_stop_loss_on_fill=0
)
if instrument.minimum_trailing_stop_distance > 0:
with pytest.raises(InvalidOrderRequest):
_format_order_request(order_request, instrument)
result = _format_order_request(order_request, instrument, clip=True)
assert result.trailing_stop_loss_on_fill.distance == instrument.minimum_trailing_stop_distance
order_request = OrderRequest(
instrument=instrument.name,
trailing_stop_loss_on_fill=instrument.maximum_trailing_stop_distance + 10
)
with pytest.raises(InvalidOrderRequest):
_format_order_request(order_request, instrument)
result = _format_order_request(order_request, instrument, clip=True)
assert result.trailing_stop_loss_on_fill.distance == instrument.maximum_trailing_stop_distance
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_limits_units_to_valid_range(instrument):
order_request = OrderRequest(
instrument=instrument.name,
units=0
)
if instrument.minimum_trade_size > 0:
with pytest.raises(InvalidOrderRequest):
_format_order_request(order_request, instrument)
result = _format_order_request(order_request, instrument, clip=True)
assert result.units == instrument.minimum_trade_size
order_request = OrderRequest(
instrument=instrument.name,
units=instrument.maximum_order_units + 10
)
with pytest.raises(InvalidOrderRequest):
_format_order_request(order_request, instrument)
result = _format_order_request(order_request, instrument, clip=True)
assert result.units == instrument.maximum_order_units
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_format_order_requests_accepts_negative_values_for_units(instrument):
order_request = OrderRequest(
instrument=instrument.name,
units=-instrument.minimum_trade_size
)
result = _format_order_request(order_request, instrument, clip=False)
assert result.units == -instrument.minimum_trade_size
result = _format_order_request(order_request, instrument, clip=True)
assert result.units == -instrument.minimum_trade_size
@pytest.mark.parametrize('instrument', ArrayInstrument(*json.loads(example_instruments)))
def test_ins_context_does_not_add_parameters_to_order_requests(instrument):
order_request = OrderRequest(
instrument=instrument.name,
units=instrument.minimum_trade_size
)
result = _format_order_request(order_request, instrument, clip=True)
assert not hasattr(result, 'price_bound')
assert not hasattr(result, 'trailing_stop_loss_on_fill')
assert not hasattr(result, 'stop_loss_on_fill')
assert not hasattr(result, 'take_profit_on_fill')
def test_too_many_passed_transactions(client):
client.default_parameters[SinceTransactionID] = 0
client.default_parameters[LastTransactionID] = 0
assert not too_many_passed_transactions(client)
client.default_parameters[SinceTransactionID] = 0
client.default_parameters[LastTransactionID] = 901
assert too_many_passed_transactions(client)
client.default_parameters.pop(SinceTransactionID)
assert not too_many_passed_transactions(client) |
gtoonstra/airflow | refs/heads/master | airflow/api/common/experimental/get_task.py | 18 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import DagNotFound, TaskNotFound
from airflow.models import DagBag
def get_task(dag_id, task_id):
"""Return the task object identified by the given dag_id and task_id."""
dagbag = DagBag()
# Check DAG exists.
if dag_id not in dagbag.dags:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
# Get DAG object and check Task Exists
dag = dagbag.get_dag(dag_id)
if not dag.has_task(task_id):
error_message = 'Task {} not found in dag {}'.format(task_id, dag_id)
raise TaskNotFound(error_message)
# Return the task.
return dag.get_task(task_id)
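# Minimal usage sketch (dag and task ids are hypothetical):
#   task = get_task('example_dag', 'example_task')
# raises DagNotFound or TaskNotFound if either id does not exist.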
|
jianghuaw/nova | refs/heads/master | nova/objects/keypair.py | 2 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import versionutils
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova.db.sqlalchemy import models as main_models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
KEYPAIR_TYPE_SSH = 'ssh'
KEYPAIR_TYPE_X509 = 'x509'
LOG = logging.getLogger(__name__)
@db_api.api_context_manager.reader
def _get_from_db(context, user_id, name=None, limit=None, marker=None):
query = context.session.query(api_models.KeyPair).\
filter(api_models.KeyPair.user_id == user_id)
if name is not None:
db_keypair = query.filter(api_models.KeyPair.name == name).\
first()
if not db_keypair:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return db_keypair
marker_row = None
if marker is not None:
marker_row = context.session.query(api_models.KeyPair).\
filter(api_models.KeyPair.name == marker).\
filter(api_models.KeyPair.user_id == user_id).first()
if not marker_row:
raise exception.MarkerNotFound(marker=marker)
query = sqlalchemyutils.paginate_query(
query, api_models.KeyPair, limit, ['name'], marker=marker_row)
return query.all()
@db_api.api_context_manager.reader
def _get_count_from_db(context, user_id):
return context.session.query(api_models.KeyPair).\
filter(api_models.KeyPair.user_id == user_id).\
count()
@db_api.api_context_manager.writer
def _create_in_db(context, values):
kp = api_models.KeyPair()
kp.update(values)
try:
kp.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
return kp
@db_api.api_context_manager.writer
def _destroy_in_db(context, user_id, name):
result = context.session.query(api_models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class KeyPair(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Added keypair type
# Version 1.3: Name field is non-null
# Version 1.4: Add localonly flag to get_by_name()
VERSION = '1.4'
fields = {
'id': fields.IntegerField(),
'name': fields.StringField(nullable=False),
'user_id': fields.StringField(nullable=True),
'fingerprint': fields.StringField(nullable=True),
'public_key': fields.StringField(nullable=True),
'type': fields.StringField(nullable=False),
}
def obj_make_compatible(self, primitive, target_version):
super(KeyPair, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 2) and 'type' in primitive:
del primitive['type']
@staticmethod
def _from_db_object(context, keypair, db_keypair):
ignore = {'deleted': False,
'deleted_at': None}
for key in keypair.fields:
if key in ignore and not hasattr(db_keypair, key):
keypair[key] = ignore[key]
else:
keypair[key] = db_keypair[key]
keypair._context = context
keypair.obj_reset_changes()
return keypair
@staticmethod
def _get_from_db(context, user_id, name):
return _get_from_db(context, user_id, name=name)
@staticmethod
def _destroy_in_db(context, user_id, name):
return _destroy_in_db(context, user_id, name)
@staticmethod
def _create_in_db(context, values):
return _create_in_db(context, values)
@base.remotable_classmethod
def get_by_name(cls, context, user_id, name,
localonly=False):
db_keypair = None
if not localonly:
try:
db_keypair = cls._get_from_db(context, user_id, name)
except exception.KeypairNotFound:
pass
if db_keypair is None:
db_keypair = db.key_pair_get(context, user_id, name)
return cls._from_db_object(context, cls(), db_keypair)
@base.remotable_classmethod
def destroy_by_name(cls, context, user_id, name):
try:
cls._destroy_in_db(context, user_id, name)
except exception.KeypairNotFound:
db.key_pair_destroy(context, user_id, name)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
# NOTE(danms): Check to see if it exists in the old DB before
# letting them create in the API DB, since we won't get protection
# from the UC.
try:
db.key_pair_get(self._context, self.user_id, self.name)
raise exception.KeyPairExists(key_name=self.name)
except exception.KeypairNotFound:
pass
self._create()
def _create(self):
updates = self.obj_get_changes()
db_keypair = self._create_in_db(self._context, updates)
self._from_db_object(self._context, self, db_keypair)
@base.remotable
def destroy(self):
try:
self._destroy_in_db(self._context, self.user_id, self.name)
except exception.KeypairNotFound:
db.key_pair_destroy(self._context, self.user_id, self.name)
@base.NovaObjectRegistry.register
class KeyPairList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# KeyPair <= version 1.1
# Version 1.1: KeyPair <= version 1.2
# Version 1.2: KeyPair <= version 1.3
# Version 1.3: Add new parameters 'limit' and 'marker' to get_by_user()
VERSION = '1.3'
fields = {
'objects': fields.ListOfObjectsField('KeyPair'),
}
@staticmethod
def _get_from_db(context, user_id, limit, marker):
return _get_from_db(context, user_id, limit=limit, marker=marker)
@staticmethod
def _get_count_from_db(context, user_id):
return _get_count_from_db(context, user_id)
@base.remotable_classmethod
def get_by_user(cls, context, user_id, limit=None, marker=None):
try:
api_db_keypairs = cls._get_from_db(
context, user_id, limit=limit, marker=marker)
# NOTE(pkholkin): If we were asked for a marker and found it in
# results from the API DB, we must continue our pagination with
# just the limit (if any) to the main DB.
marker = None
except exception.MarkerNotFound:
api_db_keypairs = []
if limit is not None:
limit_more = limit - len(api_db_keypairs)
else:
limit_more = None
if limit_more is None or limit_more > 0:
main_db_keypairs = db.key_pair_get_all_by_user(
context, user_id, limit=limit_more, marker=marker)
else:
main_db_keypairs = []
return base.obj_make_list(context, cls(context), objects.KeyPair,
api_db_keypairs + main_db_keypairs)
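    # Illustrative pagination example: with limit=10, if 7 keypairs come back
    # from the API DB, limit_more becomes 3 and at most 3 more rows are read
    # from the main DB; if the API DB already satisfied the limit, limit_more
    # is 0 and the main DB query is skipped entirely.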
@base.remotable_classmethod
def get_count_by_user(cls, context, user_id):
return (cls._get_count_from_db(context, user_id) +
db.key_pair_count_by_user(context, user_id))
@db_api.pick_context_manager_reader
def _count_unmigrated_instances(context):
return context.session.query(main_models.InstanceExtra).\
filter_by(keypairs=None).\
filter_by(deleted=0).\
count()
@db_api.pick_context_manager_reader
def _get_main_keypairs(context, limit):
return context.session.query(main_models.KeyPair).\
filter_by(deleted=0).\
limit(limit).\
all()
def migrate_keypairs_to_api_db(context, count):
bad_instances = _count_unmigrated_instances(context)
if bad_instances:
LOG.error('Some instances are still missing keypair '
'information. Unable to run keypair migration '
'at this time.')
return 0, 0
main_keypairs = _get_main_keypairs(context, count)
done = 0
for db_keypair in main_keypairs:
kp = objects.KeyPair(context=context,
user_id=db_keypair.user_id,
name=db_keypair.name,
fingerprint=db_keypair.fingerprint,
public_key=db_keypair.public_key,
type=db_keypair.type)
try:
kp._create()
except exception.KeyPairExists:
# NOTE(danms): If this got created somehow in the API DB,
# then it's newer and we just continue on to destroy the
# old one in the cell DB.
pass
db_api.key_pair_destroy(context, db_keypair.user_id, db_keypair.name)
done += 1
return len(main_keypairs), done
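# Illustrative driver sketch (hypothetical; the real entry point is nova's
# online data migration machinery): migrate in batches until nothing is found.
#
#   found, done = migrate_keypairs_to_api_db(ctxt, 50)
#   while found:
#       found, done = migrate_keypairs_to_api_db(ctxt, 50)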
|
JustAkan/Oxygen_united_kernel-gproj-lollipop | refs/heads/cm-12.0 | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
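# Example invocations (illustrative):
#   perf script -s sctop.py              # all comms, refresh every 3 seconds
#   perf script -s sctop.py 5            # all comms, refresh every 5 seconds
#   perf script -s sctop.py firefox 5    # only syscalls made by "firefox"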
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
melon-li/openstack-dashboard | refs/heads/master | openstack_dashboard/dashboards/admin/volumes/volumes/urls.py | 24 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.volumes.volumes \
import views
VIEWS_MOD = ('openstack_dashboard.dashboards.admin.volumes.volumes.views')
urlpatterns = patterns(
VIEWS_MOD,
url(r'^manage/$',
views.ManageVolumeView.as_view(),
name='manage'),
url(r'^(?P<volume_id>[^/]+)/$',
views.DetailView.as_view(),
name='detail'),
url(r'^(?P<volume_id>[^/]+)/update_status$',
views.UpdateStatusView.as_view(),
name='update_status'),
url(r'^(?P<volume_id>[^/]+)/unmanage$',
views.UnmanageVolumeView.as_view(),
name='unmanage'),
url(r'^(?P<volume_id>[^/]+)/migrate$',
views.MigrateVolumeView.as_view(),
name='migrate'),
)
|
kvar/ansible | refs/heads/seas_master_2.9.5 | lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py | 20 | #!/usr/bin/python
#
# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_autoscale_info
version_added: "2.9"
short_description: Get Azure Auto Scale Setting facts
description:
- Get facts of Auto Scale Setting.
options:
resource_group:
description:
- The name of the resource group.
required: True
name:
description:
- The name of the Auto Scale Setting.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Get instance of Auto Scale Setting
azure_rm_autoscale_info:
resource_group: myResourceGroup
name: auto_scale_name
- name: List instances of Auto Scale Setting
azure_rm_autoscale_info:
resource_group: myResourceGroup
'''
RETURN = '''
autoscales:
description: List of Azure Scale Settings dicts.
returned: always
type: list
sample: [{
"enabled": true,
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/microsoft.insights/autoscalesettings/scale",
"location": "eastus",
"name": "scale",
"notifications": [
{
"custom_emails": [
"yuwzho@microsoft.com"
],
"send_to_subscription_administrator": true,
"send_to_subscription_co_administrators": false,
"webhooks": []
}
],
"profiles": [
{
"count": "1",
"max_count": "1",
"min_count": "1",
"name": "Auto created scale condition 0",
"recurrence_days": [
"Monday"
],
"recurrence_frequency": "Week",
"recurrence_hours": [
"6"
],
"recurrence_mins": [
"0"
],
"recurrence_timezone": "China Standard Time",
"rules": [
{
"cooldown": 5.0,
"direction": "Increase",
"metric_name": "Percentage CPU",
"metric_resource_uri": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsof
t.Compute/virtualMachineScaleSets/myVmss",
"operator": "GreaterThan",
"statistic": "Average",
"threshold": 70.0,
"time_aggregation": "Average",
"time_grain": 1.0,
"time_window": 10.0,
"type": "ChangeCount",
"value": "1"
}
]
}
],
"target": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScale
Sets/myVmss"
}]
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils._text import to_native
try:
from msrestazure.azure_exceptions import CloudError
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
# duplicated in azure_rm_autoscale
def timedelta_to_minutes(time):
if not time:
return 0
return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0
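# Worked example (illustrative): timedelta(minutes=5) -> 5.0 and
# timedelta(hours=1, seconds=30) -> 60.5; a None value yields 0.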
def get_enum_value(item):
if 'value' in dir(item):
return to_native(item.value)
return to_native(item)
def auto_scale_to_dict(instance):
if not instance:
return dict()
return dict(
id=to_native(instance.id or ''),
name=to_native(instance.name),
location=to_native(instance.location),
profiles=[profile_to_dict(p) for p in instance.profiles or []],
notifications=[notification_to_dict(n) for n in instance.notifications or []],
enabled=instance.enabled,
target=to_native(instance.target_resource_uri),
tags=instance.tags
)
def rule_to_dict(rule):
if not rule:
return dict()
result = dict(metric_name=to_native(rule.metric_trigger.metric_name),
metric_resource_uri=to_native(rule.metric_trigger.metric_resource_uri),
time_grain=timedelta_to_minutes(rule.metric_trigger.time_grain),
statistic=get_enum_value(rule.metric_trigger.statistic),
time_window=timedelta_to_minutes(rule.metric_trigger.time_window),
time_aggregation=get_enum_value(rule.metric_trigger.time_aggregation),
operator=get_enum_value(rule.metric_trigger.operator),
threshold=float(rule.metric_trigger.threshold))
if rule.scale_action and to_native(rule.scale_action.direction) != 'None':
result['direction'] = get_enum_value(rule.scale_action.direction)
result['type'] = get_enum_value(rule.scale_action.type)
result['value'] = to_native(rule.scale_action.value)
result['cooldown'] = timedelta_to_minutes(rule.scale_action.cooldown)
return result
def profile_to_dict(profile):
if not profile:
return dict()
result = dict(name=to_native(profile.name),
count=to_native(profile.capacity.default),
max_count=to_native(profile.capacity.maximum),
min_count=to_native(profile.capacity.minimum))
if profile.rules:
result['rules'] = [rule_to_dict(r) for r in profile.rules]
if profile.fixed_date:
result['fixed_date_timezone'] = profile.fixed_date.time_zone
result['fixed_date_start'] = profile.fixed_date.start
result['fixed_date_end'] = profile.fixed_date.end
if profile.recurrence:
if get_enum_value(profile.recurrence.frequency) != 'None':
result['recurrence_frequency'] = get_enum_value(profile.recurrence.frequency)
if profile.recurrence.schedule:
result['recurrence_timezone'] = to_native(str(profile.recurrence.schedule.time_zone))
result['recurrence_days'] = [to_native(r) for r in profile.recurrence.schedule.days]
result['recurrence_hours'] = [to_native(r) for r in profile.recurrence.schedule.hours]
result['recurrence_mins'] = [to_native(r) for r in profile.recurrence.schedule.minutes]
return result
def notification_to_dict(notification):
if not notification:
return dict()
return dict(send_to_subscription_administrator=notification.email.send_to_subscription_administrator if notification.email else False,
send_to_subscription_co_administrators=notification.email.send_to_subscription_co_administrators if notification.email else False,
custom_emails=[to_native(e) for e in notification.email.custom_emails or []],
webhooks=[to_native(w.service_url) for w in notification.webhooks or []])
class AzureRMAutoScaleInfo(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str'
),
tags=dict(
type='list'
)
)
# store the results of the module operation
self.results = dict()
self.resource_group = None
self.name = None
self.tags = None
super(AzureRMAutoScaleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_autoscale_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_autoscale_facts' module has been renamed to 'azure_rm_autoscale_info'", version='2.13')
for key in list(self.module_arg_spec):
setattr(self, key, kwargs[key])
if self.resource_group and self.name:
self.results['autoscales'] = self.get()
elif self.resource_group:
self.results['autoscales'] = self.list_by_resource_group()
return self.results
def get(self):
result = []
try:
instance = self.monitor_client.autoscale_settings.get(self.resource_group, self.name)
result = [auto_scale_to_dict(instance)]
except Exception as ex:
self.log('Could not get facts for autoscale {0} - {1}.'.format(self.name, str(ex)))
return result
def list_by_resource_group(self):
results = []
try:
response = self.monitor_client.autoscale_settings.list_by_resource_group(self.resource_group)
results = [auto_scale_to_dict(item) for item in response if self.has_tags(item.tags, self.tags)]
except Exception as ex:
self.log('Could not get facts for autoscale {0} - {1}.'.format(self.name, str(ex)))
return results
def main():
AzureRMAutoScaleInfo()
if __name__ == '__main__':
main()
|
pinkavaj/batch_isp | refs/heads/master | __main__.py | 1 | #!/usr/bin/python3
from batch_isp import BatchISP
if __name__ == '__main__':
batchISP = BatchISP()
exit(batchISP.run())
|
tarzan0820/odoo | refs/heads/8.0 | openerp/addons/test_documentation_examples/__openerp__.py | 344 | # -*- coding: utf-8 -*-
{
'name': "Documentation examples test",
'description': """
Contains pieces of code to be used as technical documentation examples
(via the ``literalinclude`` directive) in situations where they can be
syntax-checked and tested.
""",
'author': "Odoo",
'website': "http://odoo.com",
'category': 'Tests',
'version': '0.1',
'data': [
'ir.model.access.csv',
],
}
|
jaywreddy/django | refs/heads/master | tests/utils_tests/test_os_utils.py | 482 | import os
import unittest
from django.core.exceptions import SuspiciousFileOperation
from django.utils._os import safe_join
class SafeJoinTests(unittest.TestCase):
def test_base_path_ends_with_sep(self):
drive, path = os.path.splitdrive(safe_join("/abc/", "abc"))
self.assertEqual(
path,
"{0}abc{0}abc".format(os.path.sep)
)
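    # Illustrative expected behaviour (POSIX separators assumed): on a
    # non-Windows platform safe_join("/abc/", "abc") yields "/abc/abc", while a
    # path that escapes the base, e.g. safe_join("/abc/", "../def"), raises
    # SuspiciousFileOperation (exercised in test_parent_path below).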
def test_root_path(self):
drive, path = os.path.splitdrive(safe_join("/", "path"))
self.assertEqual(
path,
"{}path".format(os.path.sep),
)
drive, path = os.path.splitdrive(safe_join("/", ""))
self.assertEqual(
path,
os.path.sep,
)
def test_parent_path(self):
with self.assertRaises(SuspiciousFileOperation):
safe_join("/abc/", "../def")
|
bigdatauniversity/edx-platform | refs/heads/master | openedx/core/djangoapps/course_groups/management/commands/post_cohort_membership_fix.py | 64 | """
Intended to fix any inconsistencies that may arise during the rollout of the CohortMembership model.
Illustration: https://gist.github.com/efischer19/d62f8ee42b7fbfbc6c9a
"""
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from openedx.core.djangoapps.course_groups.models import CourseUserGroup, CohortMembership
class Command(BaseCommand):
"""
Repair any inconsistencies between CourseUserGroup and CohortMembership. To be run after migration 0006.
"""
help = '''
Repairs any potential inconsistencies made in the window between running migrations 0005 and 0006, and deploying
the code changes to enforce use of CohortMembership that go with said migrations.
    commit: optional argument. If not provided, the command will dry-run and list the number of operations that would be performed.
'''
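    # Illustrative invocations (the exact manage.py wrapper and settings are
    # deployment specific):
    #   ./manage.py lms post_cohort_membership_fix            # dry run
    #   ./manage.py lms post_cohort_membership_fix --commit   # apply changes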
def add_arguments(self, parser):
"""
Add arguments to the command parser.
"""
parser.add_argument(
'--commit',
action='store_true',
dest='commit',
default=False,
help='Really commit the changes, otherwise, just dry run',
)
def handle(self, *args, **options):
"""
Execute the command. Since this is designed to fix any issues cause by running pre-CohortMembership code
with the database already migrated to post-CohortMembership state, we will use the pre-CohortMembership
table CourseUserGroup as the canonical source of truth. This way, changes made in the window are persisted.
"""
commit = options['commit']
memberships_to_delete = 0
memberships_to_add = 0
# Begin by removing any data in CohortMemberships that does not match CourseUserGroups data
for membership in CohortMembership.objects.all():
try:
CourseUserGroup.objects.get(
group_type=CourseUserGroup.COHORT,
users__id=membership.user.id,
course_id=membership.course_id,
id=membership.course_user_group.id
)
except CourseUserGroup.DoesNotExist:
memberships_to_delete += 1
if commit:
membership.delete()
# Now we can add any CourseUserGroup data that is missing a backing CohortMembership
for course_group in CourseUserGroup.objects.filter(group_type=CourseUserGroup.COHORT):
for user in course_group.users.all():
try:
CohortMembership.objects.get(
user=user,
course_id=course_group.course_id,
course_user_group_id=course_group.id
)
except CohortMembership.DoesNotExist:
memberships_to_add += 1
if commit:
membership = CohortMembership(
course_user_group=course_group,
user=user,
course_id=course_group.course_id
)
try:
membership.save()
except IntegrityError: # If the user is in multiple cohorts, we arbitrarily choose between them
# In this case, allow the pre-existing entry to be "correct"
course_group.users.remove(user)
user.course_groups.remove(course_group)
print '{} CohortMemberships did not match the CourseUserGroup table and will be deleted'.format(
memberships_to_delete
)
print '{} CourseUserGroup users do not have a CohortMembership; one will be added if it is valid'.format(
memberships_to_add
)
if commit:
print 'Changes have been made and saved.'
else:
print 'Dry run, changes have not been saved. Run again with "commit" argument to save changes'
|
amir-qayyum-khan/edx-platform | refs/heads/master | openedx/core/djangoapps/theming/tests/test_storage.py | 21 | """
Tests for comprehensive theme static files storage classes.
"""
import ddt
import unittest
import re
from mock import patch
from django.test import TestCase, override_settings
from django.conf import settings
from openedx.core.djangoapps.theming.helpers import get_theme_base_dirs, Theme, get_theme_base_dir
from openedx.core.djangoapps.theming.storage import ThemeStorage
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class TestStorageLMS(TestCase):
"""
Test comprehensive theming static files storage.
"""
def setUp(self):
super(TestStorageLMS, self).setUp()
self.themes_dir = get_theme_base_dirs()[0]
self.enabled_theme = "red-theme"
self.system_dir = settings.REPO_ROOT / "lms"
self.storage = ThemeStorage(location=self.themes_dir / self.enabled_theme / 'lms' / 'static')
@override_settings(DEBUG=True)
@ddt.data(
(True, "images/logo.png"),
(True, "images/favicon.ico"),
(False, "images/spinning.gif"),
)
@ddt.unpack
def test_themed(self, is_themed, asset):
"""
        Verify that the storage correctly reports whether an asset is themed
"""
self.assertEqual(is_themed, self.storage.themed(asset, self.enabled_theme))
@override_settings(DEBUG=True)
@ddt.data(
("images/logo.png", ),
("images/favicon.ico", ),
)
@ddt.unpack
def test_url(self, asset):
"""
Verify storage returns correct url depending upon the enabled theme
"""
with patch(
"openedx.core.djangoapps.theming.storage.get_current_theme",
return_value=Theme(self.enabled_theme, self.enabled_theme, get_theme_base_dir(self.enabled_theme)),
):
asset_url = self.storage.url(asset)
# remove hash key from file url
asset_url = re.sub(r"(\.\w+)(\.png|\.ico)$", r"\g<2>", asset_url)
expected_url = self.storage.base_url + self.enabled_theme + "/" + asset
self.assertEqual(asset_url, expected_url)
@override_settings(DEBUG=True)
@ddt.data(
("images/logo.png", ),
("images/favicon.ico", ),
)
@ddt.unpack
def test_path(self, asset):
"""
Verify storage returns correct file path depending upon the enabled theme
"""
with patch(
"openedx.core.djangoapps.theming.storage.get_current_theme",
return_value=Theme(self.enabled_theme, self.enabled_theme, get_theme_base_dir(self.enabled_theme)),
):
returned_path = self.storage.path(asset)
expected_path = self.themes_dir / self.enabled_theme / "lms/static/" / asset
self.assertEqual(expected_path, returned_path)
|
ynvb/DIE | refs/heads/master | DIE/Lib/DataPluginBase.py | 1 |
import logging
from yapsy.PluginManager import IPlugin
from DIE.Lib.ParsedValue import ParsedValue
#from idaapi import *
import idaapi
from idautils import *
from idc import *
class DataPluginBase(IPlugin):
"""
DIE Data Parser plugin base class.
"""
name = ""
version = 0
description = ""
author = ""
is_activated = True
    supported_types = [] # supported_types holds tuples containing the supported type name and the type description
type = None # The value type (or None if unidentified).
loc = None # The value (memory) location.
rawValue = None # The raw value to be parsed.
parsedValues = [] # List of the parsed values.
typeName_norm_cb = None # Type name normalizer callback function
def __init__(self):
self.logger = logging.getLogger(__name__)
self.type_params = None # Currently parsed type parameters
def initPlugin(self, type_norm_callback=None):
"""
        Plugin Initialization
@param type_norm_callback: a type name normalization callback function
"""
idaapi.msg("Initializing plugin %s\n" % self.__class__)
# Set type name normalization callback function
if type_norm_callback is not None:
self.typeName_norm_cb = type_norm_callback
# Register supported types
self.registerSupportedTypes()
def guessValues(self, rawData):
"""
"Abstract" method to be implemented by successors
If type is not known, used to guess possible values matching rawData.
@param rawData: Raw data who`s type should be guessed.
"""
def matchType(self, type):
"""
"Abstract" method to be implemented by successors.
Checks if the type is supported by the current plugin.
@param type: And type_info_t object to match
@return: True if a match was found, otherwise False
"""
return True
def parseValue(self, rawData):
"""
"Abstract" method to be implemented by successors.
If type is known, Parses the value.
@param rawData: Raw data who`s type should be parsed.
@param type: IDA type_info_t object
"""
def registerSupportedTypes(self):
"""
A parser can register supported types in order to allow quick parser lookups.
        Types are registered by their type name string value.
        Registration should be made using self.addSuportedType().
"""
def run(self, rawData, type, match_override=False):
"""
Run Plugin
@param rawData: the raw data to be parsed
@param type: data type (None if unknown)
@param match_override: set this flag in order to bypass the plugin type matching method.
@return: DebugValue array with the parsed data
"""
try:
self.parsedValues = [] # Initialize parsed value list
# If type was not recognized, try to guess the value.
if type is None:
self.guessValues(rawData)
return self.parsedValues
# If bypass match flag is set, force parsing.
if match_override:
self.parseValue(rawData)
return self.parsedValues
# Otherwise, if type matches the plugin parser type, run the parser logic.
if self.matchType(type):
self.parseValue(rawData)
return self.parsedValues
except Exception as ex:
self.logger.exception("Error while running plugin: %s", ex)
def setPluginType(self, type):
"""
Set the plugin type string that will be associated with values parsed by this parser
@param type: Type string (e.g. "INT")
@return: True if type was successfully set, otherwise False.
"""
try:
self.type = type.lower()
except Exception as ex:
self.logger.exception("Setting plugin type failed: %s", ex)
return False
def addSuportedType(self, type_name, type_desc):
"""
Add supported type to supported type list
@param type_name: supported type name string
@param type_desc: type description
"""
        # The type description must not be None; set it to an empty string by default.
try:
if type_desc is None:
type_desc = ""
type_name = self.typeName_norm_cb(type_name)
type_tuple = (type_name, type_desc)
if not type_tuple in self.supported_types:
self.supported_types.append(type_tuple)
except Exception as ex:
self.logger.exception("Failed to add supported type: %s", ex)
def checkSupportedType(self, type):
"""
Check if a type name string is supported
@param type: IDA type_into_t object
@return: True if type name is supported or otherwise False
"""
try:
tname = idaapi.print_tinfo('', 0, 0, idaapi.PRTYPE_1LINE, type, '', '')
type_name = None
if self.typeName_norm_cb is not None:
type_name = self.typeName_norm_cb(tname)
for (stype, sparams) in self.supported_types:
if type_name == stype:
self.type_params = sparams
return True
return False
except Exception as ex:
self.logger.exception("Error while checking for supported type: %s", ex)
def getSupportedTypes(self):
"""
Get a list in which each element is a tuple that contains:
[1] supported type name
[2] type description parameters
(type names are strings stripped of all spaces, e.g "UNSIGNED CHAR *" will be returned as "UNSIGNEDCHAR*")
@return: list of TypeTuples
"""
if len(self.supported_types) > 0:
return self.supported_types
else:
return None
def addParsedvalue(self, value, score=0, description="NoN", raw=None):
"""
Add a parsed value to the parsed value list
"""
parsed_val = ParsedValue(value, description, score, raw, self.type)
self.parsedValues.append(parsed_val)
def getParsedValues(self):
"""
Get the parsed values list
@return: Parsed value list (with 'ParsedValue' element types)
"""
return self.parsedValues
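# Minimal subclass sketch (illustrative only; the parser name and parsing logic
# are hypothetical):
#
#   class IntParser(DataPluginBase):
#       def registerSupportedTypes(self):
#           self.addSuportedType("int", "")
#           self.setPluginType("int")
#
#       def parseValue(self, rawData):
#           self.addParsedvalue(int(rawData, 16), score=5,
#                               description="hex integer", raw=rawData)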
|
ankeshanand/imgurpython | refs/heads/master | imgurpython/imgur/models/conversation.py | 22 | from .message import Message
class Conversation(object):
def __init__(self, conversation_id, last_message_preview, datetime, with_account_id, with_account, message_count, messages=None,
done=None, page=None):
self.id = conversation_id
self.last_message_preview = last_message_preview
self.datetime = datetime
self.with_account_id = with_account_id
self.with_account = with_account
self.message_count = message_count
self.page = page
self.done = done
if messages:
self.messages = [Message(
message['id'],
message['from'],
message['account_id'],
message['sender_id'],
message['body'],
message['conversation_id'],
message['datetime'],
) for message in messages]
else:
self.messages = None
|
40123247/w17 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/multiprocessing/util.py | 696 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading        # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
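# Illustrative sketch (not part of the original module): objects holding
# process-specific state can ask to be reinitialised in forked children, e.g.
#
#   class Cache(object):
#       def __init__(self):
#           self._data = {}
#           register_after_fork(self, Cache._clear)
#       def _clear(self):
#           self._data = {}
#
# ForkAwareThreadLock below uses exactly this mechanism to rebuild its lock.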
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
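# Minimal usage sketch (illustrative only, not part of the original module):
# register a cleanup callback that runs when `res` is garbage collected or at
# interpreter exit, whichever comes first.
#
#   def _cleanup(path):
#       info('cleaning up %s', path)
#
#   res = SomeResource()                      # SomeResource is a placeholder
#   fin = Finalize(res, _cleanup, args=('/tmp/example',), exitpriority=10)
#   fin.still_active()                        # True until the callback has run
#
# get_temp_dir() above uses the same pattern with shutil.rmtree.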
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children,
current_process=current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
|
arista-eosplus/ansible | refs/heads/devel | test/runner/retry.py | 177 | #!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Automatically retry failed commands."""
from __future__ import absolute_import, print_function
# noinspection PyCompatibility
import argparse
import errno
import os
import sys
import time
from lib.util import (
display,
raw_command,
ApplicationError,
ApplicationWarning,
SubprocessError,
)
try:
import argcomplete
except ImportError:
argcomplete = None
def main():
"""Main program function."""
try:
args = parse_args()
display.verbosity = args.verbosity
display.color = args.color
command = [args.command] + args.args
for attempt in range(0, args.tries):
if attempt > 0:
time.sleep(args.sleep)
try:
raw_command(command, env=os.environ)
return
except SubprocessError as ex:
display.error(ex)
except ApplicationWarning as ex:
display.warning(str(ex))
exit(0)
except ApplicationError as ex:
display.error(str(ex))
exit(1)
except KeyboardInterrupt:
exit(2)
except IOError as ex:
if ex.errno == errno.EPIPE:
exit(3)
raise
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose',
dest='verbosity',
action='count',
default=0,
help='display more output')
parser.add_argument('--color',
metavar='COLOR',
nargs='?',
help='generate color output: %(choices)s',
choices=('yes', 'no', 'auto'),
const='yes',
default='auto')
parser.add_argument('--tries',
metavar='TRIES',
type=int,
default=3,
help='number of tries to execute command (default: %(default)s)')
parser.add_argument('--sleep',
metavar='SECONDS',
type=int,
default=3,
help='seconds to sleep between tries (default: %(default)s)')
parser.add_argument('command',
help='command to execute')
parser.add_argument('args',
metavar='...',
nargs=argparse.REMAINDER,
help='optional arguments for command')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.color == 'yes':
args.color = True
elif args.color == 'no':
args.color = False
elif 'SHIPPABLE' in os.environ:
args.color = True
else:
args.color = sys.stdout.isatty()
return args
if __name__ == '__main__':
main()
|
wweiradio/django | refs/heads/master | tests/template_backends/apps/good/templatetags/subpackage/tags.py | 1426 | from django.template import Library
register = Library()
|
alphagov/digitalmarketplace-api | refs/heads/dependabot/pip/digitalmarketplace-apiclient-22.2.0 | tests/test_url_utils.py | 1 | from app.url_utils import force_relative_url
class TestForceRelativeURL(object):
def test_hostname_removed(self):
result = force_relative_url('http://hostname:port/', 'https://badhostname/plus/path?woo')
assert result == "plus/path?woo"
def test_additional_base_path_removed(self):
result = force_relative_url('http://hostname:port/extra/', 'https://badhostname/extra/plus/path?woo')
assert result == "plus/path?woo"
def test_additional_base_path_no_slash(self):
# This is a stupid case: the missing slash means that our relative URL *must* include the 'extra' part,
# if it is to actually work when eventually re-joined to the base URL. (urljoin will, as expected,
# remove the resource at the bottom level when joining a relative URL.)
result = force_relative_url('http://hostname:port/extra', 'https://badhostname/extra/plus/path?woo')
assert result == "extra/plus/path?woo"
def test_mismatched_base_paths_ignored(self):
result = force_relative_url('http://hostname:port/extra/', 'https://badhostname/mismatch/plus/path?woo')
# No way to be sure that removing "mismatch" is correct - so we must not (if this ever happened we
# probably did something wrong).
assert result == "/mismatch/plus/path?woo"
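    # Illustrative property (assumed, mirroring the tests above): re-joining the
    # relative result onto the base URL with urljoin (urlparse / urllib.parse)
    # should reproduce the original resource, e.g.
    #
    #   rel = force_relative_url('http://host/extra/', 'http://host/extra/plus/path?woo')
    #   urljoin('http://host/extra/', rel)    # -> 'http://host/extra/plus/path?woo'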
|
MihaiMoldovanu/ansible | refs/heads/devel | test/runner/lib/metadata.py | 21 | """Test metadata for passing data to delegated tests."""
from __future__ import absolute_import, print_function
import json
from lib.util import (
display,
is_shippable,
)
from lib.diff import (
parse_diff,
FileDiff,
)
class Metadata(object):
"""Metadata object for passing data to delegated tests."""
def __init__(self):
"""Initialize metadata."""
        self.changes = {}  # type: dict[str, tuple[tuple[int, int]]]
        self.cloud_config = None  # type: dict[str, str]
if is_shippable():
self.ci_provider = 'shippable'
else:
self.ci_provider = ''
def populate_changes(self, diff):
"""
:type diff: list[str] | None
"""
patches = parse_diff(diff)
patches = sorted(patches, key=lambda k: k.new.path) # type: list [FileDiff]
self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches)
renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists]
deletes = [patch.old.path for patch in patches if not patch.new.exists]
# make sure old paths which were renamed or deleted are registered in changes
for path in renames + deletes:
if path in self.changes:
# old path was replaced with another file
continue
# failed tests involving deleted files should be using line 0 since there is no content remaining
self.changes[path] = ((0, 0),)
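    # Illustrative note (example values assumed, not from the original source):
    # after populate_changes() runs on a unified diff, self.changes maps each
    # changed path to the line ranges touched, e.g.
    #
    #   {'lib/util.py': ((10, 14), (40, 40)), 'old/deleted.py': ((0, 0),)}
    #
    # where ((0, 0),) marks renamed-away or deleted files.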
def to_dict(self):
"""
:rtype: dict[str, any]
"""
return dict(
changes=self.changes,
cloud_config=self.cloud_config,
ci_provider=self.ci_provider,
)
def to_file(self, path):
"""
        :type path: str
"""
data = self.to_dict()
display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3)
with open(path, 'w') as data_fd:
json.dump(data, data_fd, sort_keys=True, indent=4)
@staticmethod
def from_file(path):
"""
:type path: str
:rtype: Metadata
"""
with open(path, 'r') as data_fd:
data = json.load(data_fd)
return Metadata.from_dict(data)
@staticmethod
def from_dict(data):
"""
:type data: dict[str, any]
:rtype: Metadata
"""
metadata = Metadata()
metadata.changes = data['changes']
metadata.cloud_config = data['cloud_config']
metadata.ci_provider = data['ci_provider']
return metadata
|
google/rekall | refs/heads/master | rekall-agent/rekall_agent/flows/collect.py | 1 | #!/usr/bin/env python2
# Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Author: Michael Cohen scudette@google.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
__author__ = "Michael Cohen <scudette@google.com>"
import time
from rekall_agent import flow
from rekall_agent import result_collections
from rekall_agent.client_actions import collect as collect_action
from rekall_lib.rekall_types import agent
class CollectFlow(agent.Flow):
"""Create a collection and upload it.
This flow defines an EFilter query string, and a collection definition, and
creates client Action()s from these. It is meant as a shorthand for writing
more complete flows.
"""
__abstract = True
# This is the EFilter query and possible parameters. It is a dict with key
# being the Rekall live mode and value being the query to run in that
# mode. The mode is selected from the Flow.session.live parameter.
_query = {}
schema = [
dict(name="query_parameters", type="dict"),
]
# The columns to add to the collection spec.
_columns = []
_collection_name = "collection"
def set_collection_name(self, name):
self._collection_name = name
def expand_collection_name(self):
return self._collection_name.format(
timestamp=int(time.time()),
flow_id=self.flow_id,
)
def get_location(self):
"""Work out where the agent should store the collection."""
if self.is_hunt():
return self._config.server.hunt_vfs_path_for_client(
self.flow_id, self.expand_collection_name(),
expiration=self.expiration())
return self._config.server.vfs_path_for_client(
self.client_id, self.expand_collection_name(),
expiration=self.expiration())
def generate_actions(self):
# Make a collection to store the result.
collection = result_collections.GenericSQLiteCollection.from_keywords(
session=self._session,
type=self._collection_type,
location=self.get_location(),
tables=[
dict(name="default", columns=self._columns)
],
)
yield collect_action.CollectAction.from_keywords(
session=self._session,
query=self._query,
query_parameters=self.query_parameters,
collection=collection
)
class ListProcessesFlow(CollectFlow):
"""Collect data about all processes."""
_query = dict(
mode_live_api=(
"select Name as name, pid, ppid, start from pslist()"),
mode_linux_memory=(
"select proc.name, proc.pid, ppid, start_time from pslist()")
)
_collection_type = "pslist"
_columns = [
dict(name="name", type="unicode"),
dict(name="pid", type="int"),
dict(name="ppid", type="int"),
dict(name="start_time", type="epoch"),
]
_collection_name = "pslist"
|
analyst-collective/dbt | refs/heads/experiment/typetag | test/integration/040_override_database_test/test_override_database.py | 1 | from test.integration.base import DBTIntegrationTest, use_profile
import os
class BaseOverrideDatabase(DBTIntegrationTest):
setup_alternate_db = True
@property
def schema(self):
return "override_database_040"
@property
def models(self):
return "models"
@property
def alternative_database(self):
if self.adapter_type == 'snowflake':
return os.getenv('SNOWFLAKE_TEST_DATABASE')
else:
return super().alternative_database
def snowflake_profile(self):
return {
'config': {
'send_anonymous_usage_stats': False
},
'test': {
'outputs': {
'default2': {
'type': 'snowflake',
'threads': 4,
'account': os.getenv('SNOWFLAKE_TEST_ACCOUNT'),
'user': os.getenv('SNOWFLAKE_TEST_USER'),
'password': os.getenv('SNOWFLAKE_TEST_PASSWORD'),
'database': os.getenv('SNOWFLAKE_TEST_QUOTED_DATABASE'),
'schema': self.unique_schema(),
'warehouse': os.getenv('SNOWFLAKE_TEST_WAREHOUSE'),
},
'noaccess': {
'type': 'snowflake',
'threads': 4,
'account': os.getenv('SNOWFLAKE_TEST_ACCOUNT'),
'user': 'noaccess',
'password': 'password',
'database': os.getenv('SNOWFLAKE_TEST_DATABASE'),
'schema': self.unique_schema(),
'warehouse': os.getenv('SNOWFLAKE_TEST_WAREHOUSE'),
}
},
'target': 'default2'
}
}
@property
def project_config(self):
return {
'config-version': 2,
'data-paths': ['data'],
'vars': {
'alternate_db': self.alternative_database,
},
'quoting': {
'database': True,
},
'seeds': {
'quote_columns': False,
}
}
def run_dbt_notstrict(self, args):
return self.run_dbt(args, strict=False)
class TestModelOverride(BaseOverrideDatabase):
def run_database_override(self):
if self.adapter_type == 'snowflake':
func = lambda x: x.upper()
else:
func = lambda x: x
self.run_dbt_notstrict(['seed'])
self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4)
self.assertManyRelationsEqual([
(func('seed'), self.unique_schema(), self.default_database),
(func('view_2'), self.unique_schema(), self.alternative_database),
(func('view_1'), self.unique_schema(), self.default_database),
(func('view_3'), self.unique_schema(), self.default_database),
(func('view_4'), self.unique_schema(), self.alternative_database),
])
@use_profile('bigquery')
def test_bigquery_database_override(self):
self.run_database_override()
@use_profile('snowflake')
def test_snowflake_database_override(self):
self.run_database_override()
class BaseTestProjectModelOverride(BaseOverrideDatabase):
# this is janky, but I really want to access self.default_database in
# project_config
@property
def default_database(self):
target = self._profile_config['test']['target']
profile = self._profile_config['test']['outputs'][target]
for key in ['database', 'project', 'dbname']:
if key in profile:
database = profile[key]
if self.adapter_type == 'snowflake':
return database.upper()
return database
assert False, 'No profile database found!'
def run_database_override(self):
self.run_dbt_notstrict(['seed'])
self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4)
self.assertExpectedRelations()
def assertExpectedRelations(self):
if self.adapter_type == 'snowflake':
func = lambda x: x.upper()
else:
func = lambda x: x
self.assertManyRelationsEqual([
(func('seed'), self.unique_schema(), self.default_database),
(func('view_2'), self.unique_schema(), self.alternative_database),
(func('view_1'), self.unique_schema(), self.alternative_database),
(func('view_3'), self.unique_schema(), self.default_database),
(func('view_4'), self.unique_schema(), self.alternative_database),
])
class TestProjectModelOverride(BaseTestProjectModelOverride):
@property
def project_config(self):
return {
'config-version': 2,
'vars': {
'alternate_db': self.alternative_database,
},
'models': {
'database': self.alternative_database,
'test': {
'subfolder': {
'database': self.default_database,
}
}
},
            'data-paths': ['data'],
'quoting': {
'database': True,
},
'seeds': {
'quote_columns': False,
}
}
@use_profile('bigquery')
def test_bigquery_database_override(self):
self.run_database_override()
@use_profile('snowflake')
def test_snowflake_database_override(self):
self.run_database_override()
class TestProjectModelAliasOverride(BaseTestProjectModelOverride):
@property
def project_config(self):
return {
'config-version': 2,
'vars': {
'alternate_db': self.alternative_database,
},
'models': {
'project': self.alternative_database,
'test': {
'subfolder': {
'project': self.default_database,
}
}
},
            'data-paths': ['data'],
'quoting': {
'database': True,
},
'seeds': {
'quote_columns': False,
}
}
@use_profile('bigquery')
def test_bigquery_project_override(self):
self.run_database_override()
class TestProjectSeedOverride(BaseOverrideDatabase):
def run_database_override(self):
if self.adapter_type == 'snowflake':
func = lambda x: x.upper()
else:
func = lambda x: x
self.use_default_project({
'config-version': 2,
'seeds': {
'database': self.alternative_database
},
})
self.run_dbt_notstrict(['seed'])
self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4)
self.assertManyRelationsEqual([
(func('seed'), self.unique_schema(), self.alternative_database),
(func('view_2'), self.unique_schema(), self.alternative_database),
(func('view_1'), self.unique_schema(), self.default_database),
(func('view_3'), self.unique_schema(), self.default_database),
(func('view_4'), self.unique_schema(), self.alternative_database),
])
@use_profile('bigquery')
def test_bigquery_database_override(self):
self.run_database_override()
@use_profile('snowflake')
def test_snowflake_database_override(self):
self.run_database_override()
|
CalthorpeAnalytics/urbanfootprint | refs/heads/master | footprint/main/resources/presentation_medium_resource.py | 1 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
import logging
from tastypie import fields
from tastypie.fields import ListField
from footprint.main.lib.functions import remove_keys
from footprint.main.models.presentation.presentation_medium import PresentationMedium
from footprint.main.resources.db_entity_resources import DbEntityResource
from footprint.main.resources.footprint_resource import FootprintResource
from footprint.main.resources.medium_resources import MediumResource
from footprint.main.resources.pickled_dict_field import PickledDictField
from footprint.main.resources.user_resource import UserResource
logger = logging.getLogger(__name__)
__author__ = 'calthorpe_analytics'
class PresentationMediumResource(FootprintResource):
"""
    The through class between Presentation and Medium. A list of these is loaded by a PresentationResource instance to give the user access to the corresponding Medium, and also to the important db_entity method, which returns the selected DbEntity interest for the PresentationMedium's db_entity_key
"""
# The db_entity--We don't expose the DbEntityInterest to the client
db_entity = fields.ToOneField(DbEntityResource, attribute='db_entity', null=False)
# Return the full Medium
medium = fields.ToOneField(MediumResource, attribute='medium', null=False, full=True)
# The configuration of items not directly related to the Medium, such as graph labels. These are usually also
# editable by the user.
configuration = PickledDictField(attribute='configuration', null=True, blank=True, default=lambda: {})
visible_attributes = ListField(attribute='visible_attributes', null=True, blank=True)
creator = fields.ToOneField(UserResource, 'creator', full=True, null=True, readonly=True)
updater = fields.ToOneField(UserResource, 'updater', full=True, null=True, readonly=True)
def dehydrate_medium_context(self, bundle):
# Remove data that isn't needed by the API
return remove_keys(['attributes'])
def hydrate(self, bundle):
"""
Set the user who created the Layer
:param bundle:
:return:
"""
if not bundle.obj.id:
bundle.obj.creator = self.resolve_user(bundle.request.GET)
bundle.obj.updater = self.resolve_user(bundle.request.GET)
return super(PresentationMediumResource, self).hydrate(bundle)
def full_hydrate(self, bundle):
super(PresentationMediumResource, self).full_hydrate(bundle)
if not bundle.data.get('id') and bundle.obj.db_entity_interest.db_entity.origin_instance:
# If cloning, copy the medium_context.attributes
config_entity = bundle.obj.db_entity_interest.config_entity
origin_db_entity = bundle.obj.db_entity_interest.db_entity.origin_instance
presentation_medium = PresentationMedium.objects.get(presentation__config_entity=config_entity, db_entity_key=origin_db_entity.key)
bundle.data['medium']['attributes'] = presentation_medium.medium['attributes']
return bundle
class Meta(FootprintResource.Meta):
resource_name = 'presentation_medium'
always_return_data = True
queryset = PresentationMedium.objects.all()
excludes = ['rendered_medium']
|
jlepird/turing | refs/heads/master | student_code/Kyania/turing_machine.py | 3 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 10:28:35 2015
@author: Community Outreach
"""
import sys
sys.path.append("../")
from tmachine.VirtualHardware import VirtualHardware
tape = [None,1,1, None,1,1,1,1,1,1,1,None]
turing_machine = VirtualHardware(tape_length=1000, init=tape)
running = True
state = 0
while running == True:
read_val = turing_machine.read()
print read_val
print turing_machine.position()
if state == 0:
if read_val == None:
turing_machine.write(None)
turing_machine.moveLeft()
print "nothing"
elif read_val == 1:
turing_machine.write(1)
turing_machine.moveLeft()
print "1"
state=1
elif state == 1:
if read_val == None:
turing_machine.write(None)
turing_machine.moveLeft()
print "nothing"
state=2
elif read_val == 1:
turing_machine.write(1)
turing_machine.moveLeft()
print "1"
elif state == 2:
if read_val == None:
turing_machine.write(None)
turing_machine.moveRight()
print "nothing"
state=3
elif read_val == 1:
turing_machine.write(1)
turing_machine.moveLeft()
print "1"
elif state ==3:
if read_val == None :
turing_machine.write(None)
turing_machine.moveRight()
print "nothing"
elif read_val == 1:
turing_machine.write(1)
turing_machine.moveRight()
print "1"
state=4
elif state ==4:
if read_val == None:
turing_machine.write(None)
turing_machine.moveLeft()
print "nothing"
break
elif read_val == 1:
turing_machine.write(1)
turing_machine.moveRight()
print "1"
print tape
|
Jgarcia-IAS/SAT | refs/heads/master | openerp/addons/account/report/account_aged_partner_balance.py | 73 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class aged_trial_report(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context):
super(aged_trial_report, self).__init__(cr, uid, name, context=context)
self.total_account = []
self.localcontext.update({
'time': time,
'get_lines_with_out_partner': self._get_lines_with_out_partner,
'get_lines': self._get_lines,
'get_total': self._get_total,
'get_direction': self._get_direction,
'get_for_period': self._get_for_period,
'get_company': self._get_company,
'get_currency': self._get_currency,
'get_partners':self._get_partners,
'get_account': self._get_account,
'get_fiscalyear': self._get_fiscalyear,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
ctx = data['form'].get('used_context', {})
ctx.update({'fiscalyear': False, 'all_fiscalyear': True})
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx)
self.direction_selection = data['form'].get('direction_selection', 'past')
self.target_move = data['form'].get('target_move', 'all')
self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
if (data['form']['result_selection'] == 'customer' ):
self.ACCOUNT_TYPE = ['receivable']
elif (data['form']['result_selection'] == 'supplier'):
self.ACCOUNT_TYPE = ['payable']
else:
self.ACCOUNT_TYPE = ['payable','receivable']
return super(aged_trial_report, self).set_context(objects, data, ids, report_type=report_type)
def _get_lines(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT DISTINCT res_partner.id AS id,\
res_partner.name AS name \
FROM res_partner,account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) \
AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND account_account.active\
AND ((reconcile_id IS NULL)\
OR (reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND (l.partner_id=res_partner.id)\
AND (l.date <= %s)\
AND ' + self.query + ' \
ORDER BY res_partner.name', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
partners = self.cr.dictfetchall()
        ## reset the running totals to 0
for i in range(7):
self.total_account.append(0)
#
        # Collect the partner ids so they can be passed as a tuple to the SQL queries below
partner_ids = [x['id'] for x in partners]
if not partner_ids:
return []
# This dictionary will store the debit-credit for all partners, using partner_id as key.
totals = {}
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND ' + self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals[i[0]] = i[1]
# This dictionary will store the future or past of all partners
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids),self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
# Use one query per period and store results in history (a list variable)
# Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids),self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (form[str(i)]['start'], form[str(i)]['stop'])
elif form[str(i)]['start']:
dates_query += ' >= %s)'
args_list += (form[str(i)]['start'],)
else:
dates_query += ' <= %s)'
args_list += (form[str(i)]['stop'],)
args_list += (self.date_from,)
self.cr.execute('''SELECT l.partner_id, SUM(l.debit-l.credit), l.reconcile_partial_id
FROM account_move_line AS l, account_account, account_move am
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)
AND (am.state IN %s)
AND (account_account.type IN %s)
AND (l.partner_id IN %s)
AND ((l.reconcile_id IS NULL)
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))
AND ''' + self.query + '''
AND account_account.active
AND ''' + dates_query + '''
AND (l.date <= %s)
GROUP BY l.partner_id, l.reconcile_partial_id''', args_list)
partners_partial = self.cr.fetchall()
partners_amount = dict((i[0],0) for i in partners_partial)
for partner_info in partners_partial:
if partner_info[2]:
                    # in case of partial reconciliation, we want to keep the remaining amount in the oldest period
self.cr.execute('''SELECT MIN(COALESCE(date_maturity,date)) FROM account_move_line WHERE reconcile_partial_id = %s''', (partner_info[2],))
date = self.cr.fetchall()
partial = False
if 'BETWEEN' in dates_query:
partial = date and args_list[-3] <= date[0][0] <= args_list[-2]
elif '>=' in dates_query:
partial = date and date[0][0] >= form[str(i)]['start']
else:
partial = date and date[0][0] <= form[str(i)]['stop']
if partial:
                        # partial reconciliation
limit_date = 'COALESCE(l.date_maturity,l.date) %s %%s' % '<=' if self.direction_selection == 'past' else '>='
self.cr.execute('''SELECT SUM(l.debit-l.credit)
FROM account_move_line AS l, account_move AS am
WHERE l.move_id = am.id AND am.state in %s
AND l.reconcile_partial_id = %s
AND ''' + limit_date, (tuple(move_state), partner_info[2], self.date_from))
unreconciled_amount = self.cr.fetchall()
partners_amount[partner_info[0]] += unreconciled_amount[0][0]
else:
partners_amount[partner_info[0]] += partner_info[1]
history.append(partners_amount)
for partner in partners:
values = {}
            ## If the chosen direction is the future
if self.direction_selection == 'future':
# Query here is replaced by one query which gets the all the partners their 'before' value
before = False
if future_past.has_key(partner['id']):
before = [ future_past[partner['id']] ]
self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
values['direction'] = before and before[0] or 0.0
elif self.direction_selection == 'past': # Changed this so people could in the future create new direction_selections
# Query here is replaced by one query which gets the all the partners their 'after' value
after = False
if future_past.has_key(partner['id']): # Making sure this partner actually was found by the query
after = [ future_past[partner['id']] ]
self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
values['direction'] = after and after[0] or 0.0
for i in range(5):
during = False
if history[i].has_key(partner['id']):
during = [ history[i][partner['id']] ]
                # add this period's amount to the running totals
self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
total = False
if totals.has_key( partner['id'] ):
total = [ totals[partner['id']] ]
values['total'] = total and total[0] or 0.0
## Add for total
self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
values['name'] = partner['name']
res.append(values)
total = 0.0
totals = {}
for r in res:
total += float(r['total'] or 0.0)
for i in range(5)+['direction']:
totals.setdefault(str(i), 0.0)
totals[str(i)] += float(r[str(i)] or 0.0)
return res
def _get_lines_with_out_partner(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
        ## reset the running totals to 0
for i in range(7):
self.total_account.append(0)
totals = {}
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND ((l.reconcile_id IS NULL) \
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND ' + self.query + '\
AND (l.date <= %s)\
AND account_account.active ',(tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals['Unknown Partner'] = i[0]
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
t = self.cr.fetchall()
for i in t:
future_past['Unknown Partner'] = i[0]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
t = self.cr.fetchall()
for i in t:
future_past['Unknown Partner'] = i[0]
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (form[str(i)]['start'], form[str(i)]['stop'])
elif form[str(i)]['start']:
dates_query += ' > %s)'
args_list += (form[str(i)]['start'],)
else:
dates_query += ' < %s)'
args_list += (form[str(i)]['stop'],)
args_list += (self.date_from,)
self.cr.execute('SELECT SUM(l.debit-l.credit)\
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IS NULL)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active\
AND ' + dates_query + '\
AND (l.date <= %s)\
GROUP BY l.partner_id', args_list)
t = self.cr.fetchall()
d = {}
for i in t:
d['Unknown Partner'] = i[0]
history.append(d)
values = {}
if self.direction_selection == 'future':
before = False
if future_past.has_key('Unknown Partner'):
before = [ future_past['Unknown Partner'] ]
self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
values['direction'] = before and before[0] or 0.0
elif self.direction_selection == 'past':
after = False
if future_past.has_key('Unknown Partner'):
after = [ future_past['Unknown Partner'] ]
self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
values['direction'] = after and after[0] or 0.0
for i in range(5):
during = False
if history[i].has_key('Unknown Partner'):
during = [ history[i]['Unknown Partner'] ]
self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
total = False
if totals.has_key( 'Unknown Partner' ):
total = [ totals['Unknown Partner'] ]
values['total'] = total and total[0] or 0.0
## Add for total
self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
values['name'] = 'Unknown Partner'
if values['total']:
res.append(values)
total = 0.0
totals = {}
for r in res:
total += float(r['total'] or 0.0)
for i in range(5)+['direction']:
totals.setdefault(str(i), 0.0)
totals[str(i)] += float(r[str(i)] or 0.0)
return res
def _get_total(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_direction(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_for_period(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_partners(self,data):
# TODO: deprecated, to remove in trunk
if data['form']['result_selection'] == 'customer':
return self._translate('Receivable Accounts')
elif data['form']['result_selection'] == 'supplier':
return self._translate('Payable Accounts')
elif data['form']['result_selection'] == 'customer_supplier':
return self._translate('Receivable and Payable Accounts')
return ''
class report_agedpartnerbalance(osv.AbstractModel):
_name = 'report.account.report_agedpartnerbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_agedpartnerbalance'
_wrapped_report_class = aged_trial_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
fperazzi/davis | refs/heads/master | python/experiments/eval_all.py | 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation
#-----------------------------------------------------------------------------
# Copyright (c) 2016 Federico Perazzi
# Licensed under the BSD License [see LICENSE for details]
# Written by Federico Perazzi
# ----------------------------------------------------------------------------
"""
Perform full evaluation as reported in the paper and store results.
EXAMPLE:
python tools/eval_all.py
"""
import sys
import h5py
import glob
import argparse
import numpy as np
import os.path as osp
from davis import cfg,log
from davis.dataset import *
from prettytable import PrettyTable as ptable
def parse_args():
parser = argparse.ArgumentParser(
description='Perform full evaluation as reported in the paper.')
parser.add_argument('--compute',
dest='compute',action='store_true',
help='Compute results instead of loading from file.')
# Parse command-line arguments
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
if args.compute:
log.info('Running full evaluation on DAVIS')
log.info('Searching available techniques in: "%s"'%cfg.PATH.SEGMENTATION_DIR)
# Search available techniques within the default output folder
techniques = sorted([osp.splitext(osp.basename(t))[0]
for t in glob.glob(cfg.PATH.SEGMENTATION_DIR+ "/*")])
log.info('Number of techniques being evaluated: %d'%len(techniques))
# Read sequences from file
log.info('Reading sequences from: %s '%osp.basename(cfg.FILES.DB_INFO))
sequences = [s.name for s in db_read_sequences()]
# Compute full evaluation and save results
for technique in techniques:
db_save_eval(db_eval(technique,sequences))
# Read results from file
db_eval_dict = db_read_eval(raw_eval=False)
# Save techniques attributes and results
#db_save_techniques(db_eval_dict)
log.info('Reading available techniques and results from: %s'%
osp.basename(cfg.FILES.DB_BENCHMARK))
db_techniques = db_read_techniques()
# Display results
table = ptable(['Measure']+[t.name for t in db_techniques])
X = np.array([np.hstack([t.J,t.F,t.T])
for t in db_techniques]).T
for row,measure in zip(X,['J(M)','J(O)','J(D)','F(M)','F(O)','F(D)','T(M)']):
table.add_row([measure]+["{: .3f}".format(r) for r in row])
print "\n" + str(table) + "\n"
|
antinet/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/test/actions/src/confirm-dep-files.py | 349 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Confirms presence of files generated by our targets we depend on.
If they exist, create a new file.
Note target's input files are explicitly NOT defined in the gyp file
so they can't easily be passed to this script as args.
"""
import os
import sys
outfile = sys.argv[1] # Example value we expect: deps_all_done_first_123.txt
if (os.path.exists("dep_1.txt") and
os.path.exists("dep_2.txt") and
os.path.exists("dep_3.txt")):
open(outfile, "w")
|
murfz/Sick-Beard | refs/heads/development | cherrypy/process/win32.py | 39 | """Windows service. Requires pywin32."""
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=40)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=40)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=40)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=40)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
"WSPBus %s Event (pid=%r)" %
(state.name, os.getpid()))
self.events[state] = event
return event
def _get_state(self):
return self._state
def _set_state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
state = property(_get_state, _set_state)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError("The given object could not be found: %r" % obj)
control_codes = _ControlCodes({'graceful': 138})
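# Illustrative sketch (not part of the original module): sending a user-defined
# control code to a running service resolves the numeric value via control_codes,
# e.g.
#
#   signal_child('Python Web Service', 'graceful')   # ControlService(..., 138)
#
# while 'stop' and 'restart' map onto the dedicated win32serviceutil helpers.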
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = "Python Web Service"
_svc_display_name_ = "Python Web Service"
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = "pywebsvc"
_exe_args_ = None # Default to no arguments
# Only exists on Windows 2000 or later, ignored on windows NT
_svc_description_ = "Python Web Service"
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
def SvcOther(self, control):
process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
|
AlanRosenthal/appinventor-sources | refs/heads/master | appinventor/lib/blockly/src/i18n/xliff_to_json.py | 80 | #!/usr/bin/python
# Converts .xlf files into .json files for use at http://translatewiki.net.
#
# Copyright 2013 Google Inc.
# https://blockly.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import subprocess
import sys
from xml.dom import minidom
from common import InputError
from common import write_files
# Global variables
args = None # Parsed command-line arguments.
def _parse_trans_unit(trans_unit):
"""Converts a trans-unit XML node into a more convenient dictionary format.
Args:
trans_unit: An XML representation of a .xlf translation unit.
Returns:
A dictionary with useful information about the translation unit.
The returned dictionary is guaranteed to have an entry for 'key' and
may have entries for 'source', 'target', 'description', and 'meaning'
if present in the argument.
Raises:
InputError: A required field was not present.
"""
def get_value(tag_name):
elts = trans_unit.getElementsByTagName(tag_name)
if not elts:
return None
elif len(elts) == 1:
return ''.join([child.toxml() for child in elts[0].childNodes])
else:
raise InputError('', 'Unable to extract ' + tag_name)
result = {}
key = trans_unit.getAttribute('id')
if not key:
raise InputError('', 'id attribute not found')
result['key'] = key
# Get source and target, if present.
try:
result['source'] = get_value('source')
result['target'] = get_value('target')
except InputError, e:
raise InputError(key, e.msg)
# Get notes, using the from value as key and the data as value.
notes = trans_unit.getElementsByTagName('note')
for note in notes:
from_value = note.getAttribute('from')
if from_value and len(note.childNodes) == 1:
result[from_value] = note.childNodes[0].data
else:
raise InputError(key, 'Unable to extract ' + from_value)
return result
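# Illustrative example (XML and values assumed, not from the original source):
# a translation unit such as
#
#   <trans-unit id="1234">
#     <source>Delete</source>
#     <note from="description">Context menu item.</note>
#     <note from="meaning">DELETE_BLOCK</note>
#   </trans-unit>
#
# would be returned by _parse_trans_unit as
#
#   {'key': '1234', 'source': 'Delete', 'target': None,
#    'description': 'Context menu item.', 'meaning': 'DELETE_BLOCK'}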
def _process_file(filename):
"""Builds list of translation units from input file.
Each translation unit in the input file includes:
- an id (opaquely generated by Soy)
- the Blockly name for the message
- the text in the source language (generally English)
- a description for the translator
The Soy and Blockly ids are joined with a hyphen and serve as the
keys in both output files. The value is the corresponding text (in the
<lang>.json file) or the description (in the qqq.json file).
Args:
filename: The name of an .xlf file produced by Closure.
Raises:
IOError: An I/O error occurred with an input or output file.
InputError: The input file could not be parsed or lacked required
fields.
Returns:
A list of dictionaries produced by parse_trans_unit().
"""
try:
results = [] # list of dictionaries (return value)
names = [] # list of names of encountered keys (local variable)
try:
parsed_xml = minidom.parse(filename)
except IOError:
# Don't get caught by below handler
raise
except Exception, e:
print
raise InputError(filename, str(e))
# Make sure needed fields are present and non-empty.
for trans_unit in parsed_xml.getElementsByTagName('trans-unit'):
unit = _parse_trans_unit(trans_unit)
for key in ['description', 'meaning', 'source']:
if not key in unit or not unit[key]:
raise InputError(filename + ':' + unit['key'],
key + ' not found')
if unit['description'].lower() == 'ibid':
if unit['meaning'] not in names:
# If the term has not already been described, the use of 'ibid'
# is an error.
raise InputError(
filename,
'First encountered definition of: ' + unit['meaning']
+ ' has definition: ' + unit['description']
+ '. This error can occur if the definition was not'
+ ' provided on the first appearance of the message'
+ ' or if the source (English-language) messages differ.')
else:
# If term has already been described, 'ibid' was used correctly,
# and we output nothing.
pass
else:
if unit['meaning'] in names:
raise InputError(filename,
'Second definition of: ' + unit['meaning'])
names.append(unit['meaning'])
results.append(unit)
return results
except IOError, e:
print 'Error with file {0}: {1}'.format(filename, e.strerror)
sys.exit(1)
def sort_units(units, templates):
"""Sorts the translation units by their definition order in the template.
Args:
units: A list of dictionaries produced by parse_trans_unit()
that have a non-empty value for the key 'meaning'.
templates: A string containing the Soy templates in which each of
the units' meanings is defined.
Returns:
A new list of translation units, sorted by the order in which
their meaning is defined in the templates.
Raises:
InputError: If a meaning definition cannot be found in the
templates.
"""
def key_function(unit):
match = re.search(
'\\smeaning\\s*=\\s*"{0}"\\s'.format(unit['meaning']),
templates)
if match:
return match.start()
else:
raise InputError(args.templates,
'msg definition for meaning not found: ' +
unit['meaning'])
return sorted(units, key=key_function)
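# A minimal sketch of the ordering behaviour (hypothetical template text, not
# taken from any real Soy file): units come back in the order in which their
# meanings appear in the templates string.
#
#   templates = ('{msg meaning="HELLO" desc="greeting"}Hi{/msg} '
#                '{msg meaning="BYE" desc="farewell"}Bye{/msg}')
#   units = [{'meaning': 'BYE'}, {'meaning': 'HELLO'}]
#   sort_units(units, templates)
#   # -> [{'meaning': 'HELLO'}, {'meaning': 'BYE'}]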
def main():
"""Parses arguments and processes the specified file.
Raises:
IOError: An I/O error occurred with an input or output file.
InputError: Input files lacked required fields.
"""
# Set up argument parser.
parser = argparse.ArgumentParser(description='Create translation files.')
parser.add_argument(
'--author',
default='Ellen Spertus <ellen.spertus@gmail.com>',
help='name and email address of contact for translators')
parser.add_argument('--lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--output_dir', default='json',
help='relative directory for output files')
parser.add_argument('--xlf', help='file containing xlf definitions')
parser.add_argument('--templates', default=['template.soy'], nargs='+',
help='relative path to Soy templates, comma or space '
'separated (used for ordering messages)')
global args
args = parser.parse_args()
# Make sure output_dir ends with slash.
if (not args.output_dir.endswith(os.path.sep)):
args.output_dir += os.path.sep
# Process the input file, and sort the entries.
units = _process_file(args.xlf)
files = []
for arg in args.templates:
for filename in arg.split(','):
filename = filename.strip()
if filename:
with open(filename) as myfile:
files.append(' '.join(line.strip() for line in myfile))
sorted_units = sort_units(units, ' '.join(files))
# Write the output files.
write_files(args.author, args.lang, args.output_dir, sorted_units, True)
# Delete the input .xlf file.
os.remove(args.xlf)
print('Removed ' + args.xlf)
if __name__ == '__main__':
main()
|
tinkhaven-organization/odoo | refs/heads/8.0 | addons/account/wizard/account_open_closed_fiscalyear.py | 237 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_open_closed_fiscalyear(osv.osv_memory):
_name = "account.open.closed.fiscalyear"
_description = "Choose Fiscal Year"
_columns = {
'fyear_id': fields.many2one('account.fiscalyear', \
'Fiscal Year', required=True, help='Select the fiscal year whose end-of-year journal entries you want to remove'),
}
def remove_entries(self, cr, uid, ids, context=None):
move_obj = self.pool.get('account.move')
data = self.browse(cr, uid, ids, context=context)[0]
period_journal = data.fyear_id.end_journal_period_id or False
if not period_journal:
raise osv.except_osv(_('Error!'), _("You have to set the 'End of Year Entries Journal' for this Fiscal Year which is set after generating opening entries from 'Generate Opening Entries'."))
if period_journal.period_id.state == 'done':
raise osv.except_osv(_('Error!'), _("You can not cancel closing entries if the 'End of Year Entries Journal' period is closed."))
ids_move = move_obj.search(cr, uid, [('journal_id','=',period_journal.journal_id.id),('period_id','=',period_journal.period_id.id)])
if ids_move:
cr.execute('delete from account_move where id IN %s', (tuple(ids_move),))
self.invalidate_cache(cr, uid, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mmardini/django | refs/heads/master | django/contrib/gis/db/models/manager.py | 83 | from django.db.models.manager import Manager
from django.contrib.gis.db.models.query import GeoQuerySet
class GeoManager(Manager):
"Overrides Manager to return Geographic QuerySets."
# This manager should be used for queries on related fields
# so that geometry columns on Oracle and MySQL are selected
# properly.
use_for_related_fields = True
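# A minimal usage sketch (hypothetical model, for illustration only):
#
#   from django.contrib.gis.db import models
#
#   class City(models.Model):
#       name = models.CharField(max_length=50)
#       point = models.PointField()
#       objects = models.GeoManager()
#
# Queries such as City.objects.all() then return a GeoQuerySet, so the
# spatial methods proxied below (kml(), transform(), ...) are available.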
def get_queryset(self):
return GeoQuerySet(self.model, using=self._db)
def area(self, *args, **kwargs):
return self.get_queryset().area(*args, **kwargs)
def centroid(self, *args, **kwargs):
return self.get_queryset().centroid(*args, **kwargs)
def collect(self, *args, **kwargs):
return self.get_queryset().collect(*args, **kwargs)
def difference(self, *args, **kwargs):
return self.get_queryset().difference(*args, **kwargs)
def distance(self, *args, **kwargs):
return self.get_queryset().distance(*args, **kwargs)
def envelope(self, *args, **kwargs):
return self.get_queryset().envelope(*args, **kwargs)
def extent(self, *args, **kwargs):
return self.get_queryset().extent(*args, **kwargs)
def extent3d(self, *args, **kwargs):
return self.get_queryset().extent3d(*args, **kwargs)
def force_rhr(self, *args, **kwargs):
return self.get_queryset().force_rhr(*args, **kwargs)
def geohash(self, *args, **kwargs):
return self.get_queryset().geohash(*args, **kwargs)
def geojson(self, *args, **kwargs):
return self.get_queryset().geojson(*args, **kwargs)
def gml(self, *args, **kwargs):
return self.get_queryset().gml(*args, **kwargs)
def intersection(self, *args, **kwargs):
return self.get_queryset().intersection(*args, **kwargs)
def kml(self, *args, **kwargs):
return self.get_queryset().kml(*args, **kwargs)
def length(self, *args, **kwargs):
return self.get_queryset().length(*args, **kwargs)
def make_line(self, *args, **kwargs):
return self.get_queryset().make_line(*args, **kwargs)
def mem_size(self, *args, **kwargs):
return self.get_queryset().mem_size(*args, **kwargs)
def num_geom(self, *args, **kwargs):
return self.get_queryset().num_geom(*args, **kwargs)
def num_points(self, *args, **kwargs):
return self.get_queryset().num_points(*args, **kwargs)
def perimeter(self, *args, **kwargs):
return self.get_queryset().perimeter(*args, **kwargs)
def point_on_surface(self, *args, **kwargs):
return self.get_queryset().point_on_surface(*args, **kwargs)
def reverse_geom(self, *args, **kwargs):
return self.get_queryset().reverse_geom(*args, **kwargs)
def scale(self, *args, **kwargs):
return self.get_queryset().scale(*args, **kwargs)
def snap_to_grid(self, *args, **kwargs):
return self.get_queryset().snap_to_grid(*args, **kwargs)
def svg(self, *args, **kwargs):
return self.get_queryset().svg(*args, **kwargs)
def sym_difference(self, *args, **kwargs):
return self.get_queryset().sym_difference(*args, **kwargs)
def transform(self, *args, **kwargs):
return self.get_queryset().transform(*args, **kwargs)
def translate(self, *args, **kwargs):
return self.get_queryset().translate(*args, **kwargs)
def union(self, *args, **kwargs):
return self.get_queryset().union(*args, **kwargs)
def unionagg(self, *args, **kwargs):
return self.get_queryset().unionagg(*args, **kwargs)
|
longjon/numpy | refs/heads/master | numpy/linalg/lapack_lite/fortran.py | 132 | from __future__ import division, absolute_import, print_function
import re
import itertools
def isBlank(line):
return not line
def isLabel(line):
return line[0].isdigit()
def isComment(line):
return line[0] != ' '
def isContinuation(line):
return line[5] != ' '
COMMENT, STATEMENT, CONTINUATION = 0, 1, 2
def lineType(line):
"""Return the type of a line of Fortan code."""
if isBlank(line):
return COMMENT
elif isLabel(line):
return STATEMENT
elif isComment(line):
return COMMENT
elif isContinuation(line):
return CONTINUATION
else:
return STATEMENT
class LineIterator(object):
"""LineIterator(iterable)
Return rstrip()'d lines from iterable, while keeping a count of the
line number in the .lineno attribute.
"""
def __init__(self, iterable):
object.__init__(self)
self.iterable = iter(iterable)
self.lineno = 0
def __iter__(self):
return self
def __next__(self):
self.lineno += 1
line = next(self.iterable)
line = line.rstrip()
return line
next = __next__
class PushbackIterator(object):
"""PushbackIterator(iterable)
Return an iterator into which items can be pushed back.
Call the .pushback(item) method to have item returned as the next
value of .next().
"""
def __init__(self, iterable):
object.__init__(self)
self.iterable = iter(iterable)
self.buffer = []
def __iter__(self):
return self
def __next__(self):
if self.buffer:
return self.buffer.pop()
else:
return next(self.iterable)
def pushback(self, item):
self.buffer.append(item)
next = __next__
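# Illustration (not part of the original module): pushed-back items are
# yielded before the wrapped iterable resumes.
#
#   it = PushbackIterator(iter([1, 2, 3]))
#   next(it)        # -> 1
#   it.pushback(1)
#   list(it)        # -> [1, 2, 3]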
def fortranSourceLines(fo):
"""Return an iterator over statement lines of a Fortran source file.
Comment and blank lines are stripped out, and continuation lines are
merged.
"""
numberingiter = LineIterator(fo)
# add an extra '' at the end
with_extra = itertools.chain(numberingiter, [''])
pushbackiter = PushbackIterator(with_extra)
for line in pushbackiter:
t = lineType(line)
if t == COMMENT:
continue
elif t == STATEMENT:
lines = [line]
# this is where we need the extra '', so we don't finish reading
# the iterator when we don't want to handle that
for next_line in pushbackiter:
t = lineType(next_line)
if t == CONTINUATION:
lines.append(next_line[6:])
else:
pushbackiter.pushback(next_line)
break
yield numberingiter.lineno, ''.join(lines)
else:
raise ValueError("jammed: continuation line not expected: %s:%d" %
(fo.name, numberingiter.lineno))
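# Example behaviour (hypothetical input): given the three source lines
#
#   C this comment line is dropped
#         CALL FOO(A,
#        $         B)
#
# the generator yields a single (lineno, statement) pair whose statement is
# the two code lines joined, i.e. '      CALL FOO(A,         B)'.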
def getDependencies(filename):
"""For a Fortran source file, return a list of routines declared as EXTERNAL
in it.
"""
fo = open(filename)
external_pat = re.compile(r'^\s*EXTERNAL\s', re.I)
routines = []
for lineno, line in fortranSourceLines(fo):
m = external_pat.match(line)
if m:
names = line[m.end():].strip().split(',')
names = [n.strip().lower() for n in names]
names = [n for n in names if n]
routines.extend(names)
fo.close()
return routines
|
teoreteetik/api-snippets | refs/heads/master | rest/message/instance-get-example-1/instance-get-example-1.5.x.py | 1 | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import TwilioRestClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.get("MM800f449d0399ed014aae2bcc0cc2f2ec")
print(message.body.encode('utf-8'))
|
ARudiuk/mne-python | refs/heads/master | mne/io/bti/read.py | 8 | # Authors: Denis A. Engemann <denis.engemann@gmail.com>
# simplified BSD-3 license
import numpy as np
def _unpack_matrix(fid, rows, cols, dtype, out_dtype):
""" Aux Function """
dtype = np.dtype(dtype)
string = fid.read(int(dtype.itemsize * rows * cols))
out = np.fromstring(string, dtype=dtype).reshape(
rows, cols).astype(out_dtype)
return out
def _unpack_simple(fid, dtype, out_dtype):
""" Aux Function """
dtype = np.dtype(dtype)
string = fid.read(dtype.itemsize)
out = np.fromstring(string, dtype=dtype).astype(out_dtype)
if len(out) > 0:
out = out[0]
return out
def read_char(fid, count=1):
" Read character from bti file """
return _unpack_simple(fid, '>S%s' % count, 'S')
def read_bool(fid):
""" Read bool value from bti file """
return _unpack_simple(fid, '>?', np.bool)
def read_uint8(fid):
""" Read unsigned 8bit integer from bti file """
return _unpack_simple(fid, '>u1', np.uint8)
def read_int8(fid):
""" Read 8bit integer from bti file """
return _unpack_simple(fid, '>i1', np.int8)
def read_uint16(fid):
""" Read unsigned 16bit integer from bti file """
return _unpack_simple(fid, '>u2', np.uint16)
def read_int16(fid):
""" Read 16bit integer from bti file """
return _unpack_simple(fid, '>i2', np.int16)
def read_uint32(fid):
""" Read unsigned 32bit integer from bti file """
return _unpack_simple(fid, '>u4', np.uint32)
def read_int32(fid):
""" Read 32bit integer from bti file """
return _unpack_simple(fid, '>i4', np.int32)
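# Illustration only (using an in-memory buffer; not part of the original
# module): the '>' in each dtype string marks the data as big-endian.
#
#   import io
#   read_int32(io.BytesIO(b'\x00\x00\x00\x07'))   # -> 7
#   read_uint16(io.BytesIO(b'\x01\x00'))          # -> 256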
def read_uint64(fid):
""" Read unsigned 64bit integer from bti file """
return _unpack_simple(fid, '>u8', np.uint64)
def read_int64(fid):
""" Read 64bit integer from bti file """
return _unpack_simple(fid, '>i8', np.int64)
def read_float(fid):
""" Read 32bit float from bti file """
return _unpack_simple(fid, '>f4', np.float32)
def read_double(fid):
""" Read 64bit float from bti file """
return _unpack_simple(fid, '>f8', np.float64)
def read_int16_matrix(fid, rows, cols):
""" Read 16bit integer matrix from bti file """
return _unpack_matrix(fid, rows, cols, dtype='>i2',
out_dtype=np.int16)
def read_float_matrix(fid, rows, cols):
""" Read 32bit float matrix from bti file """
return _unpack_matrix(fid, rows, cols, dtype='>f4',
out_dtype=np.float32)
def read_double_matrix(fid, rows, cols):
""" Read 64bit float matrix from bti file """
return _unpack_matrix(fid, rows, cols, dtype='>f8',
out_dtype=np.float64)
def read_transform(fid):
""" Read 64bit float matrix transform from bti file """
return read_double_matrix(fid, rows=4, cols=4)
|
mixman/djangodev | refs/heads/master | django/contrib/staticfiles/management/commands/findstatic.py | 244 | import os
from optparse import make_option
from django.core.management.base import LabelCommand
from django.utils.encoding import smart_str, smart_unicode
from django.contrib.staticfiles import finders
class Command(LabelCommand):
help = "Finds the absolute paths for the given static file(s)."
args = "[file ...]"
label = 'static file'
option_list = LabelCommand.option_list + (
make_option('--first', action='store_false', dest='all', default=True,
help="Only return the first match for each static file."),
)
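# Typical invocations (illustrative):
#   ./manage.py findstatic css/base.css admin/js/core.js
#   ./manage.py findstatic css/base.css --first   # only the first match per file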
def handle_label(self, path, **options):
verbosity = int(options.get('verbosity', 1))
result = finders.find(path, all=options['all'])
path = smart_unicode(path)
if result:
if not isinstance(result, (list, tuple)):
result = [result]
output = u'\n '.join(
(smart_unicode(os.path.realpath(path)) for path in result))
self.stdout.write(
smart_str(u"Found '%s' here:\n %s\n" % (path, output)))
else:
if verbosity >= 1:
self.stderr.write(
smart_str("No matching file found for '%s'.\n" % path))
|
fbossy/SickRage | refs/heads/master | sickbeard/metadata/tivo.py | 9 | # Author: Nic Wolfe <nic@wolfeden.ca>
# Author: Gordon Turner <gordonturner@gordonturner.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import os
import sickbeard
from sickbeard import logger, helpers
from sickbeard.metadata import generic
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex, ShowNotFoundException
class TIVOMetadata(generic.GenericMetadata):
"""
Metadata generation class for TIVO
The following file structure is used:
show_root/Season ##/filename.ext (*)
show_root/Season ##/.meta/filename.ext.txt (episode metadata)
This class only generates episode-specific metadata files; it does NOT generate a default.txt file.
"""
def __init__(self,
show_metadata=False,
episode_metadata=False,
fanart=False,
poster=False,
banner=False,
episode_thumbnails=False,
season_posters=False,
season_banners=False,
season_all_poster=False,
season_all_banner=False):
generic.GenericMetadata.__init__(self,
show_metadata,
episode_metadata,
fanart,
poster,
banner,
episode_thumbnails,
season_posters,
season_banners,
season_all_poster,
season_all_banner)
self.name = 'TIVO'
self._ep_nfo_extension = "txt"
# web-ui metadata template
self.eg_show_metadata = "<i>not supported</i>"
self.eg_episode_metadata = "Season##\\.meta\\<i>filename</i>.ext.txt"
self.eg_fanart = "<i>not supported</i>"
self.eg_poster = "<i>not supported</i>"
self.eg_banner = "<i>not supported</i>"
self.eg_episode_thumbnails = "<i>not supported</i>"
self.eg_season_posters = "<i>not supported</i>"
self.eg_season_banners = "<i>not supported</i>"
self.eg_season_all_poster = "<i>not supported</i>"
self.eg_season_all_banner = "<i>not supported</i>"
# Override with empty methods for unsupported features
def retrieveShowMetadata(self, folder):
# no show metadata generated, we abort this lookup function
return (None, None, None)
def create_show_metadata(self, show_obj, force=False):
pass
def update_show_indexer_metadata(self, show_obj):
pass
def get_show_file_path(self, show_obj):
pass
def create_fanart(self, show_obj):
pass
def create_poster(self, show_obj):
pass
def create_banner(self, show_obj):
pass
def create_episode_thumb(self, ep_obj):
pass
def get_episode_thumb_path(self, ep_obj):
pass
def create_season_posters(self, ep_obj):
pass
def create_season_banners(self, ep_obj):
pass
def create_season_all_poster(self, show_obj):
pass
def create_season_all_banner(self, show_obj):
pass
# Override generic class
def get_episode_file_path(self, ep_obj):
"""
Returns a full show dir/.meta/episode.txt path for Tivo
episode metadata files.
Note that pyTivo requires the metadata filename to include the original extension,
i.e. if the episode name is foo.avi, the metadata name is foo.avi.txt
ep_obj: a TVEpisode object to get the path for
"""
if ek(os.path.isfile, ep_obj.location):
metadata_file_name = ek(os.path.basename, ep_obj.location) + "." + self._ep_nfo_extension
metadata_dir_name = ek(os.path.join, ek(os.path.dirname, ep_obj.location), '.meta')
metadata_file_path = ek(os.path.join, metadata_dir_name, metadata_file_name)
else:
logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG)
return ''
return metadata_file_path
def _ep_data(self, ep_obj):
"""
Creates a key value structure for a Tivo episode metadata file and
returns the resulting data object.
ep_obj: a TVEpisode instance to create the metadata file for.
Lookup the show in http://thetvdb.com/ using the python library:
https://github.com/dbr/indexer_api/
The results are saved in the object myShow.
The key values for the tivo metadata file are from:
http://pytivo.sourceforge.net/wiki/index.php/Metadata
"""
data = ""
eps_to_write = [ep_obj] + ep_obj.relatedEps
indexer_lang = ep_obj.show.lang
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(ep_obj.show.indexer).api_params.copy()
lINDEXER_API_PARMS['actors'] = True
if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
lINDEXER_API_PARMS['language'] = indexer_lang
if ep_obj.show.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
myShow = t[ep_obj.show.indexerid]
except sickbeard.indexer_shownotfound, e:
raise ShowNotFoundException(str(e))
except sickbeard.indexer_error, e:
logger.log(u"Unable to connect to " + sickbeard.indexerApi(
ep_obj.show.indexer).name + " while creating meta files - skipping - " + str(e), logger.ERROR)
return False
for curEpToWrite in eps_to_write:
try:
myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(
curEpToWrite.episode) + " on " + sickbeard.indexerApi(
ep_obj.show.indexer).name + "... has it been removed? Should I delete from db?")
return None
if getattr(myEp, 'firstaired', None) is None and ep_obj.season == 0:
myEp["firstaired"] = str(datetime.date.fromordinal(1))
if getattr(myEp, 'episodename', None) is None or getattr(myEp, 'firstaired', None) is None:
return None
if getattr(myShow, 'seriesname', None) is not None:
data += ("title : " + myShow["seriesname"] + "\n")
data += ("seriesTitle : " + myShow["seriesname"] + "\n")
data += ("episodeTitle : " + curEpToWrite._format_pattern('%Sx%0E %EN') + "\n")
# This should be entered for episodic shows and omitted for movies. The standard tivo format is to enter
# the season number followed by the episode number for that season. For example, enter 201 for season 2
# episode 01.
# This only shows up if you go into the Details from the Program screen.
# This seems to disappear once the video is transferred to TiVo.
# NOTE: This may not be the correct format (the season is missing), but it is left as is based on the description from the wiki.
data += ("episodeNumber : " + str(curEpToWrite.episode) + "\n")
# Must be entered as true or false. If true, the year from originalAirDate will be shown in parentheses
# after the episode's title and before the description on the Program screen.
# FIXME: Hardcode isEpisode to true for now, not sure how to handle movies
data += ("isEpisode : true\n")
# Write the synopsis of the video here
# Microsoft Word's smartquotes can die in a fire.
sanitizedDescription = curEpToWrite.description
# Replace double curly quotes
sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"")
# Replace single curly quotes
sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(
u"\u02BC", "'")
data += ("description : " + sanitizedDescription + "\n")
# Usually starts with "SH" and followed by 6-8 digits.
# Tivo uses zap2it for thier data, so the series id is the zap2it_id.
if getattr(myShow, 'zap2it_id', None) is not None:
data += ("seriesId : " + myShow["zap2it_id"] + "\n")
# This is the call sign of the channel the episode was recorded from.
if getattr(myShow, 'network', None) is not None:
data += ("callsign : " + myShow["network"] + "\n")
# This must be entered as yyyy-mm-ddThh:mm:ssZ (the t is capitalized and never changes, the Z is also
# capitalized and never changes). This is the original air date of the episode.
# NOTE: Hard coded the time to T00:00:00Z as we really don't know when during the day the first run happened.
if curEpToWrite.airdate != datetime.date.fromordinal(1):
data += ("originalAirDate : " + str(curEpToWrite.airdate) + "T00:00:00Z\n")
# This shows up at the beginning of the description on the Program screen and on the Details screen.
if getattr(myShow, 'actors', None) is not None:
for actor in myShow["actors"].split('|'):
if actor:
data += ("vActor : " + actor + "\n")
# This is shown on both the Program screen and the Details screen.
if getattr(myEp, 'rating', None) is not None:
try:
rating = float(myEp['rating'])
except ValueError:
rating = 0.0
# convert 10 to 4 star rating. 4 * rating / 10
# only whole numbers or half numbers work. multiply by 2, round, divide by 2.0
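# e.g. (illustrative) a 10-point rating of 8.6 -> round(6.88) / 2.0 = 3.5 stars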
rating = round(8 * rating / 10) / 2.0
data += ("starRating : " + str(rating) + "\n")
# This is shown on both the Program screen and the Details screen.
# It uses the standard TV rating system of: TV-Y7, TV-Y, TV-G, TV-PG, TV-14, TV-MA and TV-NR.
if getattr(myShow, 'contentrating', None) is not None:
data += ("tvRating : " + str(myShow["contentrating"]) + "\n")
# This field can be repeated as many times as necessary or omitted completely.
if ep_obj.show.genre:
for genre in ep_obj.show.genre.split('|'):
if genre:
data += ("vProgramGenre : " + str(genre) + "\n")
# NOTE: The following are metadata keywords are not used
# displayMajorNumber
# showingBits
# displayMinorNumber
# colorCode
# vSeriesGenre
# vGuestStar, vDirector, vExecProducer, vProducer, vWriter, vHost, vChoreographer
# partCount
# partIndex
return data
def write_ep_file(self, ep_obj):
"""
Generates and writes ep_obj's metadata under the given path with the
given filename root. Uses the episode's name with the extension in
_ep_nfo_extension.
ep_obj: TVEpisode object for which to create the metadata
file_name_path: The file name to use for this metadata. Note that the extension
will be automatically added based on _ep_nfo_extension. This should
include an absolute path.
"""
data = self._ep_data(ep_obj)
if not data:
return False
nfo_file_path = self.get_episode_file_path(ep_obj)
nfo_file_dir = ek(os.path.dirname, nfo_file_path)
try:
if not ek(os.path.isdir, nfo_file_dir):
logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
ek(os.makedirs, nfo_file_dir)
helpers.chmodAsParent(nfo_file_dir)
logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG)
with ek(open, nfo_file_path, 'w') as nfo_file:
# Calling encode directly, b/c often descriptions have wonky characters.
nfo_file.write(data.encode("utf-8"))
helpers.chmodAsParent(nfo_file_path)
except EnvironmentError, e:
logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
logger.ERROR)
return False
return True
# present a standard "interface" from the module
metadata_class = TIVOMetadata
|
elgambitero/FreeCAD_sf_master | refs/heads/master | src/Mod/Assembly/InitGui.py | 19 | # Assembly gui init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
class AssemblyWorkbench ( Workbench ):
"Assembly workbench object"
Icon = """
/* XPM */
static const char *Assembly_Box[]={
"16 16 3 1",
". c None",
"# c #000000",
"a c #c6c642",
"................",
".......#######..",
"......#aaaaa##..",
".....#aaaaa###..",
"....#aaaaa##a#..",
"...#aaaaa##aa#..",
"..#aaaaa##aaa#..",
".########aaaa#..",
".#aaaaa#aaaaa#..",
".#aaaaa#aaaa##..",
".#aaaaa#aaa##...",
".#aaaaa#aa##....",
".#aaaaa#a##... .",
".#aaaaa###......",
".########.......",
"................"};
"""
MenuText = "Assembly"
ToolTip = "Assembly workbench"
def Initialize(self):
# load the module
import AssemblyGui
import Assembly
def GetClassName(self):
return "AssemblyGui::Workbench"
Gui.addWorkbench(AssemblyWorkbench())
|
JioCloud/nova | refs/heads/master | nova/tests/unit/virt/xenapi/test_vm_utils.py | 10 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
from eventlet import greenthread
import fixtures
import mock
from mox3 import mox
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import timeutils
from oslo_utils import units
import six
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import vm_mode
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.virt.xenapi import stubs
from nova.tests.unit.virt.xenapi import test_xenapi
from nova import utils
from nova.virt import hardware
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils
CONF = cfg.CONF
XENSM_TYPE = 'xensm'
ISCSI_TYPE = 'iscsi'
def get_fake_connection_data(sr_type):
fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
'server': 'myserver',
'serverpath': '/local/scratch/myname',
'sr_type': 'nfs',
'introduce_sr_keys': ['server',
'serverpath',
'sr_type'],
'vdi_uuid': 'falseVDI'},
ISCSI_TYPE: {'volume_id': 'fake_volume_id',
'target_lun': 1,
'target_iqn': 'fake_iqn:volume-fake_volume_id',
'target_portal': u'localhost:3260',
'target_discovered': False}, }
return fakes[sr_type]
def _get_fake_session(error=None):
session = mock.Mock()
xenapi_session.apply_session_helpers(session)
if error is not None:
class FakeException(Exception):
details = [error, "a", "b", "c"]
session.XenAPI.Failure = FakeException
session.call_xenapi.side_effect = FakeException
return session
@contextlib.contextmanager
def contextified(result):
yield result
def _fake_noop(*args, **kwargs):
return
class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
pass
class LookupTestCase(VMUtilsTestBase):
def setUp(self):
super(LookupTestCase, self).setUp()
self.session = self.mox.CreateMockAnything('Fake Session')
self.name_label = 'my_vm'
def _do_mock(self, result):
self.session.call_xenapi(
"VM.get_by_name_label", self.name_label).AndReturn(result)
self.mox.ReplayAll()
def test_normal(self):
self._do_mock(['x'])
result = vm_utils.lookup(self.session, self.name_label)
self.assertEqual('x', result)
def test_no_result(self):
self._do_mock([])
result = vm_utils.lookup(self.session, self.name_label)
self.assertIsNone(result)
def test_too_many(self):
self._do_mock(['a', 'b'])
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label)
def test_rescue_none(self):
self.session.call_xenapi(
"VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
self._do_mock(['x'])
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('x', result)
def test_rescue_found(self):
self.session.call_xenapi(
"VM.get_by_name_label",
self.name_label + '-rescue').AndReturn(['y'])
self.mox.ReplayAll()
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('y', result)
def test_rescue_too_many(self):
self.session.call_xenapi(
"VM.get_by_name_label",
self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
self.mox.ReplayAll()
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label,
check_rescue=True)
class GenerateConfigDriveTestCase(VMUtilsTestBase):
def test_no_admin_pass(self):
instance = {}
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr('session').AndReturn('sr_ref')
self.mox.StubOutWithMock(vm_utils, 'create_vdi')
vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
'configdrive',
64 * units.Mi).AndReturn('vdi_ref')
self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
vm_utils.vdi_attached_here(
'session', 'vdi_ref', read_only=False).AndReturn(
contextified('mounted_dev'))
class FakeInstanceMetadata(object):
def __init__(_self, instance, content=None, extra_md=None,
network_info=None):
self.assertEqual(network_info, "nw_info")
def metadata_for_config_drive(_self):
return []
self.useFixture(fixtures.MonkeyPatch(
'nova.api.metadata.base.InstanceMetadata',
FakeInstanceMetadata))
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
'-allow-lowercase', '-allow-multidot', '-l',
'-publisher', mox.IgnoreArg(), '-quiet',
'-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
attempts=1, run_as_root=False).AndReturn(None)
utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), run_as_root=True).AndReturn(None)
self.mox.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
bootable=False, read_only=True).AndReturn(None)
self.mox.ReplayAll()
# And the actual call we're testing
vm_utils.generate_configdrive('session', instance, 'vm_ref',
'userdevice', "nw_info")
@mock.patch.object(vm_utils, "destroy_vdi")
@mock.patch.object(vm_utils, "vdi_attached_here")
@mock.patch.object(vm_utils, "create_vdi")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
mock_destroy):
mock_create_vdi.return_value = 'vdi_ref'
mock_attached.side_effect = test.TestingException
mock_destroy.side_effect = exception.StorageError(reason="")
instance = {"uuid": "asdf"}
self.assertRaises(test.TestingException,
vm_utils.generate_configdrive,
'session', instance, 'vm_ref', 'userdevice',
'nw_info')
mock_destroy.assert_called_once_with('session', 'vdi_ref')
class XenAPIGetUUID(VMUtilsTestBase):
def test_get_this_vm_uuid_new_kernel(self):
self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
vm_utils._get_sys_hypervisor_uuid().AndReturn(
'2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
self.mox.ReplayAll()
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
vm_utils.get_this_vm_uuid(None))
self.mox.VerifyAll()
def test_get_this_vm_uuid_old_kernel_reboot(self):
self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
self.mox.StubOutWithMock(utils, 'execute')
vm_utils._get_sys_hypervisor_uuid().AndRaise(
IOError(13, 'Permission denied'))
utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
('27', ''))
utils.execute('xenstore-read', '/local/domain/27/vm',
run_as_root=True).AndReturn(
('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))
self.mox.ReplayAll()
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
vm_utils.get_this_vm_uuid(None))
self.mox.VerifyAll()
class FakeSession(object):
def call_xenapi(self, *args):
pass
def call_plugin(self, *args):
pass
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
pass
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, *args, **kwargs):
pass
class FetchVhdImageTestCase(VMUtilsTestBase):
def setUp(self):
super(FetchVhdImageTestCase, self).setUp()
self.context = context.get_admin_context()
self.context.auth_token = 'auth_token'
self.session = FakeSession()
self.instance = {"uuid": "uuid"}
self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
vm_utils.get_sr_path(self.session).AndReturn('sr_path')
def _stub_glance_download_vhd(self, raise_exc=None):
self.mox.StubOutWithMock(
self.session, 'call_plugin_serialized_with_retry')
func = self.session.call_plugin_serialized_with_retry(
'glance', 'download_vhd', 0, mox.IgnoreArg(), mox.IgnoreArg(),
extra_headers={'X-Service-Catalog': '[]',
'X-Auth-Token': 'auth_token',
'X-Roles': '',
'X-Tenant-Id': None,
'X-User-Id': None,
'X-Identity-Status': 'Confirmed'},
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path')
if raise_exc:
func.AndRaise(raise_exc)
else:
func.AndReturn({'root': {'uuid': 'vdi'}})
def _stub_bittorrent_download_vhd(self, raise_exc=None):
self.mox.StubOutWithMock(
self.session, 'call_plugin_serialized')
func = self.session.call_plugin_serialized(
'bittorrent', 'download_vhd',
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path',
torrent_download_stall_cutoff=600,
torrent_listen_port_start=6881,
torrent_listen_port_end=6891,
torrent_max_last_accessed=86400,
torrent_max_seeder_processes_per_host=1,
torrent_seed_chance=1.0,
torrent_seed_duration=3600,
torrent_url='http://foo/image_id.torrent'
)
if raise_exc:
func.AndRaise(raise_exc)
else:
func.AndReturn({'root': {'uuid': 'vdi'}})
def test_fetch_vhd_image_works_with_glance(self):
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(
self.context, self.session, self.instance, "vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_fetch_vhd_image_works_with_bittorrent(self):
cfg.CONF.import_opt('torrent_base_url',
'nova.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(True)
self._stub_bittorrent_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi").AndRaise(exception.FlavorDiskTooSmall)
self.mox.StubOutWithMock(self.session, 'call_xenapi')
self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
vm_utils.destroy_vdi(self.session,
"ref").AndRaise(exception.StorageError(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.FlavorDiskTooSmall,
vm_utils._fetch_vhd_image, self.context, self.session,
self.instance, 'image_id')
self.mox.VerifyAll()
def test_fallback_to_default_handler(self):
cfg.CONF.import_opt('torrent_base_url',
'nova.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(True)
self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)
vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
vm_utils.get_sr_path(self.session).AndReturn('sr_path')
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_default_handler_does_not_fallback_to_itself(self):
cfg.CONF.import_opt('torrent_base_url',
'nova.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd(raise_exc=RuntimeError)
self.mox.ReplayAll()
self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
self.context, self.session, self.instance, 'image_id')
self.mox.VerifyAll()
class TestImageCompression(VMUtilsTestBase):
def test_image_compression(self):
# Testing for nova.conf, too low, negative, and a correct value.
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=0, group='xenserver')
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=-6, group='xenserver')
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=6, group='xenserver')
self.assertEqual(vm_utils.get_compression_level(), 6)
class ResizeHelpersTestCase(VMUtilsTestBase):
def test_repair_filesystem(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('e2fsck', '-f', "-y", "fakepath",
run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
("size is: 42", ""))
self.mox.ReplayAll()
vm_utils._repair_filesystem("fakepath")
def _call_tune2fs_remove_journal(self, path):
utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
def _call_tune2fs_add_journal(self, path):
utils.execute("tune2fs", "-j", path, run_as_root=True)
def _call_parted_mkpart(self, path, start, end):
utils.execute('parted', '--script', path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', path, 'mkpart',
'primary', '%ds' % start, '%ds' % end, run_as_root=True)
def _call_parted_boot_flag(self, path):
utils.execute('parted', '--script', path, 'set', '1',
'boot', 'on', run_as_root=True)
def test_resize_part_and_fs_down_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
self._call_parted_mkpart(dev_path, 0, 9)
self._call_parted_boot_flag(dev_path)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")
def test_log_progress_if_required(self):
self.mox.StubOutWithMock(vm_utils.LOG, "debug")
vm_utils.LOG.debug("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": 50.0, "left": 1})
current = timeutils.utcnow()
timeutils.set_time_override(current)
timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
self.mox.ReplayAll()
vm_utils._log_progress_if_required(1, current, 2)
def test_log_progress_if_not_required(self):
self.mox.StubOutWithMock(vm_utils.LOG, "debug")
current = timeutils.utcnow()
timeutils.set_time_override(current)
timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
self.mox.ReplayAll()
vm_utils._log_progress_if_required(1, current, 2)
def test_resize_part_and_fs_down_fails_disk_too_big(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
new_sectors = 10
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
mobj = utils.execute("resize2fs",
partition_path,
"%ss" % new_sectors,
run_as_root=True)
mobj.AndRaise(processutils.ProcessExecutionError)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vm_utils._resize_part_and_fs,
"fake", 0, 20, 10, "boot")
def test_resize_part_and_fs_up_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
self._call_parted_mkpart(dev_path, 0, 29)
utils.execute("resize2fs", partition_path, run_as_root=True)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")
def test_resize_disk_throws_on_zero_size(self):
self.assertRaises(exception.ResizeError,
vm_utils.resize_disk, "session", "instance", "vdi_ref",
{"root_gb": 0})
def test_auto_config_disk_returns_early_on_zero_size(self):
vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
@mock.patch.object(utils, "execute")
def test_get_partitions(self, mock_execute):
parted_return = "BYT;\n...\n"
parted_return += "1:2s:11s:10s:ext3::boot;\n"
parted_return += "2:20s:11s:10s::bob:;\n"
mock_execute.return_value = (parted_return, None)
partitions = vm_utils._get_partitions("abc")
self.assertEqual(2, len(partitions))
self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
class CheckVDISizeTestCase(VMUtilsTestBase):
def setUp(self):
super(CheckVDISizeTestCase, self).setUp()
self.context = 'fakecontext'
self.session = 'fakesession'
self.instance = objects.Instance(uuid=str(uuid.uuid4()))
self.flavor = objects.Flavor()
self.vdi_uuid = 'fakeuuid'
def test_not_too_large(self):
self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
vm_utils._get_vdi_chain_size(self.session,
self.vdi_uuid).AndReturn(1073741824)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 1
get.return_value = self.flavor
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
def test_too_large(self):
self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
vm_utils._get_vdi_chain_size(self.session,
self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 1
get.return_value = self.flavor
self.assertRaises(exception.FlavorDiskTooSmall,
vm_utils._check_vdi_size, self.context,
self.session, self.instance, self.vdi_uuid)
def test_zero_root_gb_disables_check(self):
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 0
get.return_value = self.flavor
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
def setUp(self):
super(GetInstanceForVdisForSrTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
def test_get_instance_vdis_for_sr(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
vdi_1 = fake.create_vdi('vdiname1', sr_ref)
vdi_2 = fake.create_vdi('vdiname2', sr_ref)
for vdi_ref in [vdi_1, vdi_2]:
fake.create_vbd(vm_ref, vdi_ref)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([vdi_1, vdi_2], result)
def test_get_instance_vdis_for_sr_no_vbd(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([], result)
class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
def test_lookup_call(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn('ignored')
mock.ReplayAll()
vm_utils.vm_ref_or_raise('session', 'somename')
mock.VerifyAll()
def test_return_value(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
mock.ReplayAll()
self.assertEqual(
'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
mock.VerifyAll()
class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
def test_exception_raised(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn(None)
mock.ReplayAll()
self.assertRaises(
exception.InstanceNotFound,
lambda: vm_utils.vm_ref_or_raise('session', 'somename')
)
mock.VerifyAll()
def test_exception_msg_contains_vm_name(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn(None)
mock.ReplayAll()
try:
vm_utils.vm_ref_or_raise('session', 'somename')
except exception.InstanceNotFound as e:
self.assertIn('somename', six.text_type(e))
mock.VerifyAll()
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
class CreateCachedImageTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateCachedImageTestCase, self).setUp()
self.session = _get_fake_session()
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
@mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
def test_no_cow_no_ext(self, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
'vdi_ref', None, None, None,
'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'vdi_uuid',
'file': None}})
def test_noncached(self, mock_fetch_image, mock_clone_vdi,
mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
None, None, None, None, None,
None, 'vdi_uuid']
self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
class BittorrentTestCase(VMUtilsTestBase):
def setUp(self):
super(BittorrentTestCase, self).setUp()
self.context = context.get_admin_context()
def test_image_uses_bittorrent(self):
instance = {'system_metadata': {'image_bittorrent': True}}
self.flags(torrent_images='some', group='xenserver')
self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
instance))
def _test_create_image(self, cache_type):
instance = {'system_metadata': {'image_cache_in_nova': True}}
self.flags(cache_images=cache_type, group='xenserver')
was = {'called': None}
def fake_create_cached_image(*args):
was['called'] = 'some'
return (False, {})
self.stubs.Set(vm_utils, '_create_cached_image',
fake_create_cached_image)
def fake_fetch_image(*args):
was['called'] = 'none'
return {}
self.stubs.Set(vm_utils, '_fetch_image',
fake_fetch_image)
vm_utils.create_image(self.context, None, instance,
'foo', 'bar', 'baz')
self.assertEqual(was['called'], cache_type)
def test_create_image_cached(self):
self._test_create_image('some')
def test_create_image_uncached(self):
self._test_create_image('none')
class ShutdownTestCase(VMUtilsTestBase):
def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
self.mock = mox.Mox()
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
self.mock.StubOutWithMock(vm_utils, 'LOG')
self.assertTrue(vm_utils.hard_shutdown_vm(
session, instance, vm_ref))
def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
self.mock = mox.Mox()
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
self.mock.StubOutWithMock(vm_utils, 'LOG')
self.assertTrue(vm_utils.clean_shutdown_vm(
session, instance, vm_ref))
class CreateVBDTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateVBDTestCase, self).setUp()
self.session = FakeSession()
self.mock = mox.Mox()
self.mock.StubOutWithMock(self.session, 'call_xenapi')
self.vbd_rec = self._generate_vbd_rec()
def _generate_vbd_rec(self):
vbd_rec = {}
vbd_rec['VM'] = 'vm_ref'
vbd_rec['VDI'] = 'vdi_ref'
vbd_rec['userdevice'] = '0'
vbd_rec['bootable'] = False
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
return vbd_rec
def test_create_vbd_default_args(self):
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_create_vbd_osvol(self):
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
"osvol", "True")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
osvol=True)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_create_vbd_extra_args(self):
self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
self.vbd_rec['type'] = 'a'
self.vbd_rec['mode'] = 'RO'
self.vbd_rec['bootable'] = True
self.vbd_rec['empty'] = True
self.vbd_rec['unpluggable'] = False
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
vbd_type="a", read_only=True, bootable=True,
empty=True, unpluggable=False)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_attach_cd(self):
self.mock.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.create_vbd(self.session, "vm_ref", None, 1,
vbd_type='cd', read_only=True, bootable=True,
empty=True, unpluggable=False).AndReturn("vbd_ref")
self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
self.mock.ReplayAll()
result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
class UnplugVbdTestCase(VMUtilsTestBase):
@mock.patch.object(greenthread, 'sleep')
def test_unplug_vbd_works(self, mock_sleep):
session = _get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
self.assertEqual(0, mock_sleep.call_count)
def test_unplug_vbd_raises_unexpected_error(self):
session = _get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
session.call_xenapi.side_effect = test.TestingException()
self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
session, vm_ref, vbd_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_detached_works(self):
error = "DEVICE_ALREADY_DETACHED"
session = _get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
session = _get_fake_session("")
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def _test_uplug_vbd_retries(self, mock_sleep, error):
session = _get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vm_ref, vbd_ref)
self.assertEqual(11, session.call_xenapi.call_count)
self.assertEqual(10, mock_sleep.call_count)
def _test_uplug_vbd_retries_with_neg_val(self):
session = _get_fake_session()
self.flags(num_vbd_unplug_retries=-1, group='xenserver')
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
@mock.patch.object(greenthread, 'sleep')
def test_unplug_vbd_retries_on_rejected(self, mock_sleep):
self._test_unplug_vbd_retries(mock_sleep,
"DEVICE_DETACH_REJECTED")
@mock.patch.object(greenthread, 'sleep')
def test_unplug_vbd_retries_on_internal_error(self, mock_sleep):
self._test_unplug_vbd_retries(mock_sleep,
"INTERNAL_ERROR")
class VDIOtherConfigTestCase(VMUtilsTestBase):
"""Tests to ensure that the code is populating VDI's `other_config`
attribute with the correct metadata.
"""
def setUp(self):
super(VDIOtherConfigTestCase, self).setUp()
class _FakeSession(object):
def call_xenapi(self, operation, *args, **kwargs):
# VDI.add_to_other_config -> VDI_add_to_other_config
method = getattr(self, operation.replace('.', '_'), None)
if method:
return method(*args, **kwargs)
self.operation = operation
self.args = args
self.kwargs = kwargs
self.session = _FakeSession()
self.context = context.get_admin_context()
self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
'name': 'myinstance'}
def test_create_vdi(self):
# Some images are registered with XenServer explicitly by calling
# `create_vdi`
vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
'myvdi', 'root', 1024, read_only=True)
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, self.session.args[0]['other_config'])
def test_create_image(self):
# Other images are registered implicitly when they are dropped into
# the SR by a dom0 plugin or some other process
self.flags(cache_images='none', group='xenserver')
def fake_fetch_image(*args):
return {'root': {'uuid': 'fake-uuid'}}
self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
# Stubbing on the session object and not class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
vm_utils.create_image(self.context, self.session, self.fake_instance,
'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
def test_import_migrated_vhds(self):
# Migrated images should preserve the `other_config`
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
def call_plugin_serialized(*args, **kwargs):
return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
# Stubbing on the session object and not class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
self.session.call_plugin_serialized = call_plugin_serialized
self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)
vm_utils._import_migrated_vhds(self.session, self.fake_instance,
"disk_label", "root", "vdi_label")
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
class GenerateDiskTestCase(VMUtilsTestBase):
def setUp(self):
super(GenerateDiskTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.session.is_local_connection = False
self.vm_ref = fake.create_vm("foo", "Running")
def tearDown(self):
super(GenerateDiskTestCase, self).tearDown()
fake.destroy_vm(self.vm_ref)
def _expect_parted_calls(self):
self.mox.StubOutWithMock(utils, "execute")
self.mox.StubOutWithMock(utils, "trycmd")
self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
self.mox.StubOutWithMock(vm_utils.os.path, "exists")
if self.session.is_local_connection:
utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
'msdos', check_exit_code=False, run_as_root=True)
utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
'primary', '0', '-0',
check_exit_code=False, run_as_root=True)
vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
utils.trycmd('kpartx', '-a', '/dev/fakedev',
discard_warnings=True, run_as_root=True)
else:
utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
'msdos', check_exit_code=True, run_as_root=True)
utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
'primary', '0', '-0',
check_exit_code=True, run_as_root=True)
def _check_vdi(self, vdi_ref, check_attached=True):
vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
if check_attached:
vbd_ref = vdi_rec["VBDs"][0]
vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
self.assertEqual(self.vm_ref, vbd_rec['VM'])
else:
self.assertEqual(0, len(vdi_rec["VBDs"]))
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_with_no_fs_given(self):
self._expect_parted_calls()
self.mox.ReplayAll()
vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
self.vm_ref, "2", "name", "user", 10, None)
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_swap(self):
self._expect_parted_calls()
utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)
self.mox.ReplayAll()
vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
self.vm_ref, "2", "name", "swap", 10, "linux-swap")
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ephemeral(self):
self._expect_parted_calls()
utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
run_as_root=True)
self.mox.ReplayAll()
vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ensure_cleanup_called(self):
self._expect_parted_calls()
utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
run_as_root=True).AndRaise(test.TestingException)
vm_utils.destroy_vdi(self.session,
mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))
self.mox.ReplayAll()
self.assertRaises(test.TestingException, vm_utils._generate_disk,
self.session, {"uuid": "fake_uuid"},
self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ephemeral_local_not_attached(self):
self.session.is_local_connection = True
self._expect_parted_calls()
utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
run_as_root=True)
self.mox.ReplayAll()
vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
None, "2", "name", "ephemeral", 10, "ext4")
self._check_vdi(vdi_ref, check_attached=False)
class GenerateEphemeralTestCase(VMUtilsTestBase):
def setUp(self):
super(GenerateEphemeralTestCase, self).setUp()
self.session = "session"
self.instance = "instance"
self.vm_ref = "vm_ref"
self.name_label = "name"
self.ephemeral_name_label = "name ephemeral"
self.userdevice = 4
self.mox.StubOutWithMock(vm_utils, "_generate_disk")
self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")
def test_get_ephemeral_disk_sizes_simple(self):
result = vm_utils.get_ephemeral_disk_sizes(20)
expected = [20]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_three_disks_2000(self):
result = vm_utils.get_ephemeral_disk_sizes(4030)
expected = [2000, 2000, 30]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_two_disks_1024(self):
result = vm_utils.get_ephemeral_disk_sizes(2048)
expected = [1024, 1024]
self.assertEqual(expected, list(result))
def _expect_generate_disk(self, size, device, name_label):
vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
str(device), name_label, 'ephemeral',
size * 1024, None).AndReturn(device)
def test_generate_ephemeral_adds_one_disk(self):
self._expect_generate_disk(20, self.userdevice,
self.ephemeral_name_label)
self.mox.ReplayAll()
vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 20)
def test_generate_ephemeral_adds_multiple_disks(self):
self._expect_generate_disk(2000, self.userdevice,
self.ephemeral_name_label)
self._expect_generate_disk(2000, self.userdevice + 1,
self.ephemeral_name_label + " (1)")
self._expect_generate_disk(30, self.userdevice + 2,
self.ephemeral_name_label + " (2)")
self.mox.ReplayAll()
vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4030)
def test_generate_ephemeral_cleans_up_on_error(self):
self._expect_generate_disk(1024, self.userdevice,
self.ephemeral_name_label)
self._expect_generate_disk(1024, self.userdevice + 1,
self.ephemeral_name_label + " (1)")
vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
units.Mi, None).AndRaise(exception.NovaException)
vm_utils.safe_destroy_vdis(self.session, [4, 5])
self.mox.ReplayAll()
self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral,
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4096)
class FakeFile(object):
def __init__(self):
self._file_operations = []
def seek(self, offset):
self._file_operations.append((self.seek, offset))
class StreamDiskTestCase(VMUtilsTestBase):
def setUp(self):
import __builtin__
super(StreamDiskTestCase, self).setUp()
self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
self.mox.StubOutWithMock(vm_utils, '_write_partition')
# NOTE(matelakat): This might hide the fail reason, as test runners
# are unhappy with a mocked out open.
self.mox.StubOutWithMock(__builtin__, 'open')
self.image_service_func = self.mox.CreateMockAnything()
def test_non_ami(self):
fake_file = FakeFile()
vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
vm_utils.utils.temporary_chown(
'some_path').AndReturn(contextified(None))
open('some_path', 'wb').AndReturn(contextified(fake_file))
self.image_service_func(fake_file)
self.mox.ReplayAll()
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.KERNEL, None, 'dev')
self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)
def test_ami_disk(self):
fake_file = FakeFile()
vm_utils._write_partition("session", 100, 'dev')
vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
vm_utils.utils.temporary_chown(
'some_path').AndReturn(contextified(None))
open('some_path', 'wb').AndReturn(contextified(fake_file))
self.image_service_func(fake_file)
self.mox.ReplayAll()
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.DISK, 100, 'dev')
self.assertEqual(
[(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
fake_file._file_operations)
class VMUtilsSRPath(VMUtilsTestBase):
def setUp(self):
super(VMUtilsSRPath, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.session.is_local_connection = False
def test_defined(self):
self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
self.mox.StubOutWithMock(self.session, "call_xenapi")
vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
self.session.host_ref = "host_ref"
self.session.call_xenapi('PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
{'pbd_ref': {'device_config': {'path': 'sr_path'}}})
self.mox.ReplayAll()
self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")
def test_default(self):
self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
self.mox.StubOutWithMock(self.session, "call_xenapi")
vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
self.session.host_ref = "host_ref"
self.session.call_xenapi('PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
{'pbd_ref': {'device_config': {}}})
self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
{'uuid': 'sr_uuid', 'type': 'ext'})
self.mox.ReplayAll()
self.assertEqual(vm_utils.get_sr_path(self.session),
"/var/run/sr-mount/sr_uuid")
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateKernelRamdiskTestCase, self).setUp()
self.context = "context"
self.session = FakeSession()
self.instance = {"kernel_id": None, "ramdisk_id": None}
self.name_label = "name"
self.mox.StubOutWithMock(self.session, "call_plugin")
self.mox.StubOutWithMock(uuid, "uuid4")
self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")
def test_create_kernel_and_ramdisk_no_create(self):
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual((None, None), result)
def test_create_kernel_and_ramdisk_create_both_cached(self):
kernel_id = "kernel"
ramdisk_id = "ramdisk"
self.instance["kernel_id"] = kernel_id
self.instance["ramdisk_id"] = ramdisk_id
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
uuid.uuid4().AndReturn("fake_uuid1")
self.session.call_plugin('kernel', 'create_kernel_ramdisk',
args_kernel).AndReturn("k")
args_ramdisk = {}
args_ramdisk['cached-image'] = ramdisk_id
args_ramdisk['new-image-uuid'] = "fake_uuid2"
uuid.uuid4().AndReturn("fake_uuid2")
self.session.call_plugin('kernel', 'create_kernel_ramdisk',
args_ramdisk).AndReturn("r")
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", "r"), result)
def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
kernel_id = "kernel"
self.instance["kernel_id"] = kernel_id
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
uuid.uuid4().AndReturn("fake_uuid1")
self.session.call_plugin('kernel', 'create_kernel_ramdisk',
args_kernel).AndReturn("")
kernel = {"kernel": {"file": "k"}}
vm_utils._fetch_disk_image(self.context, self.session, self.instance,
self.name_label, kernel_id, 0).AndReturn(kernel)
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", None), result)
class ScanSrTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, "_scan_sr")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
mock_safe_find_sr.return_value = "sr_ref"
self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))
mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")
def test_scan_sr_works(self):
session = mock.Mock()
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
def test_scan_sr_unknown_error_fails_once(self):
session = mock.Mock()
session.call_xenapi.side_effect = test.TestingException
self.assertRaises(test.TestingException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
session.call_xenapi.side_effect = FakeException
self.assertRaises(FakeException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(4, session.call_xenapi.call_count)
mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
def fake_call_xenapi(*args):
fake_call_xenapi.count += 1
if fake_call_xenapi.count != 2:
raise FakeException()
fake_call_xenapi.count = 0
session.call_xenapi.side_effect = fake_call_xenapi
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(2, session.call_xenapi.call_count)
mock_sleep.assert_called_once_with(2)
@mock.patch.object(flavors, 'extract_flavor',
return_value={
'memory_mb': 1024,
'vcpus': 1,
'vcpu_weight': 1.0,
})
class CreateVmTestCase(VMUtilsTestBase):
def test_vss_provider(self, mock_extract):
self.flags(vcpu_pin_set="2,3")
session = _get_fake_session()
instance = objects.Instance(uuid="uuid",
os_type="windows",
system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
vm_utils.create_vm(session, instance, "label",
"kernel", "ramdisk")
vm_rec = {
'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '', 'tags': [],
'VCPUs_max': '4',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': 'uuid'},
'name_label': 'label',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '4',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': 'true',
'acpi': 'true'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False
}
session.call_xenapi.assert_called_once_with("VM.create", vm_rec)
def test_invalid_cpu_mask_raises(self, mock_extract):
self.flags(vcpu_pin_set="asdf")
session = mock.Mock()
instance = objects.Instance(uuid=str(uuid.uuid4()),
system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self.assertRaises(exception.Invalid,
vm_utils.create_vm,
session, instance, "label",
"kernel", "ramdisk")
def test_destroy_vm(self, mock_extract):
session = mock.Mock()
instance = objects.Instance(uuid=str(uuid.uuid4()))
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
def test_destroy_vm_silently_fails(self, mock_extract):
session = mock.Mock()
exc = test.TestingException()
session.XenAPI.Failure = test.TestingException
session.VM.destroy.side_effect = exc
instance = objects.Instance(uuid=str(uuid.uuid4()))
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
class DetermineVmModeTestCase(VMUtilsTestBase):
def _fake_object(self, updates):
return fake_instance.fake_instance_obj(None, **updates)
def test_determine_vm_mode_returns_xen_mode(self):
instance = self._fake_object({"vm_mode": "xen"})
self.assertEqual(vm_mode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_mode(self):
instance = self._fake_object({"vm_mode": "hvm"})
self.assertEqual(vm_mode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_linux(self):
instance = self._fake_object({"vm_mode": None, "os_type": "linux"})
self.assertEqual(vm_mode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_for_windows(self):
instance = self._fake_object({"vm_mode": None, "os_type": "windows"})
self.assertEqual(vm_mode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_by_default(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(vm_mode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_VHD(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(vm_mode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD))
def test_determine_vm_mode_returns_xen_for_DISK(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(vm_mode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK))
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
def test_vm_get_vbd_refs(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")
def test_vbd_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
session.call_xenapi.assert_called_once_with("VBD.get_record",
"vbd_ref")
def test_vdi_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.get_record",
"vdi_ref")
def test_vdi_snapshot(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.snapshot",
"vdi_ref", {})
def test_vdi_get_virtual_size(self):
session = mock.Mock()
session.call_xenapi.return_value = "123"
self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
"ref")
@mock.patch.object(vm_utils, '_get_resize_func_name')
def test_vdi_resize(self, mock_get_resize_func_name):
session = mock.Mock()
mock_get_resize_func_name.return_value = "VDI.fake"
vm_utils._vdi_resize(session, "ref", 123)
session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
mock_get_size.return_value = (1024 ** 3) - 1
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3 + 1
instance = {"uuid": "a"}
self.assertRaises(exception.ResizeError,
vm_utils.update_vdi_virtual_size,
"s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
vdi_get_rec.return_value = {}
result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
self.assertEqual(('vdi_ref', {}), result)
vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
vbd_get_rec.assert_called_once_with(session, "a")
vdi_get_rec.assert_called_once_with(session, "vdi_ref")
def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
self.assertRaises(exception.NovaException,
vm_utils.get_vdi_for_vm_safely,
session, "vm_ref", userdevice='1')
self.assertEqual([], vdi_get_rec.call_args_list)
self.assertEqual(2, len(vbd_get_rec.call_args_list))
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
def fake_vbd_get_rec(session, vbd_ref):
return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}
def fake_vdi_get_uuid(session, vdi_ref):
return vdi_ref
vm_get_vbd_refs.return_value = ["0", "2"]
vbd_get_rec.side_effect = fake_vbd_get_rec
vdi_get_uuid.side_effect = fake_vdi_get_uuid
def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
expected = ['vdi_ref_0', 'vdi_ref_2']
self.assertEqual(expected, list(result))
def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
min_userdevice=1)
expected = ["vdi_ref_2"]
self.assertEqual(expected, list(result))
class GetAllVdisTestCase(VMUtilsTestBase):
def test_get_all_vdis_in_sr(self):
def fake_get_rec(record_type, ref):
if ref == "2":
return "vdi_rec_2"
session = mock.Mock()
session.call_xenapi.return_value = ["1", "2"]
session.get_rec.side_effect = fake_get_rec
sr_ref = "sr_ref"
actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
self.assertEqual(actual, [('2', 'vdi_rec_2')])
session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
class VDIAttachedHere(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'destroy_vbd')
@mock.patch.object(vm_utils, '_get_this_vm_ref')
@mock.patch.object(vm_utils, 'create_vbd')
@mock.patch.object(vm_utils, '_remap_vbd_dev')
@mock.patch.object(vm_utils, '_wait_for_device')
@mock.patch.object(utils, 'execute')
def test_sync_called(self, mock_execute, mock_wait_for_device,
mock_remap_vbd_dev, mock_create_vbd,
mock_get_this_vm_ref, mock_destroy_vbd):
session = _get_fake_session()
with vm_utils.vdi_attached_here(session, 'vdi_ref'):
pass
mock_execute.assert_called_with('sync', run_as_root=True)
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
def test_snapshot_attached_here(self, mock_impl):
def fake_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
self.assertEqual("session", session)
self.assertEqual("instance", instance)
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("label", label)
self.assertEqual('0', userdevice)
self.assertIsNone(post_snapshot_callback)
yield "fake"
mock_impl.side_effect = fake_impl
with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
"label") as result:
self.assertEqual("fake", result)
mock_impl.assert_called_once_with("session", "instance", "vm_ref",
"label", '0', None)
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vdi_snapshot')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
mock_vdi_snapshot, mock_vdi_get_uuid,
mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
session = "session"
instance = {"uuid": "uuid"}
mock_callback = mock.Mock()
mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
{"SR": "sr_ref",
"uuid": "vdi_uuid"})
mock_vdi_snapshot.return_value = "snap_ref"
mock_vdi_get_uuid.return_value = "snap_uuid"
mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]
try:
with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
"label", '2', mock_callback) as result:
self.assertEqual(["a", "b"], result)
raise test.TestingException()
self.assertTrue(False)
except test.TestingException:
pass
mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
'2')
mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
"sr_ref", "vdi_ref", ['a', 'b'])
mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
mock.call(session, "snap_uuid")])
mock_callback.assert_called_once_with(
task_state="image_pending_upload")
mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
instance, ['a', 'b'], "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid"])
self.assertFalse(mock_sleep.called)
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
mock_count):
mock_count.return_value = 2
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertFalse(mock_sleep.called)
self.assertTrue(mock_count.called)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
self.assertRaises(exception.NovaException,
vm_utils._wait_for_vhd_coalesce, "session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertTrue(mock_count.called)
self.assertEqual(20, mock_sleep.call_count)
self.assertEqual(20, mock_scan_sr.call_count)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(2, mock_scan_sr.call_count)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_count_children(self, mock_get_all_vdis_in_sr):
vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
mock_get_all_vdis_in_sr.return_value = vdis
self.assertEqual(2, vm_utils._count_children('session',
'parent1', 'sr'))
class ImportMigratedDisksTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance)
expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
mock_root.assert_called_once_with(session, instance)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks_import_root_false(self, mock_root,
mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance,
import_root=False)
expected = {'root': None, 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
self.assertEqual(0, mock_root.call_count)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrated_root_disk(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = {"uuid": "uuid", "name": "name"}
result = vm_utils._import_migrated_root_disk("s", instance)
self.assertEqual("foo", result)
mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
"name")
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrate_ephemeral_disks(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = {"uuid": "uuid", "name": "name", "ephemeral_gb": 4000}
result = vm_utils._import_migrate_ephemeral_disks("s", instance)
self.assertEqual({'4': 'foo', '5': 'foo'}, result)
expected_calls = [mock.call("s", instance, "uuid_ephemeral_1",
"ephemeral", "name ephemeral (1)"),
mock.call("s", instance, "uuid_ephemeral_2",
"ephemeral", "name ephemeral (2)")]
self.assertEqual(expected_calls, mock_migrate.call_args_list)
@mock.patch.object(vm_utils, '_set_vdi_info')
@mock.patch.object(vm_utils, 'scan_default_sr')
@mock.patch.object(vm_utils, 'get_sr_path')
def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
mock_set_info):
session = mock.Mock()
instance = {"uuid": "uuid"}
session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
session.call_xenapi.return_value = "vdi_ref"
mock_get_sr_path.return_value = "sr_path"
result = vm_utils._import_migrated_vhds(session, instance,
'chain_label', 'disk_type', 'vdi_label')
expected = {'uuid': "a", 'ref': "vdi_ref"}
self.assertEqual(expected, result)
mock_get_sr_path.assert_called_once_with(session)
session.call_plugin_serialized.assert_called_once_with('migration',
'move_vhds_into_sr', instance_uuid='chain_label',
sr_path='sr_path', uuid_stack=mock.ANY)
mock_scan_sr.assert_called_once_with(session)
session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
'vdi_label', 'disk_type', instance)
def test_get_vhd_parent_uuid_rec_provided(self):
session = mock.Mock()
vdi_ref = 'vdi_ref'
vdi_rec = {'sm_config': {}}
self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
vdi_ref,
vdi_rec))
self.assertFalse(session.call_xenapi.called)
class MigrateVHDTestCase(VMUtilsTestBase):
def _assert_transfer_called(self, session, label):
session.call_plugin_serialized.assert_called_once_with(
'migration', 'transfer_vhd', instance_uuid=label, host="dest",
vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
def test_migrate_vhd_root(self):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2)
self._assert_transfer_called(session, "a")
def test_migrate_vhd_ephemeral(self):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2, 2)
self._assert_transfer_called(session, "a_ephemeral_2")
def test_migrate_vhd_converts_exceptions(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_plugin_serialized.side_effect = test.TestingException()
instance = {"uuid": "a"}
self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
session, instance, "vdi_uuid", "dest", "sr_path", 2)
self._assert_transfer_called(session, "a")
class StripBaseMirrorTestCase(VMUtilsTestBase):
def test_strip_base_mirror_from_vdi_works(self):
session = mock.Mock()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
def test_strip_base_mirror_from_vdi_hides_error(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_xenapi.side_effect = test.TestingException()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
@mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
def test_strip_base_mirror_from_vdis(self, mock_strip):
def call_xenapi(method, arg):
if method == "VM.get_VBDs":
return ['VBD_ref_1', 'VBD_ref_2']
if method == "VBD.get_VDI":
return 'VDI' + arg[3:]
return "Unexpected call_xenapi: %s.%s" % (method, arg)
session = mock.Mock()
session.call_xenapi.side_effect = call_xenapi
vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
expected = [mock.call('VM.get_VBDs', "vm_ref"),
mock.call('VBD.get_VDI', "VBD_ref_1"),
mock.call('VBD.get_VDI', "VBD_ref_2")]
self.assertEqual(expected, session.call_xenapi.call_args_list)
expected = [mock.call(session, "VDI_ref_1"),
mock.call(session, "VDI_ref_2")]
self.assertEqual(expected, mock_strip.call_args_list)
class DeviceIdTestCase(VMUtilsTestBase):
def test_device_id_is_none_if_not_specified_in_meta_data(self):
image_meta = {}
session = mock.Mock()
session.product_version = (6, 1, 0)
self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
image_meta = {'xenapi_device_id': '0002'}
session = mock.Mock()
session.product_version = (6, 2, 0)
self.assertEqual('0002',
vm_utils.get_vm_device_id(session, image_meta))
session.product_version = (6, 3, 1)
self.assertEqual('0002',
vm_utils.get_vm_device_id(session, image_meta))
def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
image_meta = {'xenapi_device_id': '0002'}
session = mock.Mock()
session.product_version = (6, 0)
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 0002 specified is not supported by "
"hypervisor version (6, 0)", exc.message)
session.product_version = ('6a')
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 0002 specified is not supported by "
"hypervisor version 6a", exc.message)
class CreateVmRecordTestCase(VMUtilsTestBase):
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_linux(self, mock_extract_flavor):
instance = objects.Instance(uuid="uuid123",
os_type="linux")
self._test_create_vm_record(mock_extract_flavor, instance, False)
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_windows(self, mock_extract_flavor):
instance = objects.Instance(uuid="uuid123",
os_type="windows")
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self._test_create_vm_record(mock_extract_flavor, instance, True)
def _test_create_vm_record(self, mock_extract_flavor, instance,
is_viridian):
session = _get_fake_session()
flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
mock_extract_flavor.return_value = flavor
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor(memory_mb=1024,
vcpus=1,
vcpu_weight=2)
vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
device_id="0002")
is_viridian_str = str(is_viridian).lower()
expected_vm_rec = {
'VCPUs_params': {'cap': '0', 'weight': '2'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '',
'tags': [],
'VCPUs_max': '1',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': 'uuid123'},
'name_label': 'name',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '1',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': is_viridian_str,
'acpi': 'true', 'device_id': '0002'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False}
session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
def test_list_vms(self):
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
fake.create_vm("foo1", "Halted")
vm_ref = fake.create_vm("foo2", "Running")
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.list_vms(driver._session))
# Will have 3 VMs - but one is Dom0 and one is not running on the host
self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
self.assertEqual(len(result), 1)
result_keys = [key for (key, value) in result]
self.assertIn(vm_ref, result_keys)
class ChildVHDsTestCase(test.NoDBTestCase):
all_vdis = [
("my-vdi-ref",
{"uuid": "my-uuid", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("non-parent",
{"uuid": "uuid-1", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("diff-parent",
{"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child",
{"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child-snap",
{"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": True, "other_config": {}}),
]
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_defaults(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
self.assertJsonEqual(['uuid-child', 'uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_only_snapshots(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_chain(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref",
["my-uuid", "other-uuid"], old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
def test_is_vdi_a_snapshot_works(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {}}
self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_base_images_false(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {"image-id": "fake"}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
vdi_rec = {"is_a_snapshot": False,
"other_config": {}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
instance = {"uuid": "fake"}
mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
vm_utils.remove_old_snapshots("session", instance, "vm_ref")
mock_delete.assert_called_once_with("session", instance,
["uuid1", "uuid2"], "sr_ref")
mock_get.assert_called_once_with("session", "vm_ref")
mock_walk.assert_called_once_with("session", "vdi")
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
instance = {"uuid": "fake"}
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid"], "sr")
self.assertFalse(mock_child.called)
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
instance = {"uuid": "fake"}
mock_child.return_value = []
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with("session", "sr", ["uuid2"],
old_snapshots_only=True)
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
mock_destroy, mock_scan):
instance = {"uuid": "fake"}
mock_child.return_value = ["suuid1", "suuid2"]
session = mock.Mock()
session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
vm_utils._delete_snapshots_in_vdi_chain(session, instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with(session, "sr", ["uuid2"],
old_snapshots_only=True)
session.VDI.get_by_uuid.assert_has_calls([
mock.call("suuid1"), mock.call("suuid2")])
mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
mock_scan.assert_called_once_with(session, "sr")
class ResizeFunctionTestCase(test.NoDBTestCase):
def _call_get_resize_func_name(self, brand, version):
session = mock.Mock()
session.product_brand = brand
session.product_version = version
return vm_utils._get_resize_func_name(session)
def _test_is_resize(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize", result)
def _test_is_resize_online(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize_online", result)
def test_xenserver_5_5(self):
self._test_is_resize_online("XenServer", (5, 5, 0))
def test_xenserver_6_0(self):
self._test_is_resize("XenServer", (6, 0, 0))
def test_xcp_1_1(self):
self._test_is_resize_online("XCP", (1, 1, 0))
def test_xcp_1_2(self):
self._test_is_resize("XCP", (1, 2, 0))
def test_xcp_2_0(self):
self._test_is_resize("XCP", (2, 0, 0))
def test_random_brand(self):
self._test_is_resize("asfd", (1, 1, 0))
def test_default(self):
self._test_is_resize(None, None)
def test_empty(self):
self._test_is_resize("", "")
def test_bad_version(self):
self._test_is_resize("XenServer", "asdf")
class VMInfoTests(VMUtilsTestBase):
def setUp(self):
super(VMInfoTests, self).setUp()
self.session = mock.Mock()
def test_get_power_state_valid(self):
# Save on test setup calls by having these simple tests in one method
self.session.call_xenapi.return_value = "Running"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.RUNNING)
self.session.call_xenapi.return_value = "Halted"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SHUTDOWN)
self.session.call_xenapi.return_value = "Paused"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.PAUSED)
self.session.call_xenapi.return_value = "Suspended"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SUSPENDED)
self.session.call_xenapi.return_value = "Crashed"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.CRASHED)
def test_get_power_state_invalid(self):
self.session.call_xenapi.return_value = "Invalid"
self.assertRaises(KeyError,
vm_utils.get_power_state, self.session, "ref")
_XAPI_record = {'power_state': 'Running',
'memory_static_max': str(10 << 10),
'memory_dynamic_max': str(9 << 10),
'VCPUs_max': '5'}
def test_compile_info(self):
def call_xenapi(method, *args):
if method.startswith('VM.get_') and args[0] == 'dummy':
return self._XAPI_record[method[7:]]
self.session.call_xenapi.side_effect = call_xenapi
info = vm_utils.compile_info(self.session, "dummy")
self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING,
max_mem_kb=10, mem_kb=9,
num_cpu='5', cpu_time_ns=0),
info)
|
ChrisCummins/clgen | refs/heads/master | deeplearning/clgen/preprocessors/clang.py | 1 | # Copyright (c) 2016-2020 Chris Cummins.
#
# clgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# clgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with clgen. If not, see <https://www.gnu.org/licenses/>.
"""This file contains utility code for working with clang.
This module does not expose any preprocessor functions for CLgen. It contains
wrappers around Clang binaries, which preprocessor functions can use to
implement specific behavior. See deeplearning.clgen.preprocessors.cxx.Compile()
for an example.
"""
import json
import re
import subprocess
import tempfile
import typing
from compilers.llvm import clang
from compilers.llvm import clang_format
from compilers.llvm import llvm
from deeplearning.clgen import errors
from labm8.py import app
FLAGS = app.FLAGS
# The marker used to mark stdin from clang pre-processor output.
CLANG_STDIN_MARKER = re.compile(r'# \d+ "<stdin>" 2')
# Options to pass to clang-format.
# See: http://clang.llvm.org/docs/ClangFormatStyleOptions.html
CLANG_FORMAT_CONFIG = {
"BasedOnStyle": "Google",
"ColumnLimit": 5000,
"IndentWidth": 2,
"AllowShortBlocksOnASingleLine": False,
"AllowShortCaseLabelsOnASingleLine": False,
"AllowShortFunctionsOnASingleLine": False,
"AllowShortLoopsOnASingleLine": False,
"AllowShortIfStatementsOnASingleLine": False,
"DerivePointerAlignment": False,
"PointerAlignment": "Left",
"BreakAfterJavaFieldAnnotations": True,
"BreakBeforeInheritanceComma": False,
"BreakBeforeTernaryOperators": False,
"AlwaysBreakAfterReturnType": "None",
"AlwaysBreakAfterDefinitionReturnType": "None",
}
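# Illustrative note (an addition, not part of the original file): ClangFormat()
# below serializes this dict into a single clang-format "-style" argument. A
# hypothetical helper showing the exact flag it produces:
def _example_style_arg() -> str:
  """Return the -style flag built from CLANG_FORMAT_CONFIG."""
  return "-style={}".format(json.dumps(CLANG_FORMAT_CONFIG))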
def StripPreprocessorLines(src: str) -> str:
"""Strip preprocessor remnants from clang frontend output.
Args:
src: Clang frontend output.
Returns:
The output with preprocessor output stripped.
"""
lines = src.split("\n")
# Determine when the final included file ends.
for i in range(len(lines) - 1, -1, -1):
if CLANG_STDIN_MARKER.match(lines[i]):
break
else:
return ""
# Strip lines beginning with '#' (that's preprocessor stuff):
return "\n".join([line for line in lines[i:] if not line.startswith("#")])
def Preprocess(
src: str,
cflags: typing.List[str],
timeout_seconds: int = 60,
strip_preprocessor_lines: bool = True,
):
"""Run input code through the compiler frontend to inline macros.
This uses the repository clang binary.
Args:
src: The source code to preprocess.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
strip_preprocessor_lines: Whether to strip the extra lines introduced by
the preprocessor.
Returns:
The preprocessed code.
Raises:
ClangException: In case of an error.
ClangTimeout: If clang does not complete before timeout_seconds.
"""
cmd = [
"timeout",
"-s9",
str(timeout_seconds),
str(clang.CLANG),
"-E",
"-c",
"-",
"-o",
"-",
] + cflags
app.Log(2, "$ %s", " ".join(cmd))
process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate(src)
if process.returncode == 9:
raise errors.ClangTimeout(
f"Clang preprocessor timed out after {timeout_seconds}s"
)
elif process.returncode != 0:
raise errors.ClangException(stderr)
if strip_preprocessor_lines:
return StripPreprocessorLines(stdout)
else:
return stdout
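# Hedged usage sketch (not part of the original module): inlining a macro via
# Preprocess(). The "-xc" flag is an assumption for plain C input; CLgen's
# real preprocessors supply their own cflags.
def _example_preprocess_c_snippet() -> str:
  """Preprocess a tiny C snippet so the macro N is expanded to 4."""
  src = "#define N 4\nint x = N;\n"
  return Preprocess(src, ["-xc"])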
def CompileLlvmBytecode(
src: str, suffix: str, cflags: typing.List[str], timeout_seconds: int = 60
) -> str:
"""Compile input code into textual LLVM byte code.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
The textual LLVM byte code.
Raises:
ClangException: In case of an error.
ClangTimeout: If clang does not complete before timeout_seconds.
"""
builtin_cflags = ["-S", "-emit-llvm", "-o", "-"]
with tempfile.NamedTemporaryFile(
"w", prefix="phd_deeplearning_clgen_preprocessors_clang_", suffix=suffix
) as f:
f.write(src)
f.flush()
cmd = (
["timeout", "-s9", str(timeout_seconds), str(clang.CLANG), f.name]
+ builtin_cflags
+ cflags
)
app.Log(2, "$ %s", " ".join(cmd))
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate()
if process.returncode == 9:
raise errors.ClangTimeout(f"Clang timed out after {timeout_seconds}s")
elif process.returncode != 0:
raise errors.ClangException(stderr)
return stdout
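# Hedged usage sketch (illustrative addition): emitting textual LLVM IR for a
# small C function. The "-O0" flag is an assumption; real callers pass their
# own cflags.
def _example_compile_llvm_bytecode() -> str:
  """Compile a trivial C function to textual LLVM IR."""
  src = "int Identity(int x) { return x; }\n"
  return CompileLlvmBytecode(src, suffix=".c", cflags=["-O0"])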
def ClangFormat(text: str, suffix: str, timeout_seconds: int = 60) -> str:
"""Run clang-format on a source to enforce code style.
Args:
text: The source code to run through clang-format.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
timeout_seconds: The number of seconds to allow clang-format to run for.
Returns:
The output of clang-format.
Raises:
ClangFormatException: In case of an error.
ClangTimeout: If clang-format does not complete before timeout_seconds.
"""
try:
return clang_format.Exec(
text,
suffix,
["-style={}".format(json.dumps(CLANG_FORMAT_CONFIG))],
timeout_seconds,
)
except llvm.LlvmTimeout:
raise errors.ClangTimeout(
f"Clang-format timed out after {timeout_seconds}s"
)
except clang_format.ClangFormatException as e:
raise errors.ClangFormatException(str(e))
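# Hedged usage sketch (illustrative addition, not original code): normalising
# whitespace in a one-line OpenCL kernel. The ".cl" suffix is an assumption
# about how callers name their temporary files.
def _example_clang_format() -> str:
  """Re-format a one-line kernel according to CLANG_FORMAT_CONFIG."""
  return ClangFormat("kernel void A(global int* a) {a[0]=1;}", ".cl")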
|
TNT-Samuel/Coding-Projects | refs/heads/master | DNS Server/Source - Copy/Lib/site-packages/setuptools/_vendor/packaging/markers.py | 65 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from setuptools.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
"Marker", "default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
raise NotImplementedError
class Variable(Node):
def serialize(self):
return str(self)
class Value(Node):
def serialize(self):
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
return str(self)
VARIABLE = (
L("implementation_version") |
L("platform_python_implementation") |
L("implementation_name") |
L("python_full_version") |
L("platform_release") |
L("platform_version") |
L("platform_machine") |
L("platform_system") |
L("python_version") |
L("sys_platform") |
L("os_name") |
L("os.name") | # PEP-345
L("sys.platform") | # PEP-345
L("platform.version") | # PEP-345
L("platform.machine") | # PEP-345
L("platform.python_implementation") | # PEP-345
L("python_implementation") | # undocumented setuptools legacy
L("extra")
)
ALIASES = {
'os.name': 'os_name',
'sys.platform': 'sys_platform',
'platform.version': 'platform_version',
'platform.machine': 'platform_machine',
'platform.python_implementation': 'platform_python_implementation',
'python_implementation': 'platform_python_implementation'
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") |
L("==") |
L(">=") |
L("<=") |
L("!=") |
L("~=") |
L(">") |
L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
assert isinstance(marker, (list, tuple, string_types))
    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
if (isinstance(marker, list) and len(marker) == 1 and
isinstance(marker[0], (list, tuple))):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs, op, rhs):
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
groups = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
version = '{0.major}.{0.minor}.{0.micro}'.format(info)
kind = info.releaselevel
if kind != 'final':
version += kind[0] + str(info.serial)
return version
def default_environment():
if hasattr(sys, 'implementation'):
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
else:
iver = '0'
implementation_name = ''
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": platform.python_version()[:3],
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc:e.loc + 8])
raise InvalidMarker(err_str)
def __str__(self):
return _format_marker(self._markers)
def __repr__(self):
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
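# Illustrative usage sketch (not part of the vendored module; the marker
# strings below are examples only): a Marker is parsed once and can then be
# evaluated against the current interpreter or an explicit override environment.
#   m = Marker("python_version >= '2.7' and os_name == 'posix'")
#   m.evaluate()                           # True/False for the running process
#   m.evaluate({'python_version': '2.6'})  # override part of the environment
#   Marker("not a marker")                 # raises InvalidMarker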
|
mrbox/django | refs/heads/master | tests/template_tests/filter_tests/test_stringformat.py | 345 | from django.template.defaultfilters import stringformat
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class StringformatTests(SimpleTestCase):
"""
Notice that escaping is applied *after* any filters, so the string
formatting here only needs to deal with pre-escaped characters.
"""
@setup({'stringformat01':
'{% autoescape off %}.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.{% endautoescape %}'})
def test_stringformat01(self):
output = self.engine.render_to_string('stringformat01', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
@setup({'stringformat02': '.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.'})
def test_stringformat02(self):
output = self.engine.render_to_string('stringformat02', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
class FunctionTests(SimpleTestCase):
def test_format(self):
self.assertEqual(stringformat(1, '03d'), '001')
def test_invalid(self):
self.assertEqual(stringformat(1, 'z'), '')
|
bigkevmcd/python-lucky | refs/heads/master | lucky/scripts.py | 1 | #!/usr/bin/env python
import argparse
import sys
import os
from os.path import abspath, join, dirname
from collections import namedtuple
# sys.path.insert(0, abspath(join(dirname(__file__), "..")))
from lucky.config import Config
from lucky.board import Boards
def pprinttable(rows, output=sys.stdout):
if len(rows) > 1:
headers = rows[0]._fields
lens = []
for i in range(len(rows[0])):
lens.append(
len(max([x[i] for x in rows] + [headers[i]],
key=lambda x: len(str(x)))))
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(["-" * n for n in lens])
output.write((hpattern + "\n") % tuple(headers))
output.write(separator + "\n")
for line in rows:
output.write((pattern % tuple(line)) + "\n")
elif len(rows) == 1:
row = rows[0]
hwidth = len(max(row._fields, key=lambda x: len(x)))
for i in range(len(row)):
output.write("%*s = %s" % (hwidth, row._fields[i], row[i]) + "\n")
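# Illustrative example (not part of the CLI below; row values are made up):
# pprinttable renders a list of namedtuples as an aligned, pipe-separated table.
#   Row = namedtuple("Row", ["id", "title"])
#   pprinttable([Row("1", "Backlog"), Row("2", "In Progress")])
# writes something like:
#   id | title
#   ---+------------
#   1  | Backlog
#   2  | In Progress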
def create_parser():
parser = argparse.ArgumentParser(description="Leankit command-line tool")
subparsers = parser.add_subparsers(
title="subcommands", help="subcommand help",
description="valid subcommands",
dest="command")
list_boards = subparsers.add_parser(
"list-boards",
help="list all boards in the account")
list_boards.add_argument(
"--descriptions",
help="Output board descriptions",
default=False, action="store_true")
show_board = subparsers.add_parser(
"show-board", help="Display board details")
show_board.add_argument("board", help="Board id to display")
show_cards = subparsers.add_parser(
"show-cards", help="Display lane cards")
show_cards.add_argument("board", help="Board id to display")
show_cards.add_argument("lane", help="Lane id to display")
return parser
def list_boards(config, args, output=sys.stdout):
board_tuple = namedtuple("Board", ["id", "title"])
boards = []
for board in Boards(config).list():
boards.append(board_tuple(str(board["board_id"]), board["title"]))
pprinttable(boards, output=output)
def show_board(config, args):
board = Boards(config).get(args.board)
items = []
board_tuple = namedtuple("Lane", ["id", "title"])
for lane in board.lanes:
items.append(board_tuple(str(lane.id), lane.title))
pprinttable(items)
def show_cards(config, args):
board = Boards(config).get(args.board)
items = []
card_tuple = namedtuple("Card", ["id", "title", "user", "type"])
lane = board.get_lane_by_id(int(args.lane))
for card in lane.cards:
items.append(
card_tuple(
str(card.id),
card.title,
card.assigned_user,
board.card_types[card.type_id]))
pprinttable(items)
def main():
parser = create_parser()
args = parser.parse_args()
config = Config.load_from_env(os.environ) or Config.load_from_homedir()
if not config:
sys.exit("No configuration loaded")
if args.command == "list-boards":
list_boards(config, args)
elif args.command == "show-board":
show_board(config, args)
elif args.command == "show-cards":
show_cards(config, args)
if __name__ == "__main__":
main()
|
diwer/sublimeconfig | refs/heads/master | Packages/MarkdownEditing/underlined_headers.py | 2 | """Commands for working with with setext-style (underlined) Markdown headers.
Header dashes can be completed with <tab>. For example:
This is an H2
-<tab>
Becomes:
This is an H2
-------------
Inspired by the similar TextMate command.
Also adds "Fix Underlined Markdown Headers" to Tools > Command Palette. After modifying
header text, this command will re-align the underline dashes with the new text length.
"""
import sublime, sublime_plugin
import re, itertools
SETEXT_DASHES_RE = re.compile( r'''
(?: =+ | -+ ) # A run of ---- or ==== underline characters.
\s* # Optional trailing whitespace.
    $ # Must fill the whole line. Don't match "- list items"
''', re.X )
SETEXT_HEADER_RE = re.compile( r'''
^(.+)\n
( =+ | -+ ) # A run of ---- or ==== underline characters.
[ \t]* # Optional trailing whitespace.
    $ # Must fill the whole line. Don't match "- list items"
''', re.X | re.M )
def fix_dashes(view, edit, text_region, dash_region):
"""Replaces the underlined "dash" region of a setext header with a run of
dashes or equal-signs that match the length of the header text."""
if len(view.substr(text_region).strip()) == 0:
# Ignore dashes not under text. They are HRs.
return
old_dashes = view.substr(dash_region)
first_dash = old_dashes[0]
new_dashes = first_dash * text_region.size()
view.replace(edit, dash_region, new_dashes)
class CompleteUnderlinedHeaderCommand(sublime_plugin.TextCommand):
    """If the current selection looks like a setext underline of - or =,
    then inserts enough dash characters to match the length of the previous
    (header text) line."""
def run(self, edit):
for region in self.view.sel():
dashes_line = self.view.line(region)
            # Ignore the first line of the file; there is no header text above it
if dashes_line.begin() == 0: continue
text_line = self.view.line(dashes_line.begin() - 1)
if text_line.begin() < 0: continue
text = self.view.substr(text_line)
dashes = self.view.substr(dashes_line)
# ignore, text_line is a list item
if text.lstrip().startswith("-") and len(dashes.strip()) < 2:
settings = self.view.settings()
use_spaces = bool(settings.get('translate_tabs_to_spaces'))
tab_size = int(settings.get('tab_size', 8))
indent_characters = '\t'
if use_spaces:
indent_characters = ' ' * tab_size
self.view.insert(edit, dashes_line.begin(), indent_characters)
break
m = SETEXT_DASHES_RE.match(dashes)
if m:
fix_dashes(self.view, edit, text_line, dashes_line)
def is_enabled(self):
return bool(self.view.score_selector(self.view.sel()[0].a, "text.html.markdown"))
class FixAllUnderlinedHeadersCommand(sublime_plugin.TextCommand):
    """Searches for all setext headings and resizes them to match the preceding
    header text."""
def description(self):
# Used as the name for Undo.
return 'Fix Underlined Markdown Headers'
def run(self, edit):
lines = self.view.split_by_newlines(sublime.Region(0, self.view.size()))
if len(lines) < 2: return
# Since we're modifying the text, we are shifting all the following
# regions. To avoid this, just go backwards.
lines = reversed(lines)
# Duplicate the iterator and next() it once to get farther ahead.
# Since lines are reversed, this will always point to the line *above*
# the current one: the text of the header.
prev_lines, lines = itertools.tee(lines)
next(prev_lines)
for text_line, dashes_line in zip(prev_lines, lines):
dashes_text = self.view.substr(dashes_line)
m = SETEXT_DASHES_RE.match(dashes_text)
if m:
fix_dashes(self.view, edit, text_line, dashes_line)
def is_enabled(self):
return bool(self.view.score_selector(self.view.sel()[0].a, "text.html.markdown"))
class ConvertToAtxCommand(sublime_plugin.TextCommand):
def run(self, edit, closed=False):
regions = list(self.view.sel())
if len(regions) == 1 and regions[0].size() == 0:
regions = [sublime.Region(0, self.view.size())]
regions.reverse()
for region in regions:
txt = self.view.substr(region)
matches = list(SETEXT_HEADER_RE.finditer(txt))
matches.reverse()
for m in matches:
mreg = sublime.Region(region.begin()+m.start(), region.begin()+m.end())
atx = "# "
if '-' in m.group(2):
atx = "#" + atx
closing = atx[::-1] if closed else ""
self.view.replace(edit, mreg, atx + m.group(1) + closing)
def is_enabled(self):
return bool(self.view.score_selector(self.view.sel()[0].a, "text.html.markdown"))
|
MountainWei/nova | refs/heads/master | nova/tests/unit/test_versions.py | 51 | # Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import six
from six.moves import builtins
from nova import test
from nova import version
class VersionTestCase(test.NoDBTestCase):
"""Test cases for Versions code."""
def test_version_string_with_package_is_good(self):
"""Ensure uninstalled code get version string."""
self.stubs.Set(version.version_info, 'version_string',
lambda: '5.5.5.5')
self.stubs.Set(version, 'NOVA_PACKAGE', 'g9ec3421')
self.assertEqual("5.5.5.5-g9ec3421",
version.version_string_with_package())
def test_release_file(self):
version.loaded = False
real_open = builtins.open
real_find_file = cfg.CONF.find_file
def fake_find_file(self, name):
if name == "release":
return "/etc/nova/release"
return real_find_file(self, name)
def fake_open(path, *args, **kwargs):
if path == "/etc/nova/release":
data = """[Nova]
vendor = ACME Corporation
product = ACME Nova
package = 1337"""
return six.StringIO(data)
return real_open(path, *args, **kwargs)
self.stubs.Set(builtins, 'open', fake_open)
self.stubs.Set(cfg.ConfigOpts, 'find_file', fake_find_file)
self.assertEqual(version.vendor_string(), "ACME Corporation")
self.assertEqual(version.product_string(), "ACME Nova")
self.assertEqual(version.package_string(), "1337")
|
W4TCH0UT/kernel-msm-ghost | refs/heads/master | scripts/build-all.py | 1182 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
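# For illustration: scan_configs() keys each defconfig by its basename without
# the "_defconfig" suffix, e.g. (actual names depend on the tree)
#   {'msm8960': 'arch/arm/configs/msm8960_defconfig',
#    'qsd8x50': 'arch/arm/configs/qsd8x50_defconfig'}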
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
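# Illustrative invocations (the 'msm8960' target is a placeholder; run --list to
# see the defconfigs actually present in this tree):
#   build-all.py --list
#   build-all.py -j 8 perf
#   build-all.py --updateconfigs='CONFIG_USE_THING=y' msm8960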
|
axbaretto/beam | refs/heads/master | sdks/python/.tox/docs/lib/python2.7/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 285 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
import sys
# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
# system, use it to handle IPAddress ServerAltnames (this was added in
# python-3.5) otherwise only do DNS matching. This allows
# backports.ssl_match_hostname to continue to be used all the way back to
# python-2.4.
try:
import ipaddress
except ImportError:
ipaddress = None
__version__ = '3.5.0.1'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
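# Illustrative behaviour of the helper above (hostnames are examples only):
#   _dnsname_match('*.example.com', 'www.example.com')  # match object (truthy)
#   _dnsname_match('*.example.com', 'example.com')      # None: '*' needs its own label
#   _dnsname_match('example.com', 'EXAMPLE.COM')         # True: comparison is case-insensitive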
def _to_unicode(obj):
if isinstance(obj, str) and sys.version_info < (3,):
obj = unicode(obj, encoding='ascii', errors='strict')
return obj
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
# Divergence from upstream: ipaddress can't handle byte str
host_ip = ipaddress.ip_address(_to_unicode(hostname))
except ValueError:
# Not an IP address (common case)
host_ip = None
except UnicodeError:
# Divergence from upstream: Have to deal with ipaddress not taking
# byte strings. addresses should be all ascii, so we consider it not
# an ipaddress in this case
host_ip = None
except AttributeError:
# Divergence from upstream: Make ipaddress library optional
if ipaddress is None:
host_ip = None
else:
raise
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
kosz85/django | refs/heads/master | tests/update_only_fields/models.py | 71 |
from django.db import models
GENDER_CHOICES = (
('M', 'Male'),
('F', 'Female'),
)
class Account(models.Model):
num = models.IntegerField()
class Person(models.Model):
name = models.CharField(max_length=20)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
pid = models.IntegerField(null=True, default=None)
def __str__(self):
return self.name
class Employee(Person):
employee_num = models.IntegerField(default=0)
profile = models.ForeignKey('Profile', models.SET_NULL, related_name='profiles', null=True)
accounts = models.ManyToManyField('Account', related_name='employees', blank=True)
class Profile(models.Model):
name = models.CharField(max_length=200)
salary = models.FloatField(default=1000.0)
def __str__(self):
return self.name
class ProxyEmployee(Employee):
class Meta:
proxy = True
|
stonegithubs/odoo | refs/heads/8.0 | addons/purchase_requisition/purchase_requisition.py | 200 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
class purchase_requisition(osv.osv):
_name = "purchase.requisition"
_description = "Purchase Requisition"
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _get_po_line(self, cr, uid, ids, field_names, arg=None, context=None):
result = dict((res_id, []) for res_id in ids)
for element in self.browse(cr, uid, ids, context=context):
for po in element.purchase_ids:
result[element.id] += [po_line.id for po_line in po.order_line]
return result
_columns = {
'name': fields.char('Call for Bids Reference', required=True, copy=False),
'origin': fields.char('Source Document'),
'ordering_date': fields.date('Scheduled Ordering Date'),
'date_end': fields.datetime('Bid Submission Deadline'),
'schedule_date': fields.date('Scheduled Date', select=True, help="The expected and scheduled date where all the products are received"),
'user_id': fields.many2one('res.users', 'Responsible'),
'exclusive': fields.selection([('exclusive', 'Select only one RFQ (exclusive)'), ('multiple', 'Select multiple RFQ')], 'Bid Selection Type', required=True, help="Select only one RFQ (exclusive): On the confirmation of a purchase order, it cancels the remaining purchase order.\nSelect multiple RFQ: It allows to have multiple purchase orders.On confirmation of a purchase order it does not cancel the remaining orders"""),
'description': fields.text('Description'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'purchase_ids': fields.one2many('purchase.order', 'requisition_id', 'Purchase Orders', states={'done': [('readonly', True)]}),
'po_line_ids': fields.function(_get_po_line, method=True, type='one2many', relation='purchase.order.line', string='Products by supplier'),
'line_ids': fields.one2many('purchase.requisition.line', 'requisition_id', 'Products to Purchase', states={'done': [('readonly', True)]}, copy=True),
'procurement_id': fields.many2one('procurement.order', 'Procurement', ondelete='set null', copy=False),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'state': fields.selection([('draft', 'Draft'), ('in_progress', 'Confirmed'),
('open', 'Bid Selection'), ('done', 'PO Created'),
('cancel', 'Cancelled')],
'Status', track_visibility='onchange', required=True,
copy=False),
'multiple_rfq_per_supplier': fields.boolean('Multiple RFQ per supplier'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', required=True),
}
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
return obj_data.get_object_reference(cr, uid, 'stock', 'picking_type_in')[1]
_defaults = {
'state': 'draft',
'exclusive': 'multiple',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition', context=c),
'user_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).id,
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),
'picking_type_id': _get_picking_in,
}
def tender_cancel(self, cr, uid, ids, context=None):
purchase_order_obj = self.pool.get('purchase.order')
# try to set all associated quotations to cancel state
for tender in self.browse(cr, uid, ids, context=context):
for purchase_order in tender.purchase_ids:
purchase_order_obj.action_cancel(cr, uid, [purchase_order.id], context=context)
purchase_order_obj.message_post(cr, uid, [purchase_order.id], body=_('Cancelled by the tender associated to this quotation.'), context=context)
return self.write(cr, uid, ids, {'state': 'cancel'})
def tender_in_progress(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'in_progress'}, context=context)
def tender_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def tender_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'draft'})
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id])
self.create_workflow(cr, uid, [p_id])
return True
def tender_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def open_product_line(self, cr, uid, ids, context=None):
""" This opens product line view to view all lines from the different quotations, groupby default by product and partner to show comparaison
between supplier price
@return: the product line tree view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase_requisition', 'purchase_line_tree', context=context)
res['context'] = context
po_lines = self.browse(cr, uid, ids, context=context)[0].po_line_ids
res['context'] = {
'search_default_groupby_product': True,
'search_default_hide_cancelled': True,
'tender_id': ids[0],
}
res['domain'] = [('id', 'in', [line.id for line in po_lines])]
return res
def open_rfq(self, cr, uid, ids, context=None):
""" This opens rfq view to view all quotations associated to the call for bids
@return: the RFQ tree view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase', 'purchase_rfq', context=context)
res['context'] = context
po_ids = [po.id for po in self.browse(cr, uid, ids, context=context)[0].purchase_ids]
res['domain'] = [('id', 'in', po_ids)]
return res
def _prepare_purchase_order(self, cr, uid, requisition, supplier, context=None):
supplier_pricelist = supplier.property_product_pricelist_purchase
return {
'origin': requisition.name,
'date_order': requisition.date_end or fields.datetime.now(),
'partner_id': supplier.id,
'pricelist_id': supplier_pricelist.id,
'currency_id': supplier_pricelist and supplier_pricelist.currency_id.id or requisition.company_id.currency_id.id,
'location_id': requisition.procurement_id and requisition.procurement_id.location_id.id or requisition.picking_type_id.default_location_dest_id.id,
'company_id': requisition.company_id.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'requisition_id': requisition.id,
'notes': requisition.description,
'picking_type_id': requisition.picking_type_id.id
}
def _prepare_purchase_order_line(self, cr, uid, requisition, requisition_line, purchase_id, supplier, context=None):
if context is None:
context = {}
po_line_obj = self.pool.get('purchase.order.line')
product_uom = self.pool.get('product.uom')
product = requisition_line.product_id
default_uom_po_id = product.uom_po_id.id
ctx = context.copy()
ctx['tz'] = requisition.user_id.tz
date_order = requisition.ordering_date and fields.date.date_to_datetime(self, cr, uid, requisition.ordering_date, context=ctx) or fields.datetime.now()
qty = product_uom._compute_qty(cr, uid, requisition_line.product_uom_id.id, requisition_line.product_qty, default_uom_po_id)
supplier_pricelist = supplier.property_product_pricelist_purchase and supplier.property_product_pricelist_purchase.id or False
vals = po_line_obj.onchange_product_id(
cr, uid, [], supplier_pricelist, product.id, qty, default_uom_po_id,
supplier.id, date_order=date_order,
fiscal_position_id=supplier.property_account_position.id,
date_planned=requisition_line.schedule_date,
name=False, price_unit=False, state='draft', context=context)['value']
vals.update({
'order_id': purchase_id,
'product_id': product.id,
'account_analytic_id': requisition_line.account_analytic_id.id,
'taxes_id': [(6, 0, vals.get('taxes_id', []))],
})
return vals
def make_purchase_order(self, cr, uid, ids, partner_id, context=None):
"""
Create New RFQ for Supplier
"""
context = dict(context or {})
assert partner_id, 'Supplier should be specified'
purchase_order = self.pool.get('purchase.order')
purchase_order_line = self.pool.get('purchase.order.line')
res_partner = self.pool.get('res.partner')
supplier = res_partner.browse(cr, uid, partner_id, context=context)
res = {}
for requisition in self.browse(cr, uid, ids, context=context):
if not requisition.multiple_rfq_per_supplier and supplier.id in filter(lambda x: x, [rfq.state != 'cancel' and rfq.partner_id.id or None for rfq in requisition.purchase_ids]):
raise osv.except_osv(_('Warning!'), _('You have already one %s purchase order for this partner, you must cancel this purchase order to create a new quotation.') % rfq.state)
context.update({'mail_create_nolog': True})
purchase_id = purchase_order.create(cr, uid, self._prepare_purchase_order(cr, uid, requisition, supplier, context=context), context=context)
purchase_order.message_post(cr, uid, [purchase_id], body=_("RFQ created"), context=context)
res[requisition.id] = purchase_id
for line in requisition.line_ids:
purchase_order_line.create(cr, uid, self._prepare_purchase_order_line(cr, uid, requisition, line, purchase_id, supplier, context=context), context=context)
return res
def check_valid_quotation(self, cr, uid, quotation, context=None):
"""
Check if a quotation has all his order lines bid in order to confirm it if its the case
return True if all order line have been selected during bidding process, else return False
args : 'quotation' must be a browse record
"""
for line in quotation.order_line:
if line.state != 'confirmed' or line.product_qty != line.quantity_bid:
return False
return True
def _prepare_po_from_tender(self, cr, uid, tender, context=None):
""" Prepare the values to write in the purchase order
created from a tender.
:param tender: the source tender from which we generate a purchase order
"""
return {'order_line': [],
'requisition_id': tender.id,
'origin': tender.name}
def _prepare_po_line_from_tender(self, cr, uid, tender, line, purchase_id, context=None):
""" Prepare the values to write in the purchase order line
created from a line of the tender.
:param tender: the source tender from which we generate a purchase order
:param line: the source tender's line from which we generate a line
:param purchase_id: the id of the new purchase
"""
return {'product_qty': line.quantity_bid,
'order_id': purchase_id}
def generate_po(self, cr, uid, ids, context=None):
"""
Generate all purchase order based on selected lines, should only be called on one tender at a time
"""
po = self.pool.get('purchase.order')
poline = self.pool.get('purchase.order.line')
id_per_supplier = {}
for tender in self.browse(cr, uid, ids, context=context):
if tender.state == 'done':
raise osv.except_osv(_('Warning!'), _('You have already generate the purchase order(s).'))
confirm = False
#check that we have at least confirm one line
for po_line in tender.po_line_ids:
if po_line.state == 'confirmed':
confirm = True
break
if not confirm:
raise osv.except_osv(_('Warning!'), _('You have no line selected for buying.'))
#check for complete RFQ
for quotation in tender.purchase_ids:
if (self.check_valid_quotation(cr, uid, quotation, context=context)):
#use workflow to set PO state to confirm
po.signal_workflow(cr, uid, [quotation.id], 'purchase_confirm')
#get other confirmed lines per supplier
for po_line in tender.po_line_ids:
#only take into account confirmed line that does not belong to already confirmed purchase order
if po_line.state == 'confirmed' and po_line.order_id.state in ['draft', 'sent', 'bid']:
if id_per_supplier.get(po_line.partner_id.id):
id_per_supplier[po_line.partner_id.id].append(po_line)
else:
id_per_supplier[po_line.partner_id.id] = [po_line]
#generate po based on supplier and cancel all previous RFQ
ctx = dict(context or {}, force_requisition_id=True)
for supplier, product_line in id_per_supplier.items():
#copy a quotation for this supplier and change order_line then validate it
quotation_id = po.search(cr, uid, [('requisition_id', '=', tender.id), ('partner_id', '=', supplier)], limit=1)[0]
vals = self._prepare_po_from_tender(cr, uid, tender, context=context)
new_po = po.copy(cr, uid, quotation_id, default=vals, context=context)
#duplicate po_line and change product_qty if needed and associate them to newly created PO
for line in product_line:
vals = self._prepare_po_line_from_tender(cr, uid, tender, line, new_po, context=context)
poline.copy(cr, uid, line.id, default=vals, context=context)
#use workflow to set new PO state to confirm
po.signal_workflow(cr, uid, [new_po], 'purchase_confirm')
#cancel other orders
self.cancel_unconfirmed_quotations(cr, uid, tender, context=context)
#set tender to state done
self.signal_workflow(cr, uid, [tender.id], 'done')
return True
def cancel_unconfirmed_quotations(self, cr, uid, tender, context=None):
#cancel other orders
po = self.pool.get('purchase.order')
for quotation in tender.purchase_ids:
if quotation.state in ['draft', 'sent', 'bid']:
self.pool.get('purchase.order').signal_workflow(cr, uid, [quotation.id], 'purchase_cancel')
po.message_post(cr, uid, [quotation.id], body=_('Cancelled by the call for bids associated to this request for quotation.'), context=context)
return True
class purchase_requisition_line(osv.osv):
_name = "purchase.requisition.line"
_description = "Purchase Requisition Line"
_rec_name = 'product_id'
_columns = {
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok', '=', True)]),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'requisition_id': fields.many2one('purchase.requisition', 'Call for Bids', ondelete='cascade'),
'company_id': fields.related('requisition_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account',),
'schedule_date': fields.date('Scheduled Date'),
}
def onchange_product_id(self, cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=None):
""" Changes UoM and name if product_id changes.
@param name: Name of the field
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
value = {'product_uom_id': ''}
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
value = {'product_uom_id': prod.uom_id.id, 'product_qty': 1.0}
if not analytic_account:
value.update({'account_analytic_id': parent_analytic_account})
if not date:
value.update({'schedule_date': parent_date})
return {'value': value}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition.line', context=c),
}
class purchase_order(osv.osv):
_inherit = "purchase.order"
_columns = {
'requisition_id': fields.many2one('purchase.requisition', 'Call for Bids', copy=False),
}
def wkf_confirm_order(self, cr, uid, ids, context=None):
res = super(purchase_order, self).wkf_confirm_order(cr, uid, ids, context=context)
proc_obj = self.pool.get('procurement.order')
for po in self.browse(cr, uid, ids, context=context):
if po.requisition_id and (po.requisition_id.exclusive == 'exclusive'):
for order in po.requisition_id.purchase_ids:
if order.id != po.id:
proc_ids = proc_obj.search(cr, uid, [('purchase_id', '=', order.id)])
if proc_ids and po.state == 'confirmed':
proc_obj.write(cr, uid, proc_ids, {'purchase_id': po.id})
order.signal_workflow('purchase_cancel')
po.requisition_id.tender_done(context=context)
return res
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
stock_move_lines = super(purchase_order, self)._prepare_order_line_move(cr, uid, order, order_line, picking_id, group_id, context=context)
if order.requisition_id and order.requisition_id.procurement_id and order.requisition_id.procurement_id.move_dest_id:
for i in range(0, len(stock_move_lines)):
stock_move_lines[i]['move_dest_id'] = order.requisition_id.procurement_id.move_dest_id.id
return stock_move_lines
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
_columns = {
        'quantity_bid': fields.float('Quantity Bid', digits_compute=dp.get_precision('Product Unit of Measure'), help="Technical field for not losing the initial information about the quantity proposed in the bid"),
}
def action_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def action_confirm(self, cr, uid, ids, context=None):
super(purchase_order_line, self).action_confirm(cr, uid, ids, context=context)
for element in self.browse(cr, uid, ids, context=context):
if not element.quantity_bid:
self.write(cr, uid, ids, {'quantity_bid': element.product_qty}, context=context)
return True
def generate_po(self, cr, uid, tender_id, context=None):
#call generate_po from tender with active_id. Called from js widget
return self.pool.get('purchase.requisition').generate_po(cr, uid, [tender_id], context=context)
class product_template(osv.osv):
_inherit = 'product.template'
_columns = {
'purchase_requisition': fields.boolean('Call for Bids', help="Check this box to generate Call for Bids instead of generating requests for quotation from procurement.")
}
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'requisition_id': fields.many2one('purchase.requisition', 'Latest Requisition')
}
def _run(self, cr, uid, procurement, context=None):
requisition_obj = self.pool.get('purchase.requisition')
warehouse_obj = self.pool.get('stock.warehouse')
if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
warehouse_id = warehouse_obj.search(cr, uid, [('company_id', '=', procurement.company_id.id)], context=context)
requisition_id = requisition_obj.create(cr, uid, {
'origin': procurement.origin,
'date_end': procurement.date_planned,
'warehouse_id': warehouse_id and warehouse_id[0] or False,
'company_id': procurement.company_id.id,
'procurement_id': procurement.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'line_ids': [(0, 0, {
'product_id': procurement.product_id.id,
'product_uom_id': procurement.product_uom.id,
'product_qty': procurement.product_qty
})],
})
self.message_post(cr, uid, [procurement.id], body=_("Purchase Requisition created"), context=context)
return self.write(cr, uid, [procurement.id], {'requisition_id': requisition_id}, context=context)
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
if procurement.requisition_id.state == 'done':
if any([purchase.shipped for purchase in procurement.requisition_id.purchase_ids]):
return True
return False
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
|
zhangjunlei26/servo | refs/heads/master | tests/wpt/css-tests/tools/runner/update_manifest.py | 316 | import imp
import json
import os
import sys
here = os.path.dirname(__file__)
localpaths = imp.load_source("localpaths", os.path.abspath(os.path.join(here, os.pardir, "localpaths.py")))
root = localpaths.repo_root
import manifest
def main(request, response):
path = os.path.join(root, "MANIFEST.json")
manifest_file = manifest.manifest.load(root, path)
manifest.update.update(root, "/", manifest_file)
manifest.manifest.write(manifest_file, path)
return [("Content-Type", "application/json")], json.dumps({"url": "/MANIFEST.json"})
|
smi96/django-blog_website | refs/heads/master | lib/python2.7/site-packages/PIL/PcfFontFile.py | 72 | #
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id$
#
# portable compiled font file parser
#
# history:
# 1997-08-19 fl created
# 2003-09-13 fl fixed loading of unicode fonts
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
from PIL import FontFile
from PIL import _binary
# --------------------------------------------------------------------
# declarations
PCF_MAGIC = 0x70636601 # "\x01fcp"
PCF_PROPERTIES = (1 << 0)
PCF_ACCELERATORS = (1 << 1)
PCF_METRICS = (1 << 2)
PCF_BITMAPS = (1 << 3)
PCF_INK_METRICS = (1 << 4)
PCF_BDF_ENCODINGS = (1 << 5)
PCF_SWIDTHS = (1 << 6)
PCF_GLYPH_NAMES = (1 << 7)
PCF_BDF_ACCELERATORS = (1 << 8)
BYTES_PER_ROW = [
lambda bits: ((bits+7) >> 3),
lambda bits: ((bits+15) >> 3) & ~1,
lambda bits: ((bits+31) >> 3) & ~3,
lambda bits: ((bits+63) >> 3) & ~7,
]
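# For illustration: each entry pads a row of <bits> pixels up to a byte, halfword,
# word or doubleword boundary, e.g. BYTES_PER_ROW[0](10) == 2 and BYTES_PER_ROW[2](10) == 4.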
i8 = _binary.i8
l16 = _binary.i16le
l32 = _binary.i32le
b16 = _binary.i16be
b32 = _binary.i32be
def sz(s, o):
return s[o:s.index(b"\0", o)]
##
# Font file plugin for the X11 PCF format.
class PcfFontFile(FontFile.FontFile):
name = "name"
def __init__(self, fp):
magic = l32(fp.read(4))
if magic != PCF_MAGIC:
raise SyntaxError("not a PCF file")
FontFile.FontFile.__init__(self)
count = l32(fp.read(4))
self.toc = {}
for i in range(count):
type = l32(fp.read(4))
self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4))
self.fp = fp
self.info = self._load_properties()
metrics = self._load_metrics()
bitmaps = self._load_bitmaps(metrics)
encoding = self._load_encoding()
#
# create glyph structure
for ch in range(256):
ix = encoding[ch]
if ix is not None:
x, y, l, r, w, a, d, f = metrics[ix]
glyph = (w, 0), (l, d-y, x+l, d), (0, 0, x, y), bitmaps[ix]
self.glyph[ch] = glyph
def _getformat(self, tag):
format, size, offset = self.toc[tag]
fp = self.fp
fp.seek(offset)
format = l32(fp.read(4))
if format & 4:
i16, i32 = b16, b32
else:
i16, i32 = l16, l32
return fp, format, i16, i32
def _load_properties(self):
#
# font properties
properties = {}
fp, format, i16, i32 = self._getformat(PCF_PROPERTIES)
nprops = i32(fp.read(4))
# read property description
p = []
for i in range(nprops):
p.append((i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4))))
if nprops & 3:
fp.seek(4 - (nprops & 3), 1) # pad
data = fp.read(i32(fp.read(4)))
for k, s, v in p:
k = sz(data, k)
if s:
v = sz(data, v)
properties[k] = v
return properties
def _load_metrics(self):
#
# font metrics
metrics = []
fp, format, i16, i32 = self._getformat(PCF_METRICS)
append = metrics.append
if (format & 0xff00) == 0x100:
# "compressed" metrics
for i in range(i16(fp.read(2))):
left = i8(fp.read(1)) - 128
right = i8(fp.read(1)) - 128
width = i8(fp.read(1)) - 128
ascent = i8(fp.read(1)) - 128
descent = i8(fp.read(1)) - 128
xsize = right - left
ysize = ascent + descent
append(
(xsize, ysize, left, right, width,
ascent, descent, 0)
)
else:
# "jumbo" metrics
for i in range(i32(fp.read(4))):
left = i16(fp.read(2))
right = i16(fp.read(2))
width = i16(fp.read(2))
ascent = i16(fp.read(2))
descent = i16(fp.read(2))
attributes = i16(fp.read(2))
xsize = right - left
ysize = ascent + descent
append(
(xsize, ysize, left, right, width,
ascent, descent, attributes)
)
return metrics
def _load_bitmaps(self, metrics):
#
# bitmap data
bitmaps = []
fp, format, i16, i32 = self._getformat(PCF_BITMAPS)
nbitmaps = i32(fp.read(4))
if nbitmaps != len(metrics):
raise IOError("Wrong number of bitmaps")
offsets = []
for i in range(nbitmaps):
offsets.append(i32(fp.read(4)))
bitmapSizes = []
for i in range(4):
bitmapSizes.append(i32(fp.read(4)))
# byteorder = format & 4 # non-zero => MSB
bitorder = format & 8 # non-zero => MSB
padindex = format & 3
bitmapsize = bitmapSizes[padindex]
offsets.append(bitmapsize)
data = fp.read(bitmapsize)
pad = BYTES_PER_ROW[padindex]
mode = "1;R"
if bitorder:
mode = "1"
for i in range(nbitmaps):
x, y, l, r, w, a, d, f = metrics[i]
b, e = offsets[i], offsets[i+1]
bitmaps.append(
Image.frombytes("1", (x, y), data[b:e], "raw", mode, pad(x))
)
return bitmaps
def _load_encoding(self):
# map character code to bitmap index
encoding = [None] * 256
fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)
firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2))
firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2))
default = i16(fp.read(2))
nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1)
for i in range(nencoding):
encodingOffset = i16(fp.read(2))
if encodingOffset != 0xFFFF:
try:
encoding[i+firstCol] = encodingOffset
except IndexError:
break # only load ISO-8859-1 glyphs
return encoding
|
letolab/cookiecutter | refs/heads/master | cookiecutter/config.py | 18 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.config
-------------------
Global configuration handling
"""
from __future__ import unicode_literals
import copy
import logging
import os
import io
import yaml
from .exceptions import ConfigDoesNotExistException
from .exceptions import InvalidConfiguration
logger = logging.getLogger(__name__)
DEFAULT_CONFIG = {
'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
'default_context': {}
}
def get_config(config_path):
"""
Retrieve the config from the specified path, returning it as a config dict.
"""
if not os.path.exists(config_path):
raise ConfigDoesNotExistException
logger.debug('config_path is {0}'.format(config_path))
with io.open(config_path, encoding='utf-8') as file_handle:
try:
yaml_dict = yaml.safe_load(file_handle)
except yaml.scanner.ScannerError:
raise InvalidConfiguration(
                '{0} is not a valid YAML file'.format(config_path))
config_dict = copy.copy(DEFAULT_CONFIG)
config_dict.update(yaml_dict)
return config_dict
def get_user_config():
"""
Retrieve config from the user's ~/.cookiecutterrc, if it exists.
Otherwise, return None.
"""
# TODO: test on windows...
USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
if os.path.exists(USER_CONFIG_PATH):
return get_config(USER_CONFIG_PATH)
return copy.copy(DEFAULT_CONFIG)
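# Illustrative usage sketch (paths are examples): get_user_config() never raises
# when ~/.cookiecutterrc is missing, so callers can rely on both keys being set:
#   config = get_user_config()
#   config['cookiecutters_dir']  # defaults to the expanded ~/.cookiecutters/
#   config['default_context']    # {} unless overridden in ~/.cookiecutterrc
# get_config(path) raises ConfigDoesNotExistException for a missing file and
# InvalidConfiguration for unparseable YAML.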
|
gkc1000/pyscf | refs/heads/master | pyscf/nao/test/test_0061_gw_rescf_g0w0_h2o.py | 1 | from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao import gw as gw_c
class KnowValues(unittest.TestCase):
def test_rescf(self):
""" reSCF then G0W0 """
fc = \
"""-1.176137582599898090e+00
-6.697973984258517310e-01
-5.155143130039178123e-01
-4.365448724088398791e-01
2.104535161143837596e-01
2.985738190760626187e-01
5.383631831528181699e-01
5.960427511708059622e-01
6.298425248864513160e-01
6.702150570679562547e-01
7.488635881500678160e-01
1.030485556414411974e+00
1.133596236538136015e+00
1.308430815822860804e+00
1.322564760433334374e+00
1.444841711461231304e+00
1.831867938363858750e+00
1.902393397937107045e+00
1.977107479006525059e+00
2.119748779125555149e+00
2.150570967014801216e+00
2.899024682518652973e+00
3.912773887375614823e+00 """
dname = os.path.dirname(os.path.abspath(__file__))
gw = gw_c(label='water', cd=dname, verbosity=0, nocc=8, nvrt=6, nocc_conv=4, nvrt_conv=4, rescf=True, tol_ia=1e-6,)
gw.kernel_gw()
np.savetxt('eigvals_g0w0_pyscf_rescf_water_0061.txt', gw.mo_energy_gw[0,:,:].T)
#gw.report()
for e,eref_str in zip(gw.mo_energy_gw[0,0,:],fc.splitlines()):
self.assertAlmostEqual(e,float(eref_str))
if __name__ == "__main__": unittest.main()
|
ntpwilliam/totravel | refs/heads/master | mysite/login_account.py | 1 | from django.http import HttpResponse
def user_list(request):
return HttpResponse("ha ha first Acconnt page") |
mhauskn/HFO | refs/heads/master | example/example_defense_agent.py | 2 | #!/usr/bin/env python
# encoding: utf-8
# Before running this program, first Start HFO server:
# $> ./bin/HFO --offense-agents 1
import itertools
from hfo import *
def main():
# Create the HFO Environment
hfo = HFOEnvironment()
# Connect to the server with the specified
# feature set. See feature sets in hfo.py/hfo.hpp.
hfo.connectToServer(LOW_LEVEL_FEATURE_SET,
'bin/teams/base/config/formations-dt', 6000,
'localhost', 'base_right', True)
for episode in itertools.count():
status = IN_GAME
while status == IN_GAME:
# Grab the state features from the environment
features = hfo.getState()
# Take an action and get the current game status
hfo.act(DASH, 20.0, 0.)
# Advance the environment and get the game status
status = hfo.step()
# Check the outcome of the episode
print(('Episode %d ended with %s'%(episode, hfo.statusToString(status))))
# Quit if the server goes down
if status == SERVER_DOWN:
hfo.act(QUIT)
exit()
if __name__ == '__main__':
main()
|
xrg/django-static-gitified | refs/heads/master | django/contrib/formtools/wizard/storage/session.py | 426 | from django.contrib.formtools.wizard import storage
class SessionStorage(storage.BaseStorage):
def __init__(self, *args, **kwargs):
super(SessionStorage, self).__init__(*args, **kwargs)
if self.prefix not in self.request.session:
self.init_data()
def _get_data(self):
self.request.session.modified = True
return self.request.session[self.prefix]
def _set_data(self, value):
self.request.session[self.prefix] = value
self.request.session.modified = True
data = property(_get_data, _set_data)
|
SoSBallers/pogo-map | refs/heads/master | pogom/__init__.py | 27 | #!/usr/bin/python
# -*- coding: utf-8 -*-
config = {
'LOCALE': 'en',
'LOCALES_DIR': 'static/locales',
'ROOT_PATH': None,
'ORIGINAL_LATITUDE': None,
'ORIGINAL_LONGITUDE': None,
'GMAPS_KEY': None,
'REQ_SLEEP': 1,
'REQ_HEAVY_SLEEP': 30,
'REQ_MAX_FAILED': 5
}
|
msanterre/deep_learning | refs/heads/master | tv-script-generation/helper.py | 175 | import os
import pickle
def load_data(path):
"""
Load Dataset from File
"""
input_file = os.path.join(path)
with open(input_file, "r") as f:
data = f.read()
return data
def preprocess_and_save_data(dataset_path, token_lookup, create_lookup_tables):
"""
Preprocess Text Data
"""
text = load_data(dataset_path)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
token_dict = token_lookup()
for key, token in token_dict.items():
text = text.replace(key, ' {} '.format(token))
text = text.lower()
text = text.split()
vocab_to_int, int_to_vocab = create_lookup_tables(text)
int_text = [vocab_to_int[word] for word in text]
pickle.dump((int_text, vocab_to_int, int_to_vocab, token_dict), open('preprocess.p', 'wb'))
def load_preprocess():
"""
    Load the preprocessed training data (int_text, vocab_to_int, int_to_vocab, token_dict) from preprocess.p
"""
return pickle.load(open('preprocess.p', mode='rb'))
def save_params(params):
"""
Save parameters to file
"""
pickle.dump(params, open('params.p', 'wb'))
def load_params():
"""
Load parameters from file
"""
return pickle.load(open('params.p', mode='rb'))
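# Illustrative round trip (values are hypothetical, not part of the original helpers):
#   save_params((seq_length, save_dir))       # pickles the tuple to params.p
#   seq_length, save_dir = load_params()      # restores the same tuple later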
|
mindnervestech/mnrp | refs/heads/master | openerp/addons/base/ir/ir_http.py | 27 | #----------------------------------------------------------
# ir_http modular http routing
#----------------------------------------------------------
import logging
import re
import sys
import werkzeug.exceptions
import werkzeug.routing
import werkzeug.urls
import werkzeug.utils
import openerp
import openerp.exceptions
import openerp.models
from openerp import http
from openerp.http import request
from openerp.osv import osv, orm
_logger = logging.getLogger(__name__)
UID_PLACEHOLDER = object()
class ModelConverter(werkzeug.routing.BaseConverter):
def __init__(self, url_map, model=False):
super(ModelConverter, self).__init__(url_map)
self.model = model
self.regex = '([0-9]+)'
def to_python(self, value):
m = re.match(self.regex, value)
return request.registry[self.model].browse(
request.cr, UID_PLACEHOLDER, int(m.group(1)), context=request.context)
def to_url(self, value):
return value.id
class ModelsConverter(werkzeug.routing.BaseConverter):
def __init__(self, url_map, model=False):
super(ModelsConverter, self).__init__(url_map)
self.model = model
# TODO add support for slug in the form [A-Za-z0-9-] bla-bla-89 -> id 89
self.regex = '([0-9,]+)'
def to_python(self, value):
return request.registry[self.model].browse(request.cr, UID_PLACEHOLDER, [int(i) for i in value.split(',')], context=request.context)
def to_url(self, value):
return ",".join(i.id for i in value)
class ir_http(osv.AbstractModel):
_name = 'ir.http'
_description = "HTTP routing"
def _get_converters(self):
return {'model': ModelConverter, 'models': ModelsConverter}
def _find_handler(self, return_rule=False):
return self.routing_map().bind_to_environ(request.httprequest.environ).match(return_rule=return_rule)
def _auth_method_user(self):
request.uid = request.session.uid
if not request.uid:
raise http.SessionExpiredException("Session expired")
def _auth_method_none(self):
request.uid = None
def _auth_method_public(self):
if not request.session.uid:
dummy, request.uid = self.pool['ir.model.data'].get_object_reference(request.cr, openerp.SUPERUSER_ID, 'base', 'public_user')
else:
request.uid = request.session.uid
def _authenticate(self, auth_method='user'):
try:
if request.session.uid:
try:
request.session.check_security()
# what if error in security.check()
# -> res_users.check()
# -> res_users.check_credentials()
except (openerp.exceptions.AccessDenied, openerp.http.SessionExpiredException):
# All other exceptions mean undetermined status (e.g. connection pool full),
# let them bubble up
request.session.logout(keep_db=True)
getattr(self, "_auth_method_%s" % auth_method)()
except (openerp.exceptions.AccessDenied, openerp.http.SessionExpiredException):
raise
except Exception:
_logger.exception("Exception during request Authentication.")
raise openerp.exceptions.AccessDenied()
return auth_method
def _handle_exception(self, exception):
# If handle_exception returns something different than None, it will be used as a response
try:
return request._handle_exception(exception)
except openerp.exceptions.AccessDenied:
return werkzeug.exceptions.Forbidden()
def _dispatch(self):
# locate the controller method
try:
rule, arguments = self._find_handler(return_rule=True)
func = rule.endpoint
except werkzeug.exceptions.NotFound, e:
return self._handle_exception(e)
# check authentication level
try:
auth_method = self._authenticate(func.routing["auth"])
except Exception as e:
return self._handle_exception(e)
processing = self._postprocess_args(arguments, rule)
if processing:
return processing
# set and execute handler
try:
request.set_handler(func, arguments, auth_method)
result = request.dispatch()
if isinstance(result, Exception):
raise result
except Exception, e:
return self._handle_exception(e)
return result
def _postprocess_args(self, arguments, rule):
""" post process arg to set uid on browse records """
for name, arg in arguments.items():
if isinstance(arg, orm.browse_record) and arg._uid is UID_PLACEHOLDER:
arguments[name] = arg.sudo(request.uid)
try:
arg.exists()
except openerp.models.MissingError:
return self._handle_exception(werkzeug.exceptions.NotFound())
def routing_map(self):
if not hasattr(self, '_routing_map'):
_logger.info("Generating routing map")
cr = request.cr
m = request.registry.get('ir.module.module')
ids = m.search(cr, openerp.SUPERUSER_ID, [('state', '=', 'installed'), ('name', '!=', 'web')], context=request.context)
installed = set(x['name'] for x in m.read(cr, 1, ids, ['name'], context=request.context))
if openerp.tools.config['test_enable']:
installed.add(openerp.modules.module.current_test)
mods = [''] + openerp.conf.server_wide_modules + sorted(installed)
self._routing_map = http.routing_map(mods, False, converters=self._get_converters())
return self._routing_map
def convert_exception_to(to_type, with_message=False):
""" Should only be called from an exception handler. Fetches the current
exception data from sys.exc_info() and creates a new exception of type
``to_type`` with the original traceback.
If ``with_message`` is ``True``, sets the new exception's message to be
the stringification of the original exception. If ``False``, does not
set the new exception's message. Otherwise, uses ``with_message`` as the
new exception's message.
:type with_message: str|bool
"""
etype, original, tb = sys.exc_info()
try:
if with_message is False:
message = None
elif with_message is True:
message = str(original)
else:
message = str(with_message)
raise to_type, message, tb
except to_type, e:
return e
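# Illustrative sketch (hypothetical caller, not part of the original module):
# called from inside an except block, convert_exception_to returns an instance
# of ``to_type`` built from the current exception, which the caller can then
# raise or return, e.g.
#   try:
#       do_work()
#   except ValueError:
#       raise convert_exception_to(werkzeug.exceptions.BadRequest,
#                                  with_message=True)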
# vim:et:
|
virtualanup/hal | refs/heads/master | tests/libtest.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from hal.library import HalLibrary
import unittest
class HalLibTest(unittest.TestCase):
"""
Base class for testing hal libraries
"""
def assert_successful_response(self, command):
a = self.lib(command)
a.process_input()
self.assertEqual(a.status, HalLibrary.SUCCESS)
def assert_failure_response(self, command):
a = self.lib(command)
a.process_input()
self.assertEqual(a.status, HalLibrary.FAILURE)
def assert_error_response(self, command, message=None):
a = self.lib(command)
a.process_input()
self.assertEqual(a.status, HalLibrary.ERROR)
if message:
self.assertEqual(a.get_error(), message)
def assert_in_response(self, command, string):
a = self.lib(command)
a.process_input()
a.process()
for s in a.get_response():
if string.lower() in s.lower():
return
raise Exception(string," not in ", a.get_response())
def assert_response_count(self, command, count):
a = self.lib(command)
a.process_input()
a.process()
self.assertEqual(count,len(a.get_response()))
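# Illustrative sketch (hypothetical subclass, not part of this base class): a
# concrete test case points ``lib`` at a HalLibrary subclass and reuses the
# helpers above, e.g.
#   class GreetLibTest(HalLibTest):
#       lib = GreetLibrary
#       def test_greeting(self):
#           self.assert_in_response("hello hal", "hello")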
|
Jeff-Tian/mybnb | refs/heads/master | Python27/Lib/encodings/iso8859_7.py | 93 | """ Python Character Mapping Codec iso8859_7 generated from 'MAPPINGS/ISO8859/8859-7.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-7',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u2018' # 0xA1 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xA2 -> RIGHT SINGLE QUOTATION MARK
u'\xa3' # 0xA3 -> POUND SIGN
u'\u20ac' # 0xA4 -> EURO SIGN
u'\u20af' # 0xA5 -> DRACHMA SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u037a' # 0xAA -> GREEK YPOGEGRAMMENI
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\u2015' # 0xAF -> HORIZONTAL BAR
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u0384' # 0xB4 -> GREEK TONOS
u'\u0385' # 0xB5 -> GREEK DIALYTIKA TONOS
u'\u0386' # 0xB6 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
u'\ufffe'
u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
Tendrl/commons | refs/heads/master | setup.py | 1 | import re
import subprocess
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
try:
# Python 2 backwards compat
from __builtin__ import raw_input as input
except ImportError:
pass
def read_module_contents():
with open('version.py') as app_init:
return app_init.read()
def read_spec_contents():
with open('tendrl-commons.spec') as spec:
return spec.read()
module_file = read_module_contents()
metadata = dict(re.findall("__([a-z]+)__\s*=\s*'([^']+)'", module_file))
version = metadata['version']
class BumpCommand(Command):
"""Bump the __version__ number and commit all changes. """
user_options = [('version=', 'v', 'version number to use')]
def initialize_options(self):
new_version = metadata['version'].split('.')
new_version[-1] = str(int(new_version[-1]) + 1) # Bump the final part
self.version = ".".join(new_version)
def finalize_options(self):
pass
def run(self):
print('old version: %s new version: %s' %
(metadata['version'], self.version))
try:
input('Press enter to confirm, or ctrl-c to exit >')
except KeyboardInterrupt:
raise SystemExit("\nNot proceeding")
old = "__version__ = '%s'" % metadata['version']
new = "__version__ = '%s'" % self.version
module_file = read_module_contents()
with open('version.py', 'w') as fileh:
fileh.write(module_file.replace(old, new))
old = 'Version: %s' % metadata['version']
new = 'Version: %s' % self.version
spec_file = read_spec_contents()
with open('tendrl-commons.spec', 'w') as fileh:
fileh.write(spec_file.replace(old, new))
# Commit everything with a standard commit message
cmd = ['git', 'commit', '-a', '-m', 'version %s' % self.version]
print(' '.join(cmd))
subprocess.check_call(cmd)
class ReleaseCommand(Command):
"""Tag and push a new release. """
user_options = [('sign', 's', 'GPG-sign the Git tag and release files')]
def initialize_options(self):
self.sign = False
def finalize_options(self):
pass
def run(self):
# Create Git tag
tag_name = 'v%s' % version
cmd = ['git', 'tag', '-a', tag_name, '-m', 'version %s' % version]
if self.sign:
cmd.append('-s')
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push Git tag to origin remote
cmd = ['git', 'push', 'origin', tag_name]
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push package to pypi
# cmd = ['python', 'setup.py', 'sdist', 'upload']
# if self.sign:
# cmd.append('--sign')
# print(' '.join(cmd))
# subprocess.check_call(cmd)
setup(
name="tendrl-commons",
version=version,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*",
"tests"]),
namespace_packages=['tendrl'],
url="http://www.redhat.com",
author="Rohan Kanade.",
author_email="rkanade@redhat.com",
license="LGPL-2.1+",
zip_safe=False,
install_requires=[
"ansible>=2.5",
"psutil",
"python-etcd",
"python-dateutil",
"ruamel.yaml",
"maps==4.2.0",
"pytz",
"six",
"netifaces"
],
include_package_data=True,
cmdclass={'bumpversion': BumpCommand, 'release': ReleaseCommand},
)
|
bn1/python-apify | refs/heads/master | tests/test_apify.py | 1 | import os
import pytest
from httmock import HTTMock, all_requests
from apify import KeyValueStore
@all_requests
def mock_page_store(url, request):
path = (os.path.dirname(os.path.realpath(__file__)) +
"/data" + url.path + '.json')
with open(path, "r") as fo:
return fo.read()
def test_init():
store = KeyValueStore('dummy-store')
assert isinstance(store, KeyValueStore)
assert store.page == 0
def test_can_iterate():
with HTTMock(mock_page_store):
store = KeyValueStore('dummy-store')
got_item = False
for item in store:
got_item = True
break
assert got_item
assert store.page == 1
def test_stops_iteration():
with HTTMock(mock_page_store):
with pytest.raises(StopIteration):
store = KeyValueStore('dummy-store')
while True:
store.next()
def test_iterating_over_all_pages():
with HTTMock(mock_page_store):
store = KeyValueStore('dummy-store')
items = []
for item in store:
items.append(item)
assert store.page == 4
assert len(items) == 8
if __name__ == '__main__':
pytest.main() |
mustafat/odoo-1 | refs/heads/8.0 | addons/mass_mailing/models/mass_mailing_report.py | 364 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp import tools
class MassMailingReport(osv.Model):
_name = 'mail.statistics.report'
_auto = False
_description = 'Mass Mailing Statistics'
_columns = {
'scheduled_date': fields.datetime('Scheduled Date', readonly=True),
'name': fields.char('Mass Mail', readonly=True),
'campaign': fields.char('Mass Mail Campaign', readonly=True),
'sent': fields.integer('Sent', readonly=True),
'delivered': fields.integer('Delivered', readonly=True),
'opened': fields.integer('Opened', readonly=True),
'bounced': fields.integer('Bounced', readonly=True),
'replied': fields.integer('Replied', readonly=True),
'state': fields.selection(
[('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')],
string='Status', readonly=True,
),
'email_from': fields.char('From', readonly=True),
}
def init(self, cr):
"""Mass Mail Statistical Report: based on mail.mail.statistics that models the various
statistics collected for each mailing, and mail.mass_mailing model that models the
        various mailings performed. """
tools.drop_view_if_exists(cr, 'mail_statistics_report')
cr.execute("""
CREATE OR REPLACE VIEW mail_statistics_report AS (
SELECT
min(ms.id) as id,
ms.scheduled as scheduled_date,
mm.name as name,
mc.name as campaign,
count(ms.bounced) as bounced,
count(ms.sent) as sent,
(count(ms.sent) - count(ms.bounced)) as delivered,
count(ms.opened) as opened,
count(ms.replied) as replied,
mm.state,
mm.email_from
FROM
mail_mail_statistics as ms
left join mail_mass_mailing as mm ON (ms.mass_mailing_id=mm.id)
left join mail_mass_mailing_campaign as mc ON (ms.mass_mailing_campaign_id=mc.id)
GROUP BY ms.scheduled, mm.name, mc.name, mm.state, mm.email_from
)""")
|
lmprice/ansible | refs/heads/devel | lib/ansible/modules/cloud/google/gcpubsub_facts.py | 95 | #!/usr/bin/python
# Copyright 2016 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcpubsub_facts
version_added: "2.3"
short_description: List Topics/Subscriptions and Messages from Google PubSub.
description:
- List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
topic/subscription management.
See U(https://cloud.google.com/pubsub/docs) for an overview.
requirements:
- "python >= 2.6"
- "google-auth >= 0.5.0"
- "google-cloud-pubsub >= 0.22.0"
notes:
  - The list state enables the user to list topics or subscriptions in the project. See examples for details.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
topic:
description:
- GCP pubsub topic name. Only the name, not the full path, is required.
required: False
view:
description:
- Choices are 'topics' or 'subscriptions'
required: True
state:
description:
- list is the only valid option.
required: False
'''
EXAMPLES = '''
## List all Topics in a project
- gcpubsub_facts:
view: topics
state: list
## List all Subscriptions in a project
- gcpubsub_facts:
view: subscriptions
state: list
## List all Subscriptions for a Topic in a project
- gcpubsub_facts:
view: subscriptions
topic: my-topic
state: list
'''
RETURN = '''
subscriptions:
description: List of subscriptions.
returned: When view is set to subscriptions.
type: list
sample: ["mysubscription", "mysubscription2"]
topic:
description: Name of topic. Used to filter subscriptions.
returned: Always
type: str
sample: "mytopic"
topics:
description: List of topics.
returned: When view is set to topics.
type: list
sample: ["mytopic", "mytopic2"]
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
from google.cloud import pubsub
HAS_GOOGLE_CLOUD_PUBSUB = True
except ImportError as e:
HAS_GOOGLE_CLOUD_PUBSUB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
def list_func(data, member='name'):
"""Used for state=list."""
return [getattr(x, member) for x in data]
def main():
module = AnsibleModule(argument_spec=dict(
view=dict(choices=['topics', 'subscriptions'], default='topics'),
topic=dict(required=False),
state=dict(choices=['list'], default='list'),
service_account_email=dict(),
credentials_file=dict(),
project_id=dict(), ),)
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.")
CLIENT_MINIMUM_VERSION = '0.22.0'
if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
mod_params = {}
mod_params['state'] = module.params.get('state')
mod_params['topic'] = module.params.get('topic')
mod_params['view'] = module.params.get('view')
creds, params = get_google_cloud_credentials(module)
pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
pubsub_client.user_agent = 'ansible-pubsub-0.1'
json_output = {}
if mod_params['view'] == 'topics':
json_output['topics'] = list_func(pubsub_client.list_topics())
elif mod_params['view'] == 'subscriptions':
if mod_params['topic']:
t = pubsub_client.topic(mod_params['topic'])
json_output['subscriptions'] = list_func(t.list_subscriptions())
else:
json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
json_output['changed'] = False
json_output.update(mod_params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
CatsAndDogsbvba/odoo | refs/heads/8.0 | addons/hw_screen/__openerp__.py | 25 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2015 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Screen Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
'summary': 'Provides support for customer facing displays',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Screen Driver
=============
This module allows the POS client to send rendered HTML to a remotely
installed screen. This module then displays this HTML using a web
browser.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'installable': False,
'auto_install': False,
}
|
arpitar/osf.io | refs/heads/develop | scripts/populate_conferences.py | 3 | #!/usr/bin/env python
# encoding: utf-8
import os
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
def main():
init_app(set_backends=True, routes=False)
populate_conferences()
MEETING_DATA = {
'spsp2014': {
'name': 'Society for Personality and Social Psychology 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'asb2014': {
'name': 'Association of Southeastern Biologists 2014',
'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2014': {
'name': 'Association for Psychological Science 2014',
'info_url': 'http://centerforopenscience.org/aps/',
'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'annopeer2014': {
'name': '#annopeer',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'cpa2014': {
'name': 'Canadian Psychological Association 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'filaments2014': {
'name': 'National Radio Astronomy Observatory Filaments 2014',
'info_url': None,
'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
'filamentary-structure/images/filaments2014_660x178.png',
'active': False,
'admins': [
'lvonschi@nrao.edu',
# 'Dkim@nrao.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'bitss2014': {
'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
'info_url': None,
'logo_url': os.path.join(
settings.STATIC_URL_PATH,
'img',
'conferences',
'bitss.jpg',
),
'active': False,
'admins': [
'gkroll@berkeley.edu',
'awais@berkeley.edu',
],
'public_projects': True,
'poster': False,
'talk': True,
},
'spsp2015': {
'name': 'Society for Personality and Social Psychology 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'meetings@spsp.org',
],
'poster': True,
'talk': True,
},
'aps2015': {
'name': 'Association for Psychological Science 2015',
'info_url': None,
'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
'active': True,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'icps2015': {
'name': 'International Convention of Psychological Science 2015',
'info_url': None,
'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'mpa2015': {
'name': 'Midwestern Psychological Association 2015',
'info_url': None,
'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
'active': True,
'admins': [
'mpa@kent.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'NCCC2015': {
'name': 'North Carolina Cognition Conference 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'aoverman@elon.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VPRSF2015': {
'name': 'Virginia Piedmont Regional Science Fair 2015',
'info_url': None,
'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
'active': False,
'admins': [
'director@vprsf.org',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'APRS2015': {
'name': 'UVA Annual Postdoctoral Research Symposium 2015',
'info_url': None,
'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
'active': False,
'admins': [
'mhurst@virginia.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'ASB2015': {
'name': 'Association of Southeastern Biologists 2015',
'info_url': None,
'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
'active': False,
'admins': [
'amorris.mtsu@gmail.com',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'TeaP2015': {
'name': 'Tagung experimentell arbeitender Psychologen 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VSSEF2015': {
'name': 'Virginia State Science and Engineering Fair 2015',
'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2015': {
'name': 'Rocky Mountain Psychological Association 2015',
'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARP2015': {
'name': 'Association for Research in Personality 2015',
'info_url': 'http://www.personality-arp.org/conference/',
'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEP2015': {
'name': 'Society of Experimental Psychologists Meeting 2015',
'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Reid2015': {
'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NEEPS2015': {
'name': 'Northeastern Evolutionary Psychology Conference 2015',
'info_url': 'http://neeps2015.weebly.com/',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'VaACS2015': {
'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
'info_url': 'http://virginia.sites.acs.org/',
'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2015': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NRAO2015': {
'name': 'National Radio Astronomy Observatory Accretion 2015',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARCS2015': {
'name': 'Advancing Research Communication and Scholarship 2015',
'info_url': 'http://commons.pacificu.edu/arcs/',
'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'singlecasedesigns2015': {
'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice',
'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OSFM2015': {
'name': 'OSF for Meetings 2015',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'JSSP2015': {
'name': 'Japanese Society of Social Psychology 2015',
'info_url': 'http://www.socialpsychology.jp/conf2015/index.html',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'4S2015': {
'name': 'Society for Social Studies of Science 2015',
'info_url': 'http://www.4sonline.org/meeting',
'logo_url': 'http://www.4sonline.org/ee/denver-skyline.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IARR2016': {
'name': 'International Association for Relationship Research 2016',
'info_url': 'http://iarr.psych.utoronto.ca/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IA2015': {
'name': 'Inclusive Astronomy 2015',
'info_url': 'https://vanderbilt.irisregistration.com/Home/Site?code=InclusiveAstronomy2015',
'logo_url': 'https://vanderbilt.blob.core.windows.net/images/Inclusive%20Astronomy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'PsiChiRepository': {
'name': 'Psi Chi Repository',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'R2RC': {
'name': 'Right to Research Coalition',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OpenCon2015': {
'name': 'OpenCon2015',
'info_url': 'http://opencon2015.org/',
'logo_url': 'http://s8.postimg.org/w9b30pxyd/Open_Con2015_new_logo.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2015': {
'name': 'Earth Science Information Partners 2015',
'info_url': 'http://esipfed.org/',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SPSP2016': {
'name': 'Society for Personality and Social Psychology 2016 ',
'info_url': 'http://meeting.spsp.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NACIII': {
'name': '2015 National Astronomy Consortium (NAC) III Workshop',
'info_url': 'https://info.nrao.edu/do/odi/meetings/2015/nac111/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CDS2015': {
'name': 'Cognitive Development Society 2015',
'info_url': 'http://meetings.cogdevsoc.org/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEASR2016': {
'name': 'Southeastern Association of Shared Resources 2016',
'info_url': 'http://seasr.abrf.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Accretion2015': {
'name': 'Observational Evidence of Gas Accretion onto Galaxies?',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'2020Futures': {
'name': 'U.S. Radio/Millimeter/Submillimeter Science Futures in the 2020s',
'info_url': 'https://science.nrao.edu/science/meetings/2015/2020futures/home',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
}
def populate_conferences():
for meeting, attrs in MEETING_DATA.iteritems():
meeting = meeting.strip()
admin_emails = attrs.pop('admins')
admin_objs = []
for email in admin_emails:
try:
user = User.find_one(Q('username', 'iexact', email))
admin_objs.append(user)
except ModularOdmException:
raise RuntimeError('Username {0!r} is not registered.'.format(email))
conf = Conference(
endpoint=meeting, admins=admin_objs, **attrs
)
try:
conf.save()
except ModularOdmException:
print('{0} Conference already exists. Updating existing record...'.format(meeting))
conf = Conference.find_one(Q('endpoint', 'eq', meeting))
for key, value in attrs.items():
setattr(conf, key, value)
conf.admins = admin_objs
changed_fields = conf.save()
if changed_fields:
print('Changed: {}'.format(changed_fields))
else:
print('Added new Conference: {}'.format(meeting))
if __name__ == '__main__':
main()
|
WeichenXu123/spark | refs/heads/master | examples/src/main/python/ml/one_vs_rest_example.py | 52 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example of Multiclass to Binary Reduction with One Vs Rest,
using Logistic Regression as the base classifier.
Run with:
bin/spark-submit examples/src/main/python/ml/one_vs_rest_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.classification import LogisticRegression, OneVsRest
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("OneVsRestExample") \
.getOrCreate()
# $example on$
# load data file.
inputData = spark.read.format("libsvm") \
.load("data/mllib/sample_multiclass_classification_data.txt")
# generate the train/test split.
(train, test) = inputData.randomSplit([0.8, 0.2])
# instantiate the base classifier.
lr = LogisticRegression(maxIter=10, tol=1E-6, fitIntercept=True)
# instantiate the One Vs Rest Classifier.
ovr = OneVsRest(classifier=lr)
# train the multiclass model.
ovrModel = ovr.fit(train)
# score the model on test data.
predictions = ovrModel.transform(test)
# obtain evaluator.
evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
# compute the classification error on test data.
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g" % (1.0 - accuracy))
# $example off$
spark.stop()
|