repo_name (string, length 5-100) | path (string, length 4-375) | copies (categorical, 991 values) | size (string, length 4-7) | content (string, length 666-1M) | license (categorical, 15 values)
---|---|---|---|---|---
gentlemans/gentlemanly_engine | deps/freetype2/src/tools/docmaker/sources.py | 45 | 13294 | #
# sources.py
#
# Convert source code comments to multi-line blocks (library file).
#
# Copyright 2002-2016 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This library file contains definitions of classes needed to decompose C
# source code files into a series of multi-line `blocks'. There are two
# kinds of blocks.
#
# - Normal blocks, which contain source code or ordinary comments.
#
# - Documentation blocks, which have restricted formatting, and whose text
# always starts with a documentation markup tag like `<Function>',
# `<Type>', etc.
#
# The routines to process the content of documentation blocks are contained
# in file `content.py'; the classes and methods found here only deal with
# text parsing and basic documentation block extraction.
#
import fileinput, re, sys, os, string
################################################################
##
## SOURCE BLOCK FORMAT CLASS
##
## A simple class containing compiled regular expressions to detect
## potential documentation format block comments within C source code.
##
## The `column' pattern must contain a group to `unbox' the content of
## documentation comment blocks.
##
## Later on, paragraphs are converted to long lines, which simplifies the
## regular expressions that act upon the text.
##
class SourceBlockFormat:
def __init__( self, id, start, column, end ):
"""Create a block pattern, used to recognize special documentation
blocks."""
self.id = id
self.start = re.compile( start, re.VERBOSE )
self.column = re.compile( column, re.VERBOSE )
self.end = re.compile( end, re.VERBOSE )
#
# Format 1 documentation comment blocks.
#
# /************************************/ (at least 2 asterisks)
# /* */
# /* */
# /* */
# /************************************/ (at least 2 asterisks)
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# Format 2 documentation comment blocks.
#
# /************************************ (at least 2 asterisks)
# *
# * (1 asterisk)
# *
# */ (1 or more asterisks)
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?![*/]) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group 1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# The list of supported documentation block formats. We could add new ones
# quite easily.
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
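#
# A quick illustration (hypothetical input): check which of the formats
# above recognizes the start of a sample comment line.
#
#   line = "/**************************/"
#   for f in re_source_block_formats:
#       if f.start.match( line ):
#           print f.id                    # prints `1'
#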
#
# The following regular expressions correspond to markup tags within the
# documentation comment blocks. They are equivalent despite their different
# syntax.
#
# A markup tag consists of letters or the character `-', to be found in group 1.
#
# Notice that a markup tag _must_ begin a new paragraph.
#
re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format
#
# The list of supported markup tags. We could add new ones quite easily.
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
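#
# Illustration (hypothetical input): both syntax forms yield the tag name
# in group 1.
#
#   re_markup_tag1.match( "<Function>" ).group( 1 )     # -> 'Function'
#   re_markup_tag2.match( "@description:" ).group( 1 )  # -> 'description'
#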
#
# A regular expression to detect a cross reference, after markup tags have
# been stripped off.
#
# Two syntax forms are supported:
#
# @<name>
# @<name>[<id>]
#
# where both `<name>' and `<id>' consist of alphanumeric characters, `_',
# and `-'. Use `<id>' if there are multiple, valid `<name>' entries.
#
# Example: @foo[bar]
#
re_crossref = re.compile( r"""
@
(?P<name>(?:\w|-)+
(?:\[(?:\w|-)+\])?)
(?P<rest>.*)
""", re.VERBOSE )
#
# Two regular expressions to detect italic and bold markup, respectively.
# Group 1 is the markup, group 2 the rest of the line.
#
# Note that the markup is limited to words consisting of letters, digits,
# the characters `_' and `-', or an apostrophe (but not as the first
# character).
#
re_italic = re.compile( r"_((?:\w|-)(?:\w|'|-)*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*((?:\w|-)(?:\w|'|-)*)\*(.*)" ) # *bold*
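#
# Illustration (hypothetical input):
#
#   m = re_italic.match( "_markup_ rest of line" )
#   # m.group( 1 ) == 'markup', m.group( 2 ) == ' rest of line'
#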
#
# This regular expression code to identify a URL has been taken from
#
# http://mail.python.org/pipermail/tutor/2002-September/017228.html
#
# (with slight modifications).
#
urls = r'(?:https?|telnet|gopher|file|wais|ftp)'
ltrs = r'\w'
gunk = r'/#~:.?+=&%@!\-'
punc = r'.:?\-'
any = "%(ltrs)s%(gunk)s%(punc)s" % { 'ltrs' : ltrs,
'gunk' : gunk,
'punc' : punc }
url = r"""
(
\b # start at word boundary
%(urls)s : # need resource and a colon
[%(any)s] +? # followed by one or more of any valid
# character, but be conservative and
# take only what you need to...
(?= # [look-ahead non-consumptive assertion]
[%(punc)s]* # either 0 or more punctuation
(?: # [non-grouping parentheses]
[^%(any)s] | $ # followed by a non-url char
# or end of the string
)
)
)
""" % {'urls' : urls,
'any' : any,
'punc' : punc }
re_url = re.compile( url, re.VERBOSE | re.MULTILINE )
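#
# Illustration (hypothetical input): extract a URL embedded in a line.
#
#   text = "see http://www.freetype.org/index.html for details"
#   re_url.search( text ).group( 1 )
#   # -> 'http://www.freetype.org/index.html'
#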
#
# A regular expression that stops collection of comments for the current
# block.
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' ) # /* */
#
# A regular expression to find possible C identifiers while outputting
# source code verbatim, covering things like `*foo' or `(bar'. Group 1 is
# the prefix, group 2 the identifier -- since we scan lines from left to
# right, sequentially splitting the source code into prefix and identifier
# is fully sufficient for our purposes.
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
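#
# Illustration (hypothetical input): `(*foo' splits into a non-word prefix
# and a word identifier.
#
#   m = re_source_crossref.match( "(*foo" )
#   # m.group( 1 ) == '(*', m.group( 2 ) == 'foo'
#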
#
# A regular expression that matches a list of reserved C source keywords.
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## There are two important fields in a `SourceBlock' object.
##
## self.lines
## A list of text lines for the corresponding block.
##
## self.content
## For documentation comment blocks only, this is the block content
## that has been `unboxed' from its decoration. This is `None' for all
## other blocks (i.e., sources or ordinary comments with no starting
## markup tag).
##
class SourceBlock:
def __init__( self, processor, filename, lineno, lines ):
self.processor = processor
self.filename = filename
self.lineno = lineno
self.lines = lines[:]
self.format = processor.format
self.content = []
if self.format is None:
return
# extract comment lines
lines = []
for line0 in self.lines:
m = self.format.column.match( line0 )
if m:
lines.append( m.group( 1 ) )
# now, look for a markup tag
for l in lines:
l = string.strip( l )
if len( l ) > 0:
for tag in re_markup_tags:
if tag.match( l ):
self.content = lines
return
def location( self ):
return "(" + self.filename + ":" + repr( self.lineno ) + ")"
# debugging only -- not used in normal operations
def dump( self ):
if self.content:
print "{{{content start---"
for l in self.content:
print l
print "---content end}}}"
return
fmt = ""
if self.format:
fmt = repr( self.format.id ) + " "
for line in self.lines:
print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The `SourceProcessor' is in charge of reading a C source file and
## decomposing it into a series of different `SourceBlock' objects.
##
## A SourceBlock object consists of the following data.
##
## - A documentation comment block using one of the layouts above. Its
## exact format will be discussed later.
##
## - Normal source lines, including comments.
##
##
class SourceProcessor:
def __init__( self ):
"""Initialize a source processor."""
self.blocks = []
self.filename = None
self.format = None
self.lines = []
def reset( self ):
"""Reset a block processor and clean up all its blocks."""
self.blocks = []
self.format = None
def parse_file( self, filename ):
"""Parse a C source file and add its blocks to the processor's
list."""
self.reset()
self.filename = filename
fileinput.close()
self.format = None
self.lineno = 0
self.lines = []
for line in fileinput.input( filename ):
# strip trailing newlines, important on Windows machines!
if line[-1] == '\012':
line = line[0:-1]
if self.format is None:
self.process_normal_line( line )
else:
if self.format.end.match( line ):
# A normal block end. Add it to `lines' and create a
# new block
self.lines.append( line )
self.add_block_lines()
elif self.format.column.match( line ):
# A normal column line. Add it to `lines'.
self.lines.append( line )
else:
# An unexpected block end. Create a new block, but
# don't process the line.
self.add_block_lines()
# we need to process the line again
self.process_normal_line( line )
# record the last lines
self.add_block_lines()
def process_normal_line( self, line ):
"""Process a normal line and check whether it is the start of a new
block."""
for f in re_source_block_formats:
if f.start.match( line ):
self.add_block_lines()
self.format = f
self.lineno = fileinput.filelineno()
self.lines.append( line )
def add_block_lines( self ):
"""Add the current accumulated lines and create a new block."""
if self.lines != []:
block = SourceBlock( self,
self.filename,
self.lineno,
self.lines )
self.blocks.append( block )
self.format = None
self.lines = []
# debugging only, not used in normal operations
def dump( self ):
"""Print all blocks in a processor."""
for b in self.blocks:
b.dump()
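# A minimal usage sketch (hypothetical file name):
#
#   processor = SourceProcessor()
#   processor.parse_file( "ftobjs.c" )
#   for block in processor.blocks:
#       if block.content:
#           print block.location()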
# eof
| mit |
carlosbeatortega/sociedades | web/node/node_modules/npm/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/MSVSVersion.py | 42 | 9442 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to remain
Python-neutral; Cygwin's Python, for instance, lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValue(key, value):
"""Use reg.exe to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
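# Usage sketch (hypothetical key; only meaningful on Windows):
#
#   path = _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\9.0',
#                            'InstallDir')
#   # `path' is the install directory string, or None if the key is absent.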
def _RegistryKeyExists(key):
"""Use reg.exe to see if a key exists.
Args:
key: The registry key to check.
Return:
True if the key exists
"""
if not _RegistryQuery(key):
return False
return True
def _CreateVersion(name):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, Python will throw an error.
"""
versions = {
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True),
'2010e': VisualStudioVersion('2010e',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False),
}
return versions[str(name)]
def _DetectVisualStudioVersions():
"""Collect the list of installed visual studio versions.
Returns:
A list of Visual Studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Only versions 8-10 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {'8.0': '2005', '9.0': '2008', '10.0': '2010'}
versions = []
# For now, prefer versions before VS2010
for version in ('9.0', '8.0', '10.0'):
# Check if VS2010 and later is installed as specified by
# http://msdn.microsoft.com/en-us/library/bb164659.aspx
keys = [r'HKLM\SOFTWARE\Microsoft\DevDiv\VS\Servicing\%s' % version,
r'HKLM\SOFTWARE\Wow6432Node\Microsoft\DevDiv\VS\Servicing\%s' % (
version)]
for index in range(len(keys)):
if not _RegistryKeyExists(keys[index]):
continue
# Check for express
if _RegistryKeyExists(keys[index] + '\\expbsln'):
# Add this one
versions.append(_CreateVersion(version_to_year[version] + 'e'))
else:
# Add this one
versions.append(_CreateVersion(version_to_year[version]))
# Old (pre-VS2010) method of searching for which VS version is installed
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
# Check for full.
if os.path.exists(os.path.join(path, 'devenv.exe')):
# Add this one.
versions.append(_CreateVersion(version_to_year[version]))
# Check for express.
elif os.path.exists(os.path.join(path, 'vcexpress.exe')):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e'))
return versions
def SelectVisualStudioVersion(version='auto'):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
# In auto mode, pick the most preferred version present.
if version == 'auto':
versions = _DetectVisualStudioVersions()
if not versions:
# Default to 2005.
return _CreateVersion('2005')
return versions[0]
# Convert version string into a version object.
return _CreateVersion(version)
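# Usage sketch (illustrative):
#
#   version = SelectVisualStudioVersion()
#   print version.Description()       # e.g. 'Visual Studio 2008'
#   print version.ProjectExtension()  # '.vcproj' or '.vcxproj'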
| mit |
goldeneye-source/ges-python | lib/multiprocessing/spawn.py | 91 | 8847 | #
# Code used to start processes when using the spawn or forkserver
# start methods.
#
# multiprocessing/spawn.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import os
import pickle
import sys
import runpy
import types
from . import get_start_method, set_start_method
from . import process
from . import util
__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
'get_preparation_data', 'get_command_line', 'import_main_path']
#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#
if sys.platform != 'win32':
WINEXE = False
WINSERVICE = False
else:
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
global _python_exe
_python_exe = exe
def get_executable():
return _python_exe
#
#
#
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
return True
else:
return False
def freeze_support():
'''
Run code for process object if this is not the main process
'''
if is_forking(sys.argv):
kwds = {}
for arg in sys.argv[2:]:
name, value = arg.split('=')
if value == 'None':
kwds[name] = None
else:
kwds[name] = int(value)
spawn_main(**kwds)
sys.exit()
def get_command_line(**kwds):
'''
Returns prefix of command line used for spawning a child process
'''
if getattr(sys, 'frozen', False):
return ([sys.executable, '--multiprocessing-fork'] +
['%s=%r' % item for item in kwds.items()])
else:
prog = 'from multiprocessing.spawn import spawn_main; spawn_main(%s)'
prog %= ', '.join('%s=%r' % item for item in kwds.items())
opts = util._args_from_interpreter_flags()
return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
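#
# Illustration (hypothetical handle values): on a non-frozen build the
# returned command line looks roughly like
#
#   [_python_exe, '-c',
#    'from multiprocessing.spawn import spawn_main; '
#    'spawn_main(pipe_handle=7, parent_pid=None, tracker_fd=8)',
#    '--multiprocessing-fork']
#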
def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):
'''
Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
if sys.platform == 'win32':
import msvcrt
from .reduction import steal_handle
new_handle = steal_handle(parent_pid, pipe_handle)
fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
else:
from . import semaphore_tracker
semaphore_tracker._semaphore_tracker._fd = tracker_fd
fd = pipe_handle
exitcode = _main(fd)
sys.exit(exitcode)
def _main(fd):
with os.fdopen(fd, 'rb', closefd=True) as from_parent:
process.current_process()._inheriting = True
try:
preparation_data = pickle.load(from_parent)
prepare(preparation_data)
self = pickle.load(from_parent)
finally:
del process.current_process()._inheriting
return self._bootstrap()
def _check_not_importing_main():
if getattr(process.current_process(), '_inheriting', False):
raise RuntimeError('''
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.''')
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object
'''
_check_not_importing_main()
d = dict(
log_to_stderr=util._log_to_stderr,
authkey=process.current_process().authkey,
)
if util._logger is not None:
d['log_level'] = util._logger.getEffectiveLevel()
sys_path=sys.path.copy()
try:
i = sys_path.index('')
except ValueError:
pass
else:
sys_path[i] = process.ORIGINAL_DIR
d.update(
name=name,
sys_path=sys_path,
sys_argv=sys.argv,
orig_dir=process.ORIGINAL_DIR,
dir=os.getcwd(),
start_method=get_start_method(),
)
# Figure out whether to initialise main in the subprocess as a module
# or through direct execution (or to leave it alone entirely)
main_module = sys.modules['__main__']
main_mod_name = getattr(main_module.__spec__, "name", None)
if main_mod_name is not None:
d['init_main_from_name'] = main_mod_name
elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
main_path = getattr(main_module, '__file__', None)
if main_path is not None:
if (not os.path.isabs(main_path) and
process.ORIGINAL_DIR is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['init_main_from_path'] = os.path.normpath(main_path)
return d
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process().authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'start_method' in data:
set_start_method(data['start_method'])
if 'init_main_from_name' in data:
_fixup_main_from_name(data['init_main_from_name'])
elif 'init_main_from_path' in data:
_fixup_main_from_path(data['init_main_from_path'])
# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses
def _fixup_main_from_name(mod_name):
# __main__.py files for packages, directories, zip archives, etc, run
# their "main only" code unconditionally, so we don't even try to
# populate anything in __main__, nor do we make any changes to
# __main__ attributes
current_main = sys.modules['__main__']
if mod_name == "__main__" or mod_name.endswith(".__main__"):
return
# If this process was forked, __main__ may already be populated
if getattr(current_main.__spec__, "name", None) == mod_name:
return
# Otherwise, __main__ may contain some non-main code where we need to
# support unpickling it properly. We rerun it as __mp_main__ and make
# the normal __main__ an alias to that
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_module(mod_name,
run_name="__mp_main__",
alter_sys=True)
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def _fixup_main_from_path(main_path):
# If this process was forked, __main__ may already be populated
current_main = sys.modules['__main__']
# Unfortunately, the main ipython launch script historically had no
# "if __name__ == '__main__'" guard, so we work around that
# by treating it like a __main__.py file
# See https://github.com/ipython/ipython/issues/4698
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == 'ipython':
return
# Otherwise, if __file__ already has the setting we expect,
# there's nothing more to do
if getattr(current_main, '__file__', None) == main_path:
return
# If the parent process has sent a path through rather than a module
# name we assume it is an executable script that may contain
# non-main code that needs to be executed
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_path(main_path,
run_name="__mp_main__")
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def import_main_path(main_path):
'''
Set sys.modules['__main__'] to module at main_path
'''
_fixup_main_from_path(main_path)
| gpl-3.0 |
windj007/bulbs | bulbs/neo4jserver/tests/client_tests.py | 3 | 3431 | from uuid import uuid1
import unittest
from bulbs.config import Config
from bulbs.utils import json
from bulbs.neo4jserver import Neo4jClient, NEO4J_URI
from bulbs.tests.client_tests import ClientTestCase
from bulbs.tests.client_index_tests import ClientIndexTestCase
from bulbs.factory import Factory
from bulbs.element import Vertex, Edge
from bulbs.neo4jserver.index import ExactIndex
import time
class Neo4jClientTestCase(ClientTestCase):
def setUp(self):
config = Config(NEO4J_URI)
self.client = Neo4jClient(config)
# Separated client index tests for Titan
class Neo4jClientIndexTestCase(ClientIndexTestCase):
def setUp(self):
config = Config(NEO4J_URI)
self.client = Neo4jClient(config)
def test_create_unique_vertex(self):
idx_name = 'test_idx'
self._delete_vertex_index(idx_name)
self.client.create_vertex_index(idx_name)
k, v = 'key', uuid1().get_hex()
args = (k, v, {k: v})
resp = self.client.create_unique_vertex(idx_name, *args)
assert resp.headers['status'] == '201'
assert resp.results.data.get(k) == v
resp = self.client.create_unique_vertex(idx_name, *args)
assert resp.headers['status'] == '200'
assert resp.results.data.get(k) == v
# why is this here? - JT 10/22/2012
class Neo4jIndexTestCase(unittest.TestCase):
def setUp(self):
config = Config(NEO4J_URI)
self.client = Neo4jClient(config)
self.factory = Factory(self.client)
def test_gremlin(self):
# limiting return count so we don't exceed heap size
resp = self.client.gremlin("g.V[0..9]")
assert resp.total_size > 5
def test_query_exact_vertex_index(self):
index = self.factory.get_index(Vertex, ExactIndex)
vertices = index.query("name", "Jam*")
assert len(list(vertices)) > 1
def test_query_exact_edge_index(self):
index = self.factory.get_index(Edge, ExactIndex)
edges = index.query("timestamp", "1*")
assert len(list(edges)) > 1
def test_create_unique_vertex(self):
index = self.factory.get_index(Vertex, ExactIndex)
k, v = 'key', uuid1().get_hex()
args = (k, v, {k: v})
vertex, created = index.create_unique_vertex(*args)
assert isinstance(vertex, Vertex)
assert created is True
vertex, created = index.create_unique_vertex(*args)
assert isinstance(vertex, Vertex)
assert created is False
class CypherTestCase(unittest.TestCase):
def setUp(self):
config = Config(NEO4J_URI)
self.client = Neo4jClient(config)
#def test_warm_cache(self):
# resp = self.client.warm_cache()
# print resp.raw
def test_cypher(self):
query = """START x = node({_id}) MATCH x -[r]-> n RETURN type(r), n.name?, n.age?"""
params = dict(_id=1261)
resp = self.client.cypher(query,params)
#print resp.raw
def neo4j_client_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Neo4jClientTestCase))
suite.addTest(unittest.makeSuite(Neo4jClientIndexTestCase))
suite.addTest(unittest.makeSuite(Neo4jIndexTestCase))
#suite.addTest(unittest.makeSuite(GremlinTestCase))
#suite.addTest(unittest.makeSuite(CypherTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='neo4j_client_suite')
| bsd-3-clause |
ChromiumWebApps/chromium | third_party/pexpect/FSM.py | 171 | 14248 | #!/usr/bin/env python
"""This module implements a Finite State Machine (FSM). In addition to state
this FSM also maintains a user defined "memory". So this FSM can be used as a
Push-down Automata (PDA) since a PDA is a FSM + memory.
The following describes how the FSM works, but you will probably also need to
see the example function to understand how the FSM is used in practice.
You define an FSM by building tables of transitions. For a given input symbol
the process() method uses these tables to decide what action to call and what
the next state will be. The FSM has a table of transitions that associate:
(input_symbol, current_state) --> (action, next_state)
Where "action" is a function you define. The symbols and states can be any
objects. You use the add_transition() and add_transition_list() methods to add
to the transition table. The FSM also has a table of transitions that
associate:
(current_state) --> (action, next_state)
You use the add_transition_any() method to add to this transition table. The
FSM also has one default transition that is not associated with any specific
input_symbol or state. You use the set_default_transition() method to set the
default transition.
When an action function is called it is passed a reference to the FSM. The
action function may then access attributes of the FSM such as input_symbol,
current_state, or "memory". The "memory" attribute can be any object that you
want to pass along to the action functions. It is not used by the FSM itself.
For parsing you would typically pass a list to be used as a stack.
The processing sequence is as follows. The process() method is given an
input_symbol to process. The FSM will search the table of transitions that
associate:
(input_symbol, current_state) --> (action, next_state)
If the pair (input_symbol, current_state) is found then process() will call the
associated action function and then set the current state to the next_state.
If the FSM cannot find a match for (input_symbol, current_state) it will then
search the table of transitions that associate:
(current_state) --> (action, next_state)
If the current_state is found then the process() method will call the
associated action function and then set the current state to the next_state.
Notice that this table lacks an input_symbol. It lets you define transitions
for a current_state and ANY input_symbol. Hence, it is called the "any" table.
Remember, it is always checked after first searching the table for a specific
(input_symbol, current_state).
For the case where the FSM did not match either of the previous two cases the
FSM will try to use the default transition. If the default transition is
defined then the process() method will call the associated action function and
then set the current state to the next_state. This lets you define a default
transition as a catch-all case. You can think of it as an exception handler.
There can be only one default transition.
Finally, if none of the previous cases are defined for an input_symbol and
current_state then the FSM will raise an exception. This may be desirable, but
you can always prevent this just by defining a default transition.
Noah Spurrier 20020822
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
class ExceptionFSM(Exception):
"""This is the FSM Exception class."""
def __init__(self, value):
self.value = value
def __str__(self):
return `self.value`
class FSM:
"""This is a Finite State Machine (FSM).
"""
def __init__(self, initial_state, memory=None):
"""This creates the FSM. You set the initial state here. The "memory"
attribute is any object that you want to pass along to the action
functions. It is not used by the FSM. For parsing you would typically
pass a list to be used as a stack. """
# Map (input_symbol, current_state) --> (action, next_state).
self.state_transitions = {}
# Map (current_state) --> (action, next_state).
self.state_transitions_any = {}
self.default_transition = None
self.input_symbol = None
self.initial_state = initial_state
self.current_state = self.initial_state
self.next_state = None
self.action = None
self.memory = memory
def reset (self):
"""This sets the current_state to the initial_state and sets
input_symbol to None. The initial state was set by the constructor
__init__(). """
self.current_state = self.initial_state
self.input_symbol = None
def add_transition (self, input_symbol, state, action=None, next_state=None):
"""This adds a transition that associates:
(input_symbol, current_state) --> (action, next_state)
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged.
You can also set transitions for a list of symbols by using
add_transition_list(). """
if next_state is None:
next_state = state
self.state_transitions[(input_symbol, state)] = (action, next_state)
def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):
"""This adds the same transition for a list of input symbols.
You can pass a list or a string. Note that it is handy to use
string.digits, string.whitespace, string.letters, etc. to add
transitions that match character classes.
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged. """
if next_state is None:
next_state = state
for input_symbol in list_input_symbols:
self.add_transition (input_symbol, state, action, next_state)
def add_transition_any (self, state, action=None, next_state=None):
"""This adds a transition that associates:
(current_state) --> (action, next_state)
That is, any input symbol will match the current state.
The process() method checks the "any" state associations after it first
checks for an exact match of (input_symbol, current_state).
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged. """
if next_state is None:
next_state = state
self.state_transitions_any [state] = (action, next_state)
def set_default_transition (self, action, next_state):
"""This sets the default transition. This defines an action and
next_state if the FSM cannot find the input symbol and the current
state in the transition list and if the FSM cannot find the
current_state in the transition_any list. This is useful as a final
fall-through state for catching errors and undefined states.
The default transition can be removed by setting the attribute
default_transition to None. """
self.default_transition = (action, next_state)
def get_transition (self, input_symbol, state):
"""This returns (action, next state) given an input_symbol and state.
This does not modify the FSM state, so calling this method has no side
effects. Normally you do not call this method directly. It is called by
process().
The sequence of steps to check for a defined transition goes from the
most specific to the least specific.
1. Check state_transitions[] that match exactly the tuple,
(input_symbol, state)
2. Check state_transitions_any[] that match (state)
In other words, match a specific state and ANY input_symbol.
3. Check if the default_transition is defined.
This catches any input_symbol and any state.
This is a handler for errors, undefined states, or defaults.
4. No transition was defined. If we get here then raise an exception.
"""
if self.state_transitions.has_key((input_symbol, state)):
return self.state_transitions[(input_symbol, state)]
elif self.state_transitions_any.has_key (state):
return self.state_transitions_any[state]
elif self.default_transition is not None:
return self.default_transition
else:
raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
(str(input_symbol), str(state)) )
def process (self, input_symbol):
"""This is the main method that you call to process input. This may
cause the FSM to change state and call an action. This method calls
get_transition() to find the action and next_state associated with the
input_symbol and current_state. If the action is None then the action
is not called and only the current state is changed. This method
processes one complete input symbol. You can process a list of symbols
(or a string) by calling process_list(). """
self.input_symbol = input_symbol
(self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
if self.action is not None:
self.action (self)
self.current_state = self.next_state
self.next_state = None
def process_list (self, input_symbols):
"""This takes a list and sends each element to process(). The list may
be a string or any iterable object. """
for s in input_symbols:
self.process (s)
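# A minimal usage sketch (hypothetical states and symbols; see the RPN
# example below for a complete program):
#
#   fsm = FSM ('locked')
#   fsm.add_transition ('coin', 'locked', None, 'unlocked')
#   fsm.add_transition ('push', 'unlocked', None, 'locked')
#   fsm.process_list (['coin', 'push'])
#   # fsm.current_state == 'locked'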
##############################################################################
# The following is an example that demonstrates the use of the FSM class to
# process an RPN expression. Run this module from the command line. You will
# get a prompt > for input. Enter an RPN Expression. Numbers may be integers.
# Operators are * / + - Use the = sign to evaluate and print the expression.
# For example:
#
# 167 3 2 2 * * * 1 - =
#
# will print:
#
# 2003
##############################################################################
import sys, os, traceback, optparse, time, string
#
# These define the actions.
# Note that "memory" is a list being used as a stack.
#
def BeginBuildNumber (fsm):
fsm.memory.append (fsm.input_symbol)
def BuildNumber (fsm):
s = fsm.memory.pop ()
s = s + fsm.input_symbol
fsm.memory.append (s)
def EndBuildNumber (fsm):
s = fsm.memory.pop ()
fsm.memory.append (int(s))
def DoOperator (fsm):
ar = fsm.memory.pop()
al = fsm.memory.pop()
if fsm.input_symbol == '+':
fsm.memory.append (al + ar)
elif fsm.input_symbol == '-':
fsm.memory.append (al - ar)
elif fsm.input_symbol == '*':
fsm.memory.append (al * ar)
elif fsm.input_symbol == '/':
fsm.memory.append (al / ar)
def DoEqual (fsm):
print str(fsm.memory.pop())
def Error (fsm):
print 'That does not compute.'
print str(fsm.input_symbol)
def main():
"""This is where the example starts and the FSM state transitions are
defined. Note that states are strings (such as 'INIT'). This is not
necessary, but it makes the example easier to read. """
f = FSM ('INIT', []) # "memory" will be used as a stack.
f.set_default_transition (Error, 'INIT')
f.add_transition_any ('INIT', None, 'INIT')
f.add_transition ('=', 'INIT', DoEqual, 'INIT')
f.add_transition_list (string.digits, 'INIT', BeginBuildNumber, 'BUILDING_NUMBER')
f.add_transition_list (string.digits, 'BUILDING_NUMBER', BuildNumber, 'BUILDING_NUMBER')
f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT')
f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT')
print
print 'Enter an RPN Expression.'
print 'Numbers may be integers. Operators are * / + -'
print 'Use the = sign to evaluate and print the expression.'
print 'For example: '
print ' 167 3 2 2 * * * 1 - ='
inputstr = raw_input ('> ')
f.process_list(inputstr)
if __name__ == '__main__':
try:
start_time = time.time()
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), usage=globals()['__doc__'], version='$Id: FSM.py 533 2012-10-20 02:19:33Z noah $')
parser.add_option ('-v', '--verbose', action='store_true', default=False, help='verbose output')
(options, args) = parser.parse_args()
if options.verbose: print time.asctime()
main()
if options.verbose: print time.asctime()
if options.verbose: print 'TOTAL TIME IN MINUTES:',
if options.verbose: print (time.time() - start_time) / 60.0
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
except SystemExit, e: # sys.exit()
raise e
except Exception, e:
print 'ERROR, UNEXPECTED EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
| bsd-3-clause |
hyperized/ansible-modules-core | cloud/amazon/route53.py | 5 | 21343 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: route53
version_added: "1.3"
short_description: add or delete entries in Amazon's Route53 DNS service
description:
- Creates and deletes DNS records in Amazon's Route53 service
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'get', 'create', 'delete' ]
zone:
description:
- The DNS zone to modify
required: true
hosted_zone_id:
description:
- The Hosted Zone ID of the DNS zone to modify
required: false
version_added: "2.0"
default: null
record:
description:
- The full DNS record to create or delete
required: true
ttl:
description:
- The TTL to give the new record
required: false
default: 3600 (one hour)
type:
description:
- The type of DNS record to create
required: true
choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS', 'SOA' ]
alias:
description:
- Indicates if this is an alias record.
required: false
version_added: "1.9"
default: False
choices: [ 'True', 'False' ]
alias_hosted_zone_id:
description:
- The hosted zone identifier.
required: false
version_added: "1.9"
default: null
alias_evaluate_target_health:
description:
- Whether or not to evaluate an alias target health. Useful for aliases to Elastic Load Balancers.
required: false
version_added: "2.1"
default: false
value:
description:
- The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it.
required: false
default: null
overwrite:
description:
- Whether an existing record should be overwritten on create if values do not match
required: false
default: null
retry_interval:
description:
- In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long.
required: false
default: 500
private_zone:
description:
- If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone.
required: false
default: false
version_added: "1.9"
identifier:
description:
- Required for weighted, latency-based, and failover resource record sets only. An identifier
that differentiates among multiple resource record sets that have the
same combination of DNS name and type.
required: false
default: null
version_added: "2.0"
weight:
description:
- Weighted resource record sets only. Among resource record sets that
have the same combination of DNS name and type, a value that
determines what portion of traffic for the current resource record set
is routed to the associated location.
required: false
default: null
version_added: "2.0"
region:
description:
- Latency-based resource record sets only. Among resource record sets
that have the same combination of DNS name and type, a value that
determines which region this should be associated with for the
latency-based routing
required: false
default: null
version_added: "2.0"
health_check:
description:
- Health check to associate with this record
required: false
default: null
version_added: "2.0"
failover:
description:
- Failover resource record sets only. Whether this is the primary or
secondary resource record set. Allowed values are PRIMARY and SECONDARY
required: false
default: null
version_added: "2.0"
vpc_id:
description:
- "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
- This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
required: false
default: null
version_added: "2.0"
wait:
description:
- Wait until the changes have been replicated to all Amazon Route 53 DNS servers.
required: false
default: no
version_added: "2.1"
wait_timeout:
description:
- How long to wait for the changes to be replicated, in seconds.
required: false
default: 300
version_added: "2.1"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Mike Buzzetti <mike.buzzetti@gmail.com>"
extends_documentation_fragment: aws
'''
# FIXME: the command stuff should have a more state like configuration alias -- MPD
EXAMPLES = '''
# Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated
- route53:
command: create
zone: foo.com
record: new.foo.com
type: A
ttl: 7200
value: 1.1.1.1,2.2.2.2,3.3.3.3
wait: yes
# Retrieve the details for new.foo.com
- route53:
command: get
zone: foo.com
record: new.foo.com
type: A
register: rec
# Delete new.foo.com A record using the results from the get command
- route53:
command: delete
zone: foo.com
record: "{{ rec.set.record }}"
ttl: "{{ rec.set.ttl }}"
type: "{{ rec.set.type }}"
value: "{{ rec.set.value }}"
# Add an AAAA record. Note that because there are colons in the value
# that the entire parameter list must be quoted:
- route53:
command: "create"
zone: "foo.com"
record: "localhost.foo.com"
type: "AAAA"
ttl: "7200"
value: "::1"
# Add a TXT record. Note that TXT and SPF records must be surrounded
# by quotes when sent to Route 53:
- route53:
command: "create"
zone: "foo.com"
record: "localhost.foo.com"
type: "TXT"
ttl: "7200"
value: '"bar"'
# Add an alias record that points to an Amazon ELB:
- route53:
command=create
zone=foo.com
record=elb.foo.com
type=A
value="{{ elb_dns_name }}"
alias=True
alias_hosted_zone_id="{{ elb_zone_id }}"
# Add an alias record that points to an Amazon ELB and evaluates it health:
- route53:
command=create
zone=foo.com
record=elb.foo.com
type=A
value="{{ elb_dns_name }}"
alias=True
alias_hosted_zone_id="{{ elb_zone_id }}"
alias_evaluate_target_health=True
# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value
# that the entire parameter list must be quoted:
- route53:
command: "create"
zone: "foo.com"
hosted_zone_id: "Z2AABBCCDDEEFF"
record: "localhost.foo.com"
type: "AAAA"
ttl: "7200"
value: "::1"
# Use a routing policy to distribute traffic:
- route53:
command: "create"
zone: "foo.com"
record: "www.foo.com"
type: "CNAME"
value: "host1.foo.com"
ttl: 30
# Routing policy
identifier: "host1@www"
weight: 100
health_check: "d994b780-3150-49fd-9205-356abdd42e75"
'''
MINIMUM_BOTO_VERSION = '2.28.0'
WAIT_RETRY_SLEEP = 5 # how many seconds to wait between propagation status polls
import time
import distutils.version
try:
import boto
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection
from boto.route53.record import Record, ResourceRecordSets
from boto.route53.status import Status
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class TimeoutError(Exception):
pass
def get_zone_by_name(conn, module, zone_name, want_private, zone_id, want_vpc_id):
"""Finds a zone by name or zone_id"""
for zone in conn.get_zones():
# only save this zone id if the private status of the zone matches
# the private_zone_in boolean specified in the params
private_zone = module.boolean(zone.config.get('PrivateZone', False))
if private_zone == want_private and ((zone.name == zone_name and zone_id == None) or zone.id.replace('/hostedzone/', '') == zone_id):
if want_vpc_id:
# NOTE: These details aren't available in other boto methods, hence the necessary
# extra API call
zone_details = conn.get_hosted_zone(zone.id)['GetHostedZoneResponse']
# this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
if isinstance(zone_details['VPCs'], dict):
if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id:
return zone
else: # Forward compatibility for when boto fixes that bug
if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
return zone
else:
return zone
return None
def commit(changes, retry_interval, wait, wait_timeout):
"""Commit changes, but retry PriorRequestNotComplete errors."""
result = None
retry = 10
while True:
try:
retry -= 1
result = changes.commit()
break
except boto.route53.exception.DNSServerError, e:
code = e.body.split("<Code>")[1]
code = code.split("</Code>")[0]
if code != 'PriorRequestNotComplete' or retry < 0:
raise e
time.sleep(float(retry_interval))
if wait:
timeout_time = time.time() + wait_timeout
connection = changes.connection
change = result['ChangeResourceRecordSetsResponse']['ChangeInfo']
status = Status(connection, change)
while status.status != 'INSYNC' and time.time() < timeout_time:
time.sleep(WAIT_RETRY_SLEEP)
status.update()
if time.time() >= timeout_time:
raise TimeoutError()
return result
# Shamelessly copied over from https://git.io/vgmDG
IGNORE_CODE = 'Throttling'
MAX_RETRIES=5
def invoke_with_throttling_retries(function_ref, *argv):
retries=0
while True:
try:
retval=function_ref(*argv)
return retval
except boto.exception.BotoServerError, e:
if e.code != IGNORE_CODE or retries==MAX_RETRIES:
raise e
time.sleep(5 * (2**retries))
retries += 1
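# Usage sketch (arguments illustrative; mirrors the call in main() below):
#
#   result = invoke_with_throttling_retries(commit, changes,
#                                           retry_interval_in,
#                                           wait_in, wait_timeout_in)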
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['get', 'create', 'delete'], required=True),
zone = dict(required=True),
hosted_zone_id = dict(required=False, default=None),
record = dict(required=True),
ttl = dict(required=False, type='int', default=3600),
type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS', 'SOA'], required=True),
alias = dict(required=False, type='bool'),
alias_hosted_zone_id = dict(required=False),
alias_evaluate_target_health = dict(required=False, type='bool', default=False),
value = dict(required=False),
overwrite = dict(required=False, type='bool'),
retry_interval = dict(required=False, default=500),
private_zone = dict(required=False, type='bool', default=False),
identifier = dict(required=False, default=None),
weight = dict(required=False, type='int'),
region = dict(required=False),
health_check = dict(required=False),
failover = dict(required=False,choices=['PRIMARY','SECONDARY']),
vpc_id = dict(required=False),
wait = dict(required=False, type='bool', default=False),
wait_timeout = dict(required=False, type='int', default=300),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
if distutils.version.StrictVersion(boto.__version__) < distutils.version.StrictVersion(MINIMUM_BOTO_VERSION):
module.fail_json(msg='Found boto in version %s, but >= %s is required' % (boto.__version__, MINIMUM_BOTO_VERSION))
command_in = module.params.get('command')
zone_in = module.params.get('zone').lower()
hosted_zone_id_in = module.params.get('hosted_zone_id')
ttl_in = module.params.get('ttl')
record_in = module.params.get('record').lower()
type_in = module.params.get('type')
value_in = module.params.get('value')
alias_in = module.params.get('alias')
alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
retry_interval_in = module.params.get('retry_interval')
private_zone_in = module.params.get('private_zone')
identifier_in = module.params.get('identifier')
weight_in = module.params.get('weight')
region_in = module.params.get('region')
health_check_in = module.params.get('health_check')
failover_in = module.params.get('failover')
vpc_id_in = module.params.get('vpc_id')
wait_in = module.params.get('wait')
wait_timeout_in = module.params.get('wait_timeout')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
value_list = ()
if type(value_in) is str:
if value_in:
value_list = sorted([s.strip() for s in value_in.split(',')])
elif type(value_in) is list:
value_list = sorted(value_in)
if zone_in[-1:] != '.':
zone_in += "."
if record_in[-1:] != '.':
record_in += "."
if command_in == 'create' or command_in == 'delete':
if not value_in:
module.fail_json(msg = "parameter 'value' required for create/delete")
elif alias_in:
if len(value_list) != 1:
module.fail_json(msg = "parameter 'value' must contain a single dns name for alias create/delete")
elif not alias_hosted_zone_id_in:
module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete")
elif ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None:
module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier")
if command_in == 'create':
if ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None:
module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier")
elif ( weight_in==None and region_in==None and failover_in==None ) and identifier_in!=None:
module.fail_json(msg= "You have specified identifier which makes sense only if you specify one of: weight, region or failover.")
if vpc_id_in and not private_zone_in:
module.fail_json(msg="parameter 'private_zone' must be true when specifying parameter"
" 'vpc_id'")
# connect to the route53 endpoint
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
# Find the named zone ID
zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in)
# Verify that the requested zone is already defined in Route53
if zone is None:
errmsg = "Zone %s does not exist in Route53" % zone_in
module.fail_json(msg = errmsg)
record = {}
found_record = False
wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
identifier=identifier_in, weight=weight_in, region=region_in,
health_check=health_check_in, failover=failover_in)
for v in value_list:
if alias_in:
wanted_rset.set_alias(alias_hosted_zone_id_in, v, alias_evaluate_target_health_in)
else:
wanted_rset.add_value(v)
sets = conn.get_all_rrsets(zone.id, name=record_in, type=type_in, identifier=identifier_in)
for rset in sets:
# Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
# tripping of things like * and @.
decoded_name = rset.name.replace(r'\052', '*')
decoded_name = decoded_name.replace(r'\100', '@')
#Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block
rset.name = decoded_name
if identifier_in is not None:
identifier_in = str(identifier_in)
if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
found_record = True
record['zone'] = zone_in
record['type'] = rset.type
record['record'] = decoded_name
record['ttl'] = rset.ttl
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if hosted_zone_id_in:
record['hosted_zone_id'] = hosted_zone_id_in
record['identifier'] = rset.identifier
record['weight'] = rset.weight
record['region'] = rset.region
record['failover'] = rset.failover
record['health_check'] = rset.health_check
if rset.alias_dns_name:
record['alias'] = True
record['value'] = rset.alias_dns_name
record['values'] = [rset.alias_dns_name]
record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health
else:
record['alias'] = False
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
module.exit_json(changed=False)
break
if command_in == 'get':
if type_in == 'NS':
ns = record['values']
else:
            # Retrieve the name servers associated with the zone.
ns = conn.get_zone(zone_in).get_nameservers()
module.exit_json(changed=False, set=record, nameservers=ns)
if command_in == 'delete' and not found_record:
module.exit_json(changed=False)
changes = ResourceRecordSets(conn, zone.id)
if command_in == 'create' or command_in == 'delete':
if command_in == 'create' and found_record:
if not module.params['overwrite']:
module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it")
command = 'UPSERT'
else:
command = command_in.upper()
changes.add_change_record(command, wanted_rset)
try:
result = invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in)
except boto.route53.exception.DNSServerError, e:
txt = e.body.split("<Message>")[1]
txt = txt.split("</Message>")[0]
if "but it already exists" in txt:
module.exit_json(changed=False)
else:
module.fail_json(msg = txt)
except TimeoutError:
module.fail_json(msg='Timeout waiting for changes to replicate')
module.exit_json(changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
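# Illustrative playbook usage (a hedged sketch, not part of this file; the
# module name and task keys mirror the module.params read in main() above):
#
#   - route53:
#       command: create
#       zone: example.com
#       record: www.example.com
#       type: A
#       ttl: 300
#       value: 203.0.113.10
#       overwrite: true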
| gpl-3.0 |
gurneyalex/OpenUpgrade | addons/board/board.py | 65 | 6715 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
from textwrap import dedent
from openerp import tools
from openerp.osv import fields, osv
class board_board(osv.osv):
_name = 'board.board'
_description = "Board"
_auto = False
_columns = {}
@tools.cache()
def list(self, cr, uid, context=None):
Actions = self.pool.get('ir.actions.act_window')
Menus = self.pool.get('ir.ui.menu')
IrValues = self.pool.get('ir.values')
act_ids = Actions.search(cr, uid, [('res_model', '=', self._name)], context=context)
refs = ['%s,%s' % (Actions._name, act_id) for act_id in act_ids]
# cannot search "action" field on menu (non stored function field without search_fnct)
irv_ids = IrValues.search(cr, uid, [
('model', '=', 'ir.ui.menu'),
('key', '=', 'action'),
('key2', '=', 'tree_but_open'),
('value', 'in', refs),
], context=context)
menu_ids = map(itemgetter('res_id'), IrValues.read(cr, uid, irv_ids, ['res_id'], context=context))
menu_ids = Menus._filter_visible_menus(cr, uid, menu_ids, context=context)
menu_names = Menus.name_get(cr, uid, menu_ids, context=context)
return [dict(id=m[0], name=m[1]) for m in menu_names]
def _clear_list_cache(self):
self.list.clear_cache(self)
def create(self, cr, user, vals, context=None):
return 0
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
"""
Overrides orm field_view_get.
@return: Dictionary of Fields, arch and toolbar.
"""
        res = super(board_board, self).fields_view_get(cr, user, view_id, view_type,
                                                       context, toolbar=toolbar, submenu=submenu)
CustView = self.pool.get('ir.ui.view.custom')
vids = CustView.search(cr, user, [('user_id', '=', user), ('ref_id', '=', view_id)], context=context)
if vids:
view_id = vids[0]
arch = CustView.browse(cr, user, view_id, context=context)
res['custom_view_id'] = view_id
res['arch'] = arch.arch
res['arch'] = self._arch_preprocessing(cr, user, res['arch'], context=context)
res['toolbar'] = {'print': [], 'action': [], 'relate': []}
return res
def _arch_preprocessing(self, cr, user, arch, context=None):
from lxml import etree
def remove_unauthorized_children(node):
for child in node.iterchildren():
if child.tag == 'action' and child.get('invisible'):
node.remove(child)
else:
child = remove_unauthorized_children(child)
return node
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
archnode = etree.fromstring(encode(arch))
return etree.tostring(remove_unauthorized_children(archnode), pretty_print=True)
class board_create(osv.osv_memory):
def board_create(self, cr, uid, ids, context=None):
assert len(ids) == 1
this = self.browse(cr, uid, ids[0], context=context)
view_arch = dedent("""<?xml version="1.0"?>
<form string="%s" version="7.0">
<board style="2-1">
<column/>
<column/>
</board>
</form>
""".strip() % (this.name,))
view_id = self.pool.get('ir.ui.view').create(cr, uid, {
'name': this.name,
'model': 'board.board',
'priority': 16,
'type': 'form',
'arch': view_arch,
}, context=context)
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, {
'name': this.name,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'board.board',
'usage': 'menu',
'view_id': view_id,
'help': dedent('''<div class="oe_empty_custom_dashboard">
<p>
<b>This dashboard is empty.</b>
</p><p>
To add the first report into this dashboard, go to any
menu, switch to list or graph view, and click <i>'Add to
Dashboard'</i> in the extended search options.
</p><p>
You can filter and group data before inserting into the
dashboard using the search options.
</p>
</div>
''')
}, context=context)
menu_id = self.pool.get('ir.ui.menu').create(cr, uid, {
'name': this.name,
'parent_id': this.menu_parent_id.id,
'action': 'ir.actions.act_window,%s' % (action_id,)
}, context=context)
self.pool.get('board.board')._clear_list_cache()
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {
'menu_id': menu_id
},
}
def _default_menu_parent_id(self, cr, uid, context=None):
_, menu_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'menu_reporting_dashboard')
return menu_id
_name = "board.create"
_description = "Board Creation"
_columns = {
'name': fields.char('Board Name', size=64, required=True),
'menu_parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
}
_defaults = {
'menu_parent_id': _default_menu_parent_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
XiaosongWei/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_connect-src_none.py | 25 | 3170 | def main(request, response):
response.headers.set(
"Content-Security-Policy",
"connect-src 'none'; script-src 'self' 'unsafe-inline'")
response.headers.set(
"X-Content-Security-Policy",
"connect-src 'none'; script-src 'self' 'unsafe-inline'")
response.headers.set(
"X-WebKit-CSP",
"connect-src 'none'; script-src 'self' 'unsafe-inline'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_connect-src_none_xmlhttprequest</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#connect-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="connect-src 'none'; script-src 'self' 'unsafe-inline'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<script>
var xhr = new XMLHttpRequest();
test(function() {
try {
xhr.open("GET", "support/csp.js");
assert_unreached("Should not reach here");
} catch(e) {
// To be improved for exception error checking
}
}, document.title + "_blocked");
test(function() {
try {
xhr.open("GET", "http://https://www.tizen.org");
assert_unreached("Should not reach here");
} catch(e) {
// To be improved for exception error checking
}
}, document.title + "_blocked_ext");
</script>
</body>
</html> """
| bsd-3-clause |
BlackPole/bp-enigma2 | lib/python/Tools/ASCIItranslit.py | 84 | 4273 | # -*- coding:utf-8 -*-
ASCIItranslit = { \
0x0022: "''", \
0x002A: "_", \
0x002F: "_", \
0x003A: "_", \
0x003C: "_", \
0x003D: "_", \
0x003E: "_", \
0x003F: "_", \
0x005C: "_", \
0x007C: "_", \
0x007F: "", \
0x00A0: "_", \
0x00A1: "!", \
0x00A2: "c", \
0x00A3: "lb", \
0x00A4: "", \
0x00A5: "yen", \
0x00A6: "I", \
0x00A7: "SS", \
0x00A8: "'", \
0x00A9: "(c)", \
0x00AA: "a", \
0x00AB: "<<", \
0x00AC: "not", \
0x00AD: "-", \
0x00AE: "(R)", \
0x00AF: "", \
0x00B0: "^0", \
0x00B1: "+-", \
0x00B2: "^2", \
0x00B3: "^3", \
0x00B4: "'", \
0x00B5: "u", \
0x00B6: "P", \
0x00B7: ".", \
0x00B8: ",", \
0x00B9: "^1", \
0x00BA: "o", \
0x00BB: ">>", \
0x00BC: "1_4 ", \
0x00BD: "1_2 ", \
0x00BE: "3_4 ", \
0x00BF: "_", \
0x00C0: "`A", \
0x00C1: "'A", \
0x00C2: "^A", \
0x00C3: "~A", \
0x00C4: "Ae", \
0x00C5: "A", \
0x00C6: "AE", \
0x00C7: "C", \
0x00C8: "`E", \
0x00C9: "'E", \
0x00CA: "^E", \
0x00CB: "E", \
0x00CC: "`I", \
0x00CD: "'I", \
0x00CE: "^I", \
0x00CF: "I", \
0x00D0: "D", \
0x00D1: "~N", \
0x00D2: "`O", \
0x00D3: "'O", \
0x00D4: "^O", \
0x00D5: "~O", \
0x00D6: "Oe", \
0x00D7: "x", \
0x00D8: "O", \
0x00D9: "`U", \
0x00DA: "'U", \
0x00DB: "^U", \
0x00DC: "Ue", \
0x00DD: "'Y", \
0x00DE: "Th", \
0x00DF: "ss", \
0x00E0: "`a", \
0x00E1: "'a", \
0x00E2: "^a", \
0x00E3: "~a", \
0x00E4: "AE", \
0x00E5: "a", \
0x00E6: "ae", \
0x00E7: "c", \
0x00E8: "`e", \
0x00E9: "'e", \
0x00EA: "^e", \
0x00EB: "e", \
0x00EC: "`i", \
0x00ED: "'i", \
0x00EE: "^i", \
0x00EF: "i", \
0x00F0: "d", \
0x00F1: "~n", \
0x00F2: "`o", \
0x00F3: "'o", \
0x00F4: "^o", \
0x00F5: "~o", \
0x00F6: "oe", \
0x00F7: "_", \
0x00F8: "o", \
0x00F9: "`u", \
0x00FA: "'u", \
0x00FB: "^u", \
0x00FC: "ue", \
0x00FD: "'y", \
0x00FE: "th", \
0x00FF: "Y", \
0x0100: "A", \
0x0101: "a", \
0x0102: "A", \
0x0103: "a", \
0x0104: "A", \
0x0105: "a", \
0x0106: "'C", \
0x0107: "'c", \
0x0108: "^C", \
0x0109: "^c", \
0x010A: "C", \
0x010B: "c", \
0x010C: "C", \
0x010D: "c", \
0x010E: "D", \
0x010F: "d", \
0x0110: "D", \
0x0111: "d", \
0x0112: "E", \
0x0113: "e", \
0x0114: "E", \
0x0115: "e", \
0x0116: "E", \
0x0117: "e", \
0x0118: "E", \
0x0119: "e", \
0x011A: "E", \
0x011B: "e", \
0x011C: "^G", \
0x011D: "^g", \
0x011E: "G", \
0x011F: "g", \
0x0120: "G", \
0x0121: "g", \
0x0122: "G", \
0x0123: "g", \
0x0124: "^H", \
0x0125: "^h", \
0x0126: "H", \
0x0127: "h", \
0x0128: "~I", \
0x0129: "~i", \
0x012A: "I", \
0x012B: "i", \
0x012C: "I", \
0x012D: "i", \
0x012E: "I", \
0x012F: "i", \
0x0130: "I", \
0x0131: "i", \
0x0132: "IJ", \
0x0133: "ij", \
0x0134: "^J", \
0x0135: "^j", \
0x0136: "K", \
0x0137: "k", \
0x0138: "", \
0x0139: "L", \
0x013A: "l", \
0x013B: "L", \
0x013C: "l", \
0x013D: "L", \
0x013E: "l", \
0x013F: "L", \
0x0140: "l", \
0x0141: "L", \
0x0142: "l", \
0x0143: "'N", \
0x0144: "'n", \
0x0145: "N", \
0x0146: "n", \
0x0147: "N", \
0x0148: "n", \
0x0149: "n", \
0x014A: "_", \
0x014B: "_", \
0x014C: "O", \
0x014D: "o", \
0x014E: "O", \
0x014F: "o", \
0x0150: "''o", \
0x0152: "OE", \
0x0153: "oe", \
0x0154: "'R", \
0x0155: "'r", \
0x0156: "R", \
0x0157: "r", \
0x0158: "R", \
0x0159: "r", \
0x015A: "'s", \
0x015B: "'s", \
0x015C: "^S", \
0x015D: "^s", \
0x015E: "S", \
0x015F: "s", \
0x0160: "S", \
0x0161: "s", \
0x0162: "T", \
0x0163: "t", \
0x0164: "T", \
0x0165: "t", \
0x0166: "T", \
0x0167: "t", \
0x0168: "~U", \
0x0169: "~u", \
0x016A: "U", \
0x016B: "u", \
0x016C: "U", \
0x016D: "u", \
0x016E: "U", \
0x016F: "u", \
0x0170: "''u", \
0x0172: "U", \
0x0173: "u", \
0x0174: "^W", \
0x0175: "^w", \
0x0176: "^Y", \
0x0177: "^y", \
0x0178: "Y", \
0x0179: "'Z", \
0x017A: "'z", \
0x017B: "Z", \
0x017C: "z", \
0x017D: "Z", \
0x017E: "z", \
0x017F: "s", \
0x018F: "_", \
0x0192: "f", \
0x01C4: "DZ", \
0x01C5: "DZ", \
0x01C6: "DZ", \
0x01C7: "LJ", \
0x01C8: "Lj", \
0x01C9: "lj", \
0x01CA: "NJ", \
0x01CB: "Nj", \
0x01CC: "nj", \
0x01F1: "DZ", \
0x01F2: "Dz", \
0x01F3: "dz", \
0x0218: "S", \
0x0219: "s", \
0x021A: "T", \
0x021B: "t", \
0x0259: "_", \
0x20AC: "EUR" }
def legacyEncode(string):
string2 = ""
	for char in string.decode("utf-8"):
i = ord(char)
if i < 33:
string2 += "_"
elif i in ASCIItranslit:
string2 += ASCIItranslit[i]
else:
try:
string2 += char.encode('ascii', 'strict')
except:
string2 += "_"
return string2.upper()
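# Illustrative behaviour (a hedged sketch; the values follow the table above
# and the final .upper() call):
#   legacyEncode("Grüße")  ->  "GRUESSE"   (0x00FC -> "ue", 0x00DF -> "ss")
#   legacyEncode("a*b?c")  ->  "A_B_C"     (0x002A and 0x003F both map to "_")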
| gpl-2.0 |
liorvh/infernal-twin | build/reportlab/src/rl_addons/rl_accel/tests/t4.py | 14 | 1875 | from reportlab.pdfbase.pdfmetrics import getFont, registerFont
from reportlab.pdfbase.ttfonts import TTFont
import time
from sys import getrefcount
registerFont(TTFont("Vera", "Vera.ttf"))
font = getFont('Vera')
_py_stringWidth = font._py_stringWidth
stringWidth = font.stringWidth
#assert stringWidth!=_py_stringWidth
#print "font=%s(%d) widths=%s(%d)" % (
# hex(id(font)), getrefcount(font),
# hex(id(font.widths)), getrefcount(font.widths),
# )
utext = 'This is the end of the \xce\x91\xce\xb2 world.'.decode('utf8')
fontSize = 12
print(stringWidth(utext,fontSize))
print(_py_stringWidth(utext,fontSize))
print(hex(id(font)), getrefcount(font),hex(id(font.face)), getrefcount(font.face), hex(id(font.face.charWidths)), getrefcount(font.face.charWidths), hex(id(font.face.defaultWidth)), getrefcount(font.face.defaultWidth), hex(id(utext)), getrefcount(utext), hex(id(fontSize)), getrefcount(fontSize))
assert stringWidth(utext,fontSize)==_py_stringWidth(utext,fontSize)
def tim(N,msg,func,*args):
t0 = time.time()
for i in range(N):
x = func(*args)
t1 = time.time()
return "%s N=%d t=%.3f\n%r" % (msg,N,t1-t0,x)
N=100000
print(tim(N,'_py_stringWidth',_py_stringWidth,utext,fontSize))
print(tim(N,'stringWidth',stringWidth,utext,fontSize))
utext1='ABCDEFG'
N=1000000
print(tim(N,'_py_stringWidth',_py_stringWidth,utext1,fontSize))
print(tim(N,'stringWidth',stringWidth,utext1,fontSize))
utext1='ABCDE\xce\xb2G'
print(tim(N,'_py_stringWidth',_py_stringWidth,utext1,fontSize))
print(tim(N,'stringWidth',stringWidth,utext1,fontSize))
print(hex(id(font)), getrefcount(font),hex(id(font.face)), getrefcount(font.face), hex(id(font.face.charWidths)), getrefcount(font.face.charWidths), hex(id(font.face.defaultWidth)), getrefcount(font.face.defaultWidth), hex(id(utext)), getrefcount(utext), hex(id(fontSize)), getrefcount(fontSize))
| gpl-3.0 |
foursquare/pants | src/python/pants/java/nailgun_executor.py | 1 | 12211 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import logging
import os
import re
import select
import threading
import time
from contextlib import closing
from six import string_types
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.nailgun_client import NailgunClient
from pants.pantsd.process_manager import FingerprintedProcessManager, ProcessGroup
from pants.util.dirutil import read_file, safe_file_dump, safe_open
logger = logging.getLogger(__name__)
class NailgunProcessGroup(ProcessGroup):
_NAILGUN_KILL_LOCK = threading.Lock()
def __init__(self, metadata_base_dir=None):
super(NailgunProcessGroup, self).__init__(name='nailgun', metadata_base_dir=metadata_base_dir)
# TODO: this should enumerate the .pids dir first, then fallback to ps enumeration (& warn).
def _iter_nailgun_instances(self, everywhere=False):
def predicate(proc):
if proc.name() == NailgunExecutor._PROCESS_NAME:
if not everywhere:
return NailgunExecutor._PANTS_NG_BUILDROOT_ARG in proc.cmdline()
else:
return any(arg.startswith(NailgunExecutor._PANTS_NG_ARG_PREFIX) for arg in proc.cmdline())
return self.iter_instances(predicate)
def killall(self, everywhere=False):
"""Kills all nailgun servers started by pants.
:param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
otherwise restricts the nailguns killed to those started for the
current build root.
"""
with self._NAILGUN_KILL_LOCK:
for proc in self._iter_nailgun_instances(everywhere):
logger.info('killing nailgun server pid={pid}'.format(pid=proc.pid))
proc.terminate()
# TODO: Once we integrate standard logging into our reporting framework, we can consider making
# some of the log.debug() below into log.info(). Right now it just looks wrong on the console.
class NailgunExecutor(Executor, FingerprintedProcessManager):
"""Executes java programs by launching them in nailgun server.
If a nailgun is not available for a given set of jvm args and classpath, one is launched and
re-used for the given jvm args and classpath on subsequent runs.
"""
# 'NGServer 0.9.1 started on 127.0.0.1, port 53785.'
_NG_PORT_REGEX = re.compile(r'.*\s+port\s+(\d+)\.$')
# Used to identify if we own a given nailgun server.
FINGERPRINT_CMD_KEY = b'-Dpants.nailgun.fingerprint'
_PANTS_NG_ARG_PREFIX = b'-Dpants.buildroot'
_PANTS_OWNER_ARG_PREFIX = b'-Dpants.nailgun.owner'
_PANTS_NG_BUILDROOT_ARG = b'='.join((_PANTS_NG_ARG_PREFIX, get_buildroot().encode('utf-8')))
_NAILGUN_SPAWN_LOCK = threading.Lock()
_SELECT_WAIT = 1
_PROCESS_NAME = b'java'
def __init__(self, identity, workdir, nailgun_classpath, distribution,
connect_timeout=10, connect_attempts=5, metadata_base_dir=None):
Executor.__init__(self, distribution=distribution)
FingerprintedProcessManager.__init__(self,
name=identity,
process_name=self._PROCESS_NAME,
metadata_base_dir=metadata_base_dir)
if not isinstance(workdir, string_types):
raise ValueError('Workdir must be a path string, not: {workdir}'.format(workdir=workdir))
self._identity = identity
self._workdir = workdir
self._ng_stdout = os.path.join(workdir, 'stdout')
self._ng_stderr = os.path.join(workdir, 'stderr')
self._nailgun_classpath = maybe_list(nailgun_classpath)
self._connect_timeout = connect_timeout
self._connect_attempts = connect_attempts
def __str__(self):
return 'NailgunExecutor({identity}, dist={dist}, pid={pid} socket={socket})'.format(
identity=self._identity, dist=self._distribution, pid=self.pid, socket=self.socket)
def _create_owner_arg(self, workdir):
# Currently the owner is identified via the full path to the workdir.
return '='.join((self._PANTS_OWNER_ARG_PREFIX, workdir))
def _create_fingerprint_arg(self, fingerprint):
return '='.join((self.FINGERPRINT_CMD_KEY, fingerprint))
@staticmethod
def _fingerprint(jvm_options, classpath, java_version):
"""Compute a fingerprint for this invocation of a Java task.
:param list jvm_options: JVM options passed to the java invocation
:param list classpath: The -cp arguments passed to the java invocation
:param Revision java_version: return value from Distribution.version()
:return: a hexstring representing a fingerprint of the java invocation
"""
digest = hashlib.sha1()
# TODO(John Sirois): hash classpath contents?
[digest.update(item) for item in (''.join(sorted(jvm_options)),
''.join(sorted(classpath)),
repr(java_version))]
return digest.hexdigest()
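  # Illustrative property (a hedged note): jvm_options and classpath are
  # sorted before hashing, so the fingerprint is order-insensitive, e.g.
  #   _fingerprint(['-Xmx1g', '-Dx'], ['b.jar', 'a.jar'], ver) ==
  #   _fingerprint(['-Dx', '-Xmx1g'], ['a.jar', 'b.jar'], ver)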
def _runner(self, classpath, main, jvm_options, args, cwd=None):
"""Runner factory. Called via Executor.execute()."""
command = self._create_command(classpath, main, jvm_options, args)
class Runner(self.Runner):
@property
def executor(this):
return self
@property
def command(self):
return list(command)
def run(this, stdout=None, stderr=None, stdin=None, cwd=None):
nailgun = self._get_nailgun_client(jvm_options, classpath, stdout, stderr, stdin)
try:
logger.debug('Executing via {ng_desc}: {cmd}'.format(ng_desc=nailgun, cmd=this.cmd))
return nailgun.execute(main, cwd, *args)
except nailgun.NailgunError as e:
self.terminate()
raise self.Error('Problem launching via {ng_desc} command {main} {args}: {msg}'
.format(ng_desc=nailgun, main=main, args=' '.join(args), msg=e))
return Runner()
def _check_nailgun_state(self, new_fingerprint):
running = self.is_alive()
updated = self.needs_restart(new_fingerprint) or self.cmd != self._distribution.java
    logger.debug('Nailgun {nailgun} state: updated={up!s} running={run!s} fingerprint={old_fp} '
'new_fingerprint={new_fp} distribution={old_dist} new_distribution={new_dist}'
.format(nailgun=self._identity, up=updated, run=running,
old_fp=self.fingerprint, new_fp=new_fingerprint,
old_dist=self.cmd, new_dist=self._distribution.java))
return running, updated
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client."""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug('Found running nailgun server that needs updating, killing {server}'
.format(server=self._identity))
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr, stdin)
return self._create_ngclient(self.socket, stdout, stderr, stdin)
def _await_socket(self, timeout):
"""Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun stdout."""
with safe_open(self._ng_stdout, 'r') as ng_stdout:
start_time = time.time()
accumulated_stdout = ''
while 1:
readable, _, _ = select.select([ng_stdout], [], [], self._SELECT_WAIT)
if readable:
line = ng_stdout.readline() # TODO: address deadlock risk here.
try:
return self._NG_PORT_REGEX.match(line).group(1)
except AttributeError:
pass
accumulated_stdout += line
if (time.time() - start_time) > timeout:
stderr = read_file(self._ng_stderr)
raise NailgunClient.NailgunError(
'Failed to read nailgun output after {sec} seconds!\n'
'Stdout:\n{stdout}\nStderr:\n{stderr}'.format(
sec=timeout,
stdout=accumulated_stdout,
stderr=stderr,
)
)
def _create_ngclient(self, port, stdout, stderr, stdin):
return NailgunClient(port=port, ins=stdin, out=stdout, err=stderr, workdir=get_buildroot())
def ensure_connectable(self, nailgun):
"""Ensures that a nailgun client is connectable or raises NailgunError."""
attempt_count = 1
while 1:
try:
with closing(nailgun.try_connect()) as sock:
logger.debug('Verified new ng server is connectable at {}'.format(sock.getpeername()))
return
except nailgun.NailgunConnectionError:
if attempt_count >= self._connect_attempts:
logger.debug('Failed to connect to ng after {} attempts'.format(self._connect_attempts))
raise # Re-raise the NailgunConnectionError which provides more context to the user.
attempt_count += 1
time.sleep(self.WAIT_INTERVAL_SEC)
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin):
"""Synchronously spawn a new nailgun server."""
# Truncate the nailguns stdout & stderr.
safe_file_dump(self._ng_stdout, '')
safe_file_dump(self._ng_stderr, '')
jvm_options = jvm_options + [self._PANTS_NG_BUILDROOT_ARG,
self._create_owner_arg(self._workdir),
self._create_fingerprint_arg(fingerprint)]
post_fork_child_opts = dict(fingerprint=fingerprint,
jvm_options=jvm_options,
classpath=classpath,
stdout=stdout,
stderr=stderr)
logger.debug('Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}'
.format(i=self._identity, f=fingerprint, j=jvm_options, cp=classpath))
self.daemon_spawn(post_fork_child_opts=post_fork_child_opts)
# Wait for and write the port information in the parent so we can bail on exception/timeout.
self.await_pid(self._connect_timeout)
self.write_socket(self._await_socket(self._connect_timeout))
logger.debug('Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}'
.format(i=self._identity, f=fingerprint, pid=self.pid, port=self.socket))
client = self._create_ngclient(self.socket, stdout, stderr, stdin)
self.ensure_connectable(client)
return client
def _check_process_buildroot(self, process):
"""Matches only processes started from the current buildroot."""
return self._PANTS_NG_BUILDROOT_ARG in process.cmdline()
def is_alive(self):
"""A ProcessManager.is_alive() override that ensures buildroot flags are present in the process
command line arguments."""
return super(NailgunExecutor, self).is_alive(self._check_process_buildroot)
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
java = SubprocessExecutor(self._distribution)
subproc = java.spawn(classpath=classpath,
main='com.martiansoftware.nailgun.NGServer',
jvm_options=jvm_options,
args=[':0'],
stdin=safe_open('/dev/null', 'r'),
stdout=safe_open(self._ng_stdout, 'w'),
stderr=safe_open(self._ng_stderr, 'w'),
close_fds=True)
self.write_pid(subproc.pid)
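# Illustrative construction (a hedged sketch, not part of this module; `dist`
# is assumed to be a pants Distribution, the jar path is a placeholder, and
# runner() is assumed from the Executor base class):
#   executor = NailgunExecutor('tool-ng', '/tmp/ng-workdir',
#                              ['/path/to/nailgun-server.jar'], dist)
#   runner = executor.runner(classpath=['tool.jar'], main='com.example.Main')
#   exit_code = runner.run()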
| apache-2.0 |
toofar/qutebrowser | tests/unit/utils/usertypes/test_question.py | 4 | 2892 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for usertypes.Question."""
import pytest
from qutebrowser.utils import usertypes
@pytest.fixture
def question():
return usertypes.Question()
def test_attributes(question):
"""Test setting attributes."""
question.default = True
question.text = "foo"
def test_mode(question):
"""Test setting mode to valid members."""
question.mode = usertypes.PromptMode.yesno
assert question.mode == usertypes.PromptMode.yesno
def test_mode_invalid(question):
"""Test setting mode to something which is not a PromptMode member."""
with pytest.raises(TypeError):
question.mode = 42
@pytest.mark.parametrize('mode, answer, signal_names', [
(usertypes.PromptMode.text, 'foo', ['answered', 'completed']),
(usertypes.PromptMode.yesno, True, ['answered', 'completed',
'answered_yes']),
(usertypes.PromptMode.yesno, False, ['answered', 'completed',
'answered_no']),
])
def test_done(mode, answer, signal_names, question, qtbot):
"""Test the 'done' method and completed/answered signals."""
question.mode = mode
question.answer = answer
signals = [getattr(question, name) for name in signal_names]
with qtbot.waitSignals(signals, order='strict'):
question.done()
assert not question.is_aborted
def test_cancel(question, qtbot):
"""Test Question.cancel()."""
with qtbot.waitSignals([question.cancelled, question.completed],
order='strict'):
question.cancel()
assert not question.is_aborted
def test_abort(question, qtbot):
"""Test Question.abort()."""
with qtbot.waitSignals([question.aborted, question.completed],
order='strict'):
question.abort()
assert question.is_aborted
def test_abort_twice(question, qtbot):
"""Abort a question twice."""
with qtbot.wait_signal(question.aborted):
question.abort()
assert question.is_aborted
with qtbot.assert_not_emitted(question.aborted):
question.abort()
| gpl-3.0 |
ctrlaltdel/neutrinator | vendor/openstack/clustering/v1/event.py | 1 | 2015 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
class Event(resource.Resource):
resource_key = 'event'
resources_key = 'events'
base_path = '/events'
# Capabilities
allow_list = True
allow_fetch = True
_query_mapping = resource.QueryParameters(
'cluster_id', 'action', 'level', 'sort', 'global_project',
obj_id='oid', obj_name='oname', obj_type='otype',
)
# Properties
#: Timestamp string (in ISO8601 format) when the event was generated.
generated_at = resource.Body('timestamp')
#: The UUID of the object related to this event.
obj_id = resource.Body('oid')
#: The name of the object related to this event.
obj_name = resource.Body('oname')
#: The type name of the object related to this event.
obj_type = resource.Body('otype')
#: The UUID of the cluster related to this event, if any.
cluster_id = resource.Body('cluster_id')
#: The event level (priority).
level = resource.Body('level')
#: The ID of the user.
user_id = resource.Body('user')
#: The ID of the project (tenant).
project_id = resource.Body('project')
#: The string representation of the action associated with the event.
action = resource.Body('action')
#: The status of the associated object.
status = resource.Body('status')
#: A string description of the reason that brought the object into its
#: current status.
status_reason = resource.Body('status_reason')
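# Illustrative listing (a hedged sketch; `conn` is assumed to be an
# openstack.connection.Connection and `conn.clustering.events` is assumed
# from the openstacksdk clustering proxy):
#   for event in conn.clustering.events(cluster_id='c-123', level='ERROR'):
#       print(event.action, event.status)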
| gpl-3.0 |
zvolsky/codex | languages/zh-cn.py | 142 | 10465 | # coding: utf8
{
'!langcode!': 'zh-cn',
'!langname!': '中文',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" 应为选择表达式, 格式如 "field1=\'value\'". 但是对 JOIN 的结果不可以使用 update 或者 delete"',
'%s %%{row} deleted': '已删除 %s 笔',
'%s %%{row} updated': '已更新 %s 笔',
'%s selected': '%s 已选择',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(格式类似 "zh-tw")',
'A new version of web2py is available': '新版 web2py 已推出',
'A new version of web2py is available: %s': '新版 web2py 已推出: %s',
'about': '关于',
'About': '关于',
'About application': '关于本应用程序',
'Access Control': 'Access Control',
'Admin is disabled because insecure channel': '管理功能(Admin)在非安全连接环境下自动关闭',
'Admin is disabled because unsecure channel': '管理功能(Admin)在非安全连接环境下自动关闭',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': '点击进入管理界面',
'Administrator Password:': '管理员密码:',
'Ajax Recipes': 'Ajax Recipes',
'An error occured, please %s the page': 'An error occured, please %s the page',
'appadmin is disabled because insecure channel': '管理界面在非安全通道下被禁用',
'Are you sure you want to delete file "%s"?': '确定要删除文件"%s"?',
'Are you sure you want to delete this object?': '确定要删除该对象么?',
'Are you sure you want to uninstall application "%s"': '确定要删除应用程序 "%s"',
'Are you sure you want to uninstall application "%s"?': '确定要删除应用程序 "%s"',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登录管理账号需要安全连接(HTTPS)或是在本地连接(localhost).',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因为在测试模式不保证多线程安全性,所以不可同时执行多个测试案例',
'ATTENTION: you cannot edit the running application!': '注意:不可编辑正在执行的应用程序!',
'Authentication': '验证',
'Available Databases and Tables': '可提供的数据库和数据表',
'Buy this book': '购买本书',
'cache': '高速缓存',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': '不可空白',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '编译失败:应用程序有错误,请排除错误后再尝试编译.',
'Change Password': '修改密码',
'change password': '修改密码',
'Check to delete': '打勾以示删除',
'Check to delete:': '打勾以示删除:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': '客户端网址(IP)',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': '控件',
'Controllers': '控件',
'Copyright': '版权所有',
'Create new application': '创建应用程序',
'Created By': 'Created By',
'Created On': 'Created On',
'Current request': '当前网络要求(request)',
'Current response': '当前网络响应(response)',
'Current session': '当前网络连接信息(session)',
'customize me!': '请调整我!',
'data uploaded': '数据已上传',
'Database': '数据库',
'Database %s select': '已选择 %s 数据库',
'Date and Time': '日期和时间',
'db': 'db',
'DB Model': '数据库模型',
'Delete': '删除',
'Delete:': '删除:',
'Demo': 'Demo',
'Deploy on Google App Engine': '发布到 Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': '描述',
'DESIGN': '设计',
'design': '设计',
'Design for': '设计用于',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': '完成!',
'Download': '下载',
'E-mail': '电子邮件',
'EDIT': '编辑',
'Edit': '编辑',
'Edit application': '编辑应用程序',
'Edit current record': '编辑当前记录',
'edit profile': '编辑配置文件',
'Edit Profile': '编辑配置文件',
'Edit This App': '编辑本应用程序',
'Editing file': '编辑文件',
'Editing file "%s"': '编辑文件"%s"',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Error logs for "%(app)s"': '"%(app)s"的错误记录',
'Errors': 'Errors',
'export as csv file': '以CSV格式导出',
'FAQ': 'FAQ',
'First name': '名',
'Forgot username?': '忘记用户名?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函数会显示 [passed].',
'Group ID': '群组编号',
'Groups': 'Groups',
'Hello World': 'Hello World',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': '导入/导出',
'Index': '索引',
'insert new': '插入新纪录',
'insert new %s': '插入新纪录 %s',
'Installed applications': '已安裝应用程序',
'Internal State': '內部状态',
'Introduction': 'Introduction',
'Invalid action': '非法操作(action)',
'Invalid email': '不符合电子邮件格式',
'Invalid Query': '无效的查询请求',
'invalid request': '网络要求无效',
'Is Active': 'Is Active',
'Key': 'Key',
'Language files (static strings) updated': '语言文件已更新',
'Languages': '各国语言',
'Last name': '姓',
'Last saved on:': '最后保存时间:',
'Layout': '网页布局',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': '软件授权',
'Live Chat': 'Live Chat',
'login': '登录',
'Login': '登录',
'Login to the Administrative Interface': '登录到管理员界面',
'logout': '登出',
'Logout': '登出',
'Lost Password': '忘记密码',
'Lost password?': '忘记密码?',
'Main Menu': '主菜单',
'Manage Cache': 'Manage Cache',
'Menu Model': '菜单模型(menu)',
'Models': '数据模型',
'Modified By': '修改者',
'Modified On': '修改时间',
'Modules': '程序模块',
'My Sites': 'My Sites',
'Name': '名字',
'New Record': '新记录',
'new record inserted': '已插入新记录',
'next 100 rows': '往后 100 笔',
'NO': '否',
'No databases in this application': '该应用程序不含数据库',
'Object or table name': 'Object or table name',
'Online examples': '点击进入在线例子',
'or import from csv file': '或导入CSV文件',
'Origin': '原文',
'Original/Translation': '原文/翻译',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': '概览',
'Password': '密码',
"Password fields don't match": '密码不匹配',
'Peeking at file': '选择文件',
'Plugins': 'Plugins',
'Powered by': '基于下列技术构建:',
'Preface': 'Preface',
'previous 100 rows': '往前 100 笔',
'Python': 'Python',
'Query:': '查询:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': '记录',
'record does not exist': '记录不存在',
'Record ID': '记录编号',
'Record id': '记录编号',
'Register': '注册',
'register': '注册',
'Registration identifier': 'Registration identifier',
'Registration key': '注册密钥',
'reload': 'reload',
'Remember me (for 30 days)': '记住我(30 天)',
'Reset Password key': '重置密码',
'Resolve Conflict file': '解决冲突文件',
'Role': '角色',
'Rows in Table': '在数据表里的记录',
'Rows selected': '笔记录被选择',
'Saved file hash:': '已保存文件的哈希值:',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': '状态',
'Static files': '静态文件',
'Statistics': '统计数据',
'Stylesheet': '网页样式表',
'submit': '提交',
'Submit': '提交',
'Support': 'Support',
'Sure you want to delete this object?': '确定要删除此对象?',
'Table': '数据表',
'Table name': '数据表名称',
'Testing application': '测试中的应用程序',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query"应是类似 "db.table1.field1==\'value\'" 的条件表达式. "db.table1.field1==db.table2.field2"的形式则代表执行 JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': '视图',
'There are no controllers': '沒有控件(controllers)',
'There are no models': '沒有数据库模型(models)',
'There are no modules': '沒有程序模块(modules)',
'There are no static files': '沒有静态文件',
'There are no translators, only default language is supported': '沒有对应的语言文件,仅支持原始语言',
'There are no views': '沒有视图',
'This App': '该应用',
'This is the %(filename)s template': '这是%(filename)s文件的模板(template)',
'Ticket': '问题清单',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': '时间戳',
'Twitter': 'Twitter',
'Unable to check for upgrades': '查询新版本失败',
'Unable to download': '无法下载',
'Unable to download app': '无法下载应用程序',
'unable to parse csv file': '无法解析CSV文件',
'Update:': '更新:',
'Upload existing application': '上传已有应用程序',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式可得到更复杂的条件表达式, (...)&(...) 代表必须都满足, (...)|(...) 代表其一, ~(...)则代表否.',
'User %(id)s Logged-in': '用户 %(id)s 已登录',
'User %(id)s Registered': '用户 %(id)s 已注册',
'User ID': '用户编号',
'Verify Password': '验证密码',
'Videos': '视频',
'View': '查看',
'Views': '视图',
'Welcome': '欢迎',
'Welcome %s': '欢迎 %s',
'Welcome to web2py': '欢迎使用 web2py',
'Welcome to web2py!': '欢迎使用 web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'YES': '是',
'You are successfully running web2py': '您已成功运行 web2py',
'You can modify this application and adapt it to your needs': '请根据您的需要修改本程序',
'You visited the url %s': 'You visited the url %s',
}
| agpl-3.0 |
iABC2XYZ/abc | StockPredict/TensorflowGPUPredictFinal.py | 2 | 10474 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 12:17:54 2017
@author: A
"""
import tensorflow as tf # Version 1.0 or 0.12
import numpy as np
import matplotlib.pyplot as plt
import os
plt.close('all')
inSeq=20
outSeq=20
batchSize = 50
ioputDim=2
hiddenDim =30
numIterLearning = 1000
def ReadHistDataIndex(indexCode,kindCode):
fileName='./data/'+kindCode.lower()+'/'+indexCode+'.'+kindCode.lower()
if(kindCode.lower()=='date'):
if not (os.path.exists(fileName)):
return '3000-01-01'
with open(fileName,'r') as fid:
dataCodeTmp=fid.readlines()
nDataCodeTmp=len(dataCodeTmp)
dataCode=np.copy(dataCodeTmp)
for nLine in xrange(nDataCodeTmp):
dataCode[nLine]=dataCodeTmp[nDataCodeTmp-nLine-1]
else:
if not (os.path.exists(fileName)):
return [0]
dataCode= np.loadtxt(fileName)
if np.shape(dataCode)==():
return [0]
        dataCode=np.flip(dataCode,0)
return dataCode
def GetDataXY(indexCode,isTrain, lenSeq,batchSize,ioputDim):
openCode=ReadHistDataIndex(indexCode,'open')
if len(lenSeq)==1:
        inSeq=outSeq=lenSeq[0]
else:
inSeq,outSeq=lenSeq[0],lenSeq[1]
nSeq=inSeq+outSeq
nData=len(openCode)
NChoose=nData-nSeq
nChoose= np.random.randint(low=0,high=NChoose+1,size=batchSize*ioputDim)
xBatch = []
yBatch = []
iChoose=-1
for _ in xrange(batchSize):
x_=np.empty((inSeq,ioputDim))
y_=np.empty((outSeq,ioputDim))
for nIO in xrange(ioputDim):
iChoose+=1
xTmp=openCode[nChoose[iChoose]:nChoose[iChoose]+inSeq]
yTmp=openCode[nChoose[iChoose]+inSeq:nChoose[iChoose]+nSeq]
x_[:,nIO]=xTmp
y_[:,nIO]=yTmp
xBatch.append(x_)
yBatch.append(y_)
xBatch = np.array(xBatch)
yBatch = np.array(yBatch)
# shape: (batchSize, lenSeq, outputDim)
xBatch = np.array(xBatch).transpose((1, 0, 2))
yBatch = np.array(yBatch).transpose((1, 0, 2))
return xBatch, yBatch
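# Illustrative shapes (a hedged note, following the transposes above):
#   X, Y = GetDataXY('300401', isTrain=True, lenSeq=[20, 20],
#                    batchSize=50, ioputDim=2)
#   X.shape == (20, 50, 2)   # (inSeq,  batchSize, ioputDim)
#   Y.shape == (20, 50, 2)   # (outSeq, batchSize, ioputDim)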
def TensorFlowPredict(indexCode,inSeq,outSeq,batchSize,hiddenDim,ioputDim,numIterLearning):
lenSeq = [inSeq,outSeq] ##############################
outputDim = inputDim =ioputDim ###################################
layersStackedCount = 2
    # Optimizer:
    learningRate = 0.01  # A small learning rate helps avoid divergence during training.
    lrDecay = 0.9  # default: 0.9. Simulated annealing.
    momentumTF = 0.00  # default: 0.0. Momentum applied to weight updates.
lambdaL2Reg = 0.003 # L2 regularization of weights - avoids overfitting
try:
        tf.nn.seq2seq = tf.contrib.legacy_seq2seq
tf.nn.rnn_cell = tf.contrib.rnn
tf.nn.rnn_cell.GRUCell = tf.contrib.rnn.GRUCell
print("TensorFlow's version : 1.0 (or more)")
except:
print("TensorFlow's version : 0.12")
tf.reset_default_graph()
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
with tf.variable_scope('Seq2seq'):
# Encoder: inputs
enc_inp = [
tf.placeholder(tf.float32, shape=(
None, inputDim), name="inp_{}".format(t))
for t in range(inSeq)
]
# Decoder: expected outputs
expected_sparse_output = [
tf.placeholder(tf.float32, shape=(None, outputDim),
name="expected_sparse_output_".format(t))
for t in range(outSeq)
]
dec_inp = [tf.zeros_like(
enc_inp[0], dtype=np.float32, name="GO")] + enc_inp[:-1]
# Create a `layersStackedCount` of stacked RNNs (GRU cells here).
cells = []
for i in range(layersStackedCount):
with tf.variable_scope('RNN_{}'.format(i)):
cells.append(tf.nn.rnn_cell.GRUCell(hiddenDim))
# cells.append(tf.nn.rnn_cell.BasicLSTMCell(...))
cell = tf.nn.rnn_cell.MultiRNNCell(cells)
# For reshaping the input and output dimensions of the seq2seq RNN:
w_in = tf.Variable(tf.random_normal([inputDim, hiddenDim]))
b_in = tf.Variable(tf.random_normal([hiddenDim], mean=1.0))
w_out = tf.Variable(tf.random_normal([hiddenDim, outputDim]))
b_out = tf.Variable(tf.random_normal([outputDim]))
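        # NOTE: reshaped_inputs below is computed but never fed to the
        # seq2seq graph (enc_inp is used directly); kept as in the original.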
reshaped_inputs = [tf.nn.relu(tf.matmul(i, w_in) + b_in) for i in enc_inp]
dec_outputs, dec_memory = tf.nn.seq2seq.basic_rnn_seq2seq(
enc_inp,
dec_inp,
cell
)
output_scale_factor = tf.Variable(1.0, name="Output_ScaleFactor")
reshaped_outputs = [output_scale_factor *
(tf.matmul(i, w_out) + b_out) for i in dec_outputs]
with tf.variable_scope('Loss'):
# L2 loss
output_loss = 0
for _y, _Y in zip(reshaped_outputs, expected_sparse_output):
output_loss += tf.reduce_mean(tf.nn.l2_loss(_y - _Y))
reg_loss = 0
for tf_var in tf.trainable_variables():
if not ("Bias" in tf_var.name or "Output_" in tf_var.name):
reg_loss += tf.reduce_mean(tf.nn.l2_loss(tf_var))
loss = output_loss + lambdaL2Reg * reg_loss
with tf.variable_scope('Optimizer'):
optimizer = tf.train.RMSPropOptimizer(
learningRate, decay=lrDecay, momentum=momentumTF)
train_op = optimizer.minimize(loss)
# Training
train_losses = []
test_losses = []
sess.run(tf.global_variables_initializer())
for t in range(numIterLearning + 1):
X, Y = GetDataXY(indexCode,isTrain=True, lenSeq=lenSeq,batchSize=batchSize,ioputDim=ioputDim)
feed_dict = {enc_inp[t]: X[t] for t in range(len(enc_inp))}
feed_dict.update({expected_sparse_output[t]: Y[
t] for t in range(len(expected_sparse_output))})
_, loss_t = sess.run([train_op, loss], feed_dict)
train_loss=loss_t
train_losses.append(train_loss)
if t % 10 == 0:
X, Y = GetDataXY(indexCode,isTrain=True, lenSeq=lenSeq,batchSize=batchSize,ioputDim=ioputDim)
feed_dict = {enc_inp[t]: X[t] for t in range(len(enc_inp))}
feed_dict.update({expected_sparse_output[t]: Y[
t] for t in range(len(expected_sparse_output))})
loss_t = sess.run([loss], feed_dict)
test_loss= loss_t[0]
test_losses.append(test_loss)
print("Step {}/{}, train loss: {}, \tTEST loss: {}".format(t,
numIterLearning, train_loss, test_loss))
print("Fin. train loss: {}, \tTEST loss: {}".format(train_loss, test_loss))
# Plot loss over time:
plt.figure(figsize=(12, 6))
plt.plot(
np.array(range(0, len(test_losses))) /
float(len(test_losses) - 1) * (len(train_losses) - 1),
np.log(test_losses),
label="Test loss"
)
plt.plot(
np.log(train_losses),
label="Train loss"
)
plt.title("Training errors over time (on a logarithmic scale)")
plt.xlabel('Iteration')
plt.ylabel('log(Loss)')
plt.legend(loc='best')
plt.show()
# Test ############################
nb_predictions = 5
print("Let's visualize {} predictions with our signals:".format(nb_predictions))
X, Y = GetDataXY(indexCode,isTrain=True, lenSeq=lenSeq,batchSize=batchSize,ioputDim=ioputDim)
feed_dict = {enc_inp[t]: X[t] for t in range(inSeq)}
outputs = np.array(sess.run([reshaped_outputs], feed_dict)[0])
for j in range(nb_predictions):
plt.figure(figsize=(12, 3))
for k in range(outputDim):
past = X[:, j, k]
expected = Y[:, j, k]
pred = outputs[:, j, k]
label1 = "Seen (past) values" if k == 0 else "_nolegend_"
label2 = "True future values" if k == 0 else "_nolegend_"
label3 = "Predictions" if k == 0 else "_nolegend_"
plt.plot(range(len(past)), past, "o--b", label=label1)
plt.plot(range(len(past), len(expected) + len(past)),
expected, "x--b", label=label2)
plt.plot(range(len(past), len(pred) + len(past)),
pred, "o--y", label=label3)
plt.legend(loc='best')
plt.title("Predictions v.s. true values")
plt.show()
print("Reminder: the signal can contain many dimensions at once.")
print("In that case, signals have the same color.")
print("In reality, we could imagine multiple stock market symbols evolving,")
print("tied in time together and seen at once by the neural network.")
######################PREDICT:
#X=Generate_x_data4Future(indexCode,isTrain=True, lenSeq=lenSeq,batchSize=batchSize,ioputDim=ioputDim)
openCode=ReadHistDataIndex(indexCode,'open')
if len(lenSeq)==1:
        inSeq=outSeq=lenSeq[0]
else:
inSeq,outSeq=lenSeq[0],lenSeq[1]
nData=len(openCode)
X4Future=openCode[nData-inSeq::]
#print X4FutureTmp.shape,X.shape
X4FutureTmp=np.empty((inSeq,batchSize,ioputDim))
for iB in xrange(batchSize):
for iIO in xrange(ioputDim):
X4FutureTmp[:,iB,iIO]=X4Future
feed_dict = {enc_inp[t]: X4FutureTmp[t] for t in range(inSeq)}
Y4FutureTmp = np.array(sess.run([reshaped_outputs], feed_dict)[0])
Y4Future=Y4FutureTmp[:,0,0]
plt.figure('FUTURE @ JIANG PEIYONG')
plt.clf()
plt.plot(np.arange(inSeq),X4Future,'b-')
plt.hold('on')
plt.plot(np.arange(inSeq),X4Future,'b^')
plt.plot(np.arange(inSeq)+inSeq,Y4Future,'r-')
plt.plot(np.arange(inSeq)+inSeq,Y4Future,'ro')
plt.figure('FUTURE (ALL) @ JIANG PEIYONG')
plt.clf()
plt.plot(np.arange(nData),openCode,'b-')
plt.hold('on')
plt.plot(np.arange(inSeq)+nData,Y4Future,'r-')
###################################################################################
indexCode='300401'
TensorFlowPredict(indexCode,inSeq,outSeq,batchSize,hiddenDim,ioputDim,numIterLearning)
| gpl-3.0 |
40223137/2015abc | static/Brython3.1.1-20150328-091302/Lib/pydoc.py | 637 | 102017 | #!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import imp
import importlib.machinery
#brython fix me
import inspect
import io
import os
#brython fix me
#import pkgutil
import platform
import re
import sys
import time
import tokenize
import warnings
from collections import deque
from reprlib import Repr
#fix me brython
#from traceback import extract_tb, format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
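# Worked example of the arithmetic above: cram('abcdefghij', 7) keeps
# pre = (7-3)//2 = 2 leading and post = 7-3-2 = 2 trailing characters
# around '...', giving 'ab...ij'.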
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
#fix me brython
#return _re_stripid.sub(r'\1', text)
return text
def _is_some_method(obj):
return (inspect.isfunction(obj) or
inspect.ismethod(obj) or
inspect.isbuiltin(obj) or
inspect.ismethoddescriptor(obj))
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__initializing__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
try:
file = tokenize.open(filename)
except IOError:
# module can't be opened, so skip it
return None
binary_suffixes = importlib.machinery.BYTECODE_SUFFIXES[:]
binary_suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
if any(filename.endswith(x) for x in binary_suffixes):
# binary modules have to be imported
file.close()
if any(filename.endswith(x) for x in
importlib.machinery.BYTECODE_SUFFIXES):
loader = importlib.machinery.SourcelessFileLoader('__temp__',
filename)
else:
loader = importlib.machinery.ExtensionFileLoader('__temp__',
filename)
try:
module = loader.load_module('__temp__')
except:
return None
            result = module.__doc__.splitlines()[0] if module.__doc__ else None
del sys.modules['__temp__']
else:
# text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
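# Example (illustrative): the synopsis is the first line of the module
# docstring, and the mtime-keyed cache keeps repeated lookups cheap.
#     synopsis(os.__file__)  # e.g. 'OS routines for NT or Posix ...'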
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
with open(path, 'rb') as file:
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.seek(0)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
        # FIXME (Brython): the original test compared value.name with path,
        # but Brython's ImportError may lack a 'name' attribute, so also
        # fall back to comparing the message text.
        elif exc is ImportError and (getattr(value, 'name', None) == path or
                                     str(value) == str(path)):
# No such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
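# Example (illustrative): unlike bare __import__, a dotted path resolves to
# the leaf module rather than the top-level package.
#     safeimport('xml.dom')       # -> the xml.dom module, not the xml package
#     safeimport('no_such_mod')   # -> None when the module doesn't exist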
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"http://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.join(sys.base_exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
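# Example (illustrative): HTMLRepr truncates long values and entity-escapes
# markup so object reprs embed safely in generated pages; for instance
# HTMLRepr().repr('<b>' * 60) yields a cram()-shortened repr in which the
# angle brackets come back as &lt; and &gt;.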
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
        return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
    def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
        """Format a section with a heading."""
        if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
        result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
    def preformat(self, text):
        """Format literal preformatted text."""
        text = self.escape(text.expandtabs())
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
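    # Example (illustrative): markup() linkifies URLs and RFC/PEP numbers
    # found in plain docstring text; HTMLDoc().markup('See PEP 8.') returns
    # roughly
    #     'See <a href="http://www.python.org/dev/peps/pep-0008/">PEP 8</a>.'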
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.__func__
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
            args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
                inspect.getfullargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
                # XXX lambdas usually won't have a 'return' annotation since
                # the syntax doesn't support it, but it is possible, so
                # removing the parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
# ignore a module if its name contains a surrogate character
continue
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
    # FIXME (Brython): repr1 is disabled here; the inherited Repr.repr1
    # still dispatches to the type-specific repr_* methods below.
    #def repr1(self, x, level):
    #    if hasattr(type(x), '__name__'):
    #        methodname = 'repr_' + '_'.join(type(x).__name__.split())
    #        if hasattr(self, methodname):
    #            return getattr(self, methodname)(x, level)
    #    return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
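    # Example (illustrative): bold() overstrikes each character the way
    # nroff output does, so bold('hi') == 'h\bhi\bi'; terminal pagers render
    # that as bold text, and plain() (defined further below) strips it.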
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.__func__
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
                # XXX lambdas usually won't have a 'return' annotation since
                # the syntax doesn't support it, but it is possible, so
                # removing the parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
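# Example (illustrative): plain() undoes the overstriking added by
# TextDoc.bold, e.g. plain('h\bhi\bi') == 'hi'.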
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(text).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
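# Example (illustrative):
#     describe(len)                  # -> 'built-in function len'
#     describe(ErrorDuringImport)    # -> 'class ErrorDuringImport'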
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = builtins
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
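# Example (illustrative): locate() imports as deep down the dotted path as
# it can, then finishes with getattr; 'json.decoder.JSONDecoder' imports
# json.decoder and fetches the JSONDecoder attribute from it.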
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError('no Python documentation found for %r' % thing)
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w', encoding='utf-8')
file.write(page)
file.close()
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
        # FIXME (Brython): the property-based definitions below are replaced
        # by plain attributes resolved once at construction time.
        self.input = self._input or sys.stdin
        self.output = self._output or sys.stdout
    #input = property(lambda self: self._input or sys.stdin)
    #output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = replace(request, '"', '', "'", '').strip()
if request.lower() in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the interactive help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(doc.strip() + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import formatter
buffer = io.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + ', '.join(xrefs.split()) + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
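# Illustrative sketch: with roots [1], children(n) = [2*n, 2*n + 1] and
# descendp(n) = n < 4, successive next() calls yield 2, 4, 5, 3, 6, 7 and
# then None; note that the roots themselves are never yielded.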
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
try:
loader = importer.find_module(modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except Exception:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = loader.load_module(modname)
except ImportError:
if onerror:
onerror(modname)
continue
                desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
                path = getattr(module, '__file__', None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
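# Example (illustrative):
#     apropos('http')  # prints lines such as 'http.client - HTTP/1.1 ...'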
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
>>> serverthread = pydoc._start_server(my_url_handler, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.time()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.time() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
import http.server
import email.message
import select
import threading
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
self.host = '127.0.0.1' if sys.platform == 'mac' else 'localhost'
self.address = ('', port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, port):
self.urlhandler = urlhandler
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.serving = False
self.url = None
thread = ServerThread(urlhandler, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
def filelink(self, url, path):
return '<a href="getfile?key=%s">%s</a>' % (url, path)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
'&lt;ping@lfw.org&gt;</font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_getfile(path):
"""Get and display a source file listing safely."""
path = path.replace('%20', ' ')
with tokenize.open(path) as fp:
lines = html.escape(fp.read())
body = '<pre>%s</pre>' % lines
heading = html.heading(
'<big><big><strong>File Listing</strong></big></big>',
'#ffffff', '#7799ee')
contents = heading + html.bigsection(
'File: %s' % path, '#ffffff', '#ee77aa', body)
return 'getfile %s' % path, contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
contents = html.bigsection(topic, '#ffffff', '#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "getfile?key":
title, content = html_getfile(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
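# Illustrative requests this handler accepts (derived from get_html_page
# above; not part of the original module):
#
#   _url_handler('index.html')             # module index
#   _url_handler('topics.html')            # list of help topics
#   _url_handler('search?key=json')        # synopsis search results
#   _url_handler('topic?key=FORMATTING')   # a single topic page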
def browse(port=0, *, open_browser=True):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
import webbrowser
serverthread = _start_server(_url_handler, port)
if serverthread.error:
print(serverthread.error)
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and x.find(os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage(Exception): pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'bk:p:w')
writing = False
start_server = False
open_browser = False
port = None
for opt, val in opts:
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-w':
writing = True
if start_server:
if port is None:
port = 0
browse(port, open_browser=open_browser)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
| gpl-3.0 |
fivejjs/pyspider | pyspider/database/sqlalchemy/projectdb.py | 51 | 4255 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2014-12-04 23:25:10
import six
import time
from sqlalchemy import create_engine, MetaData, Table, Column, String, Float, Text
from sqlalchemy.engine.url import make_url
from pyspider.libs import utils
from pyspider.database.base.projectdb import ProjectDB as BaseProjectDB
from .sqlalchemybase import result2dict
if six.PY3:
where_type = utils.utf8
else:
where_type = utils.text
class ProjectDB(BaseProjectDB):
__tablename__ = 'projectdb'
def __init__(self, url):
self.table = Table(self.__tablename__, MetaData(),
Column('name', String(64)),
Column('group', String(64)),
Column('status', String(16)),
Column('script', Text),
Column('comments', String(1024)),
Column('rate', Float(11)),
Column('burst', Float(11)),
Column('updatetime', Float(32)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
self.url = make_url(url)
if self.url.database:
database = self.url.database
self.url.database = None
engine = create_engine(self.url, convert_unicode=False)
engine.execute("CREATE DATABASE IF NOT EXISTS %s" % database)
self.url.database = database
self.engine = create_engine(url, convert_unicode=False)
self.table.create(self.engine, checkfirst=True)
@staticmethod
def _parse(data):
if six.PY3:
for key, value in list(six.iteritems(data)):
if isinstance(value, six.binary_type):
data[utils.text(key)] = utils.text(value)
else:
data[utils.text(key)] = value
return data
@staticmethod
def _stringify(data):
if six.PY3:
for key, value in list(six.iteritems(data)):
if isinstance(value, six.string_types):
data[key] = utils.utf8(value)
return data
def insert(self, name, obj={}):
obj = dict(obj)
obj['name'] = name
obj['updatetime'] = time.time()
return self.engine.execute(self.table.insert()
.values(**self._stringify(obj)))
def update(self, name, obj={}, **kwargs):
obj = dict(obj)
obj.update(kwargs)
obj['updatetime'] = time.time()
return self.engine.execute(self.table.update()
.where(self.table.c.name == where_type(name))
.values(**self._stringify(obj)))
def get_all(self, fields=None):
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for task in self.engine.execute(self.table.select()
.with_only_columns(columns)):
yield self._parse(result2dict(columns, task))
def get(self, name, fields=None):
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for task in self.engine.execute(self.table.select()
.where(self.table.c.name == where_type(name))
.limit(1)
.with_only_columns(columns)):
return self._parse(result2dict(columns, task))
def drop(self, name):
return self.engine.execute(self.table.delete()
.where(self.table.c.name == where_type(name)))
def check_update(self, timestamp, fields=None):
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for task in self.engine.execute(self.table.select()
.with_only_columns(columns)
.where(self.table.c.updatetime >= timestamp)):
yield self._parse(result2dict(columns, task))
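# Minimal usage sketch (not part of the original file; the sqlite URL and
# project fields are hypothetical):
#
#   db = ProjectDB('sqlite:///projectdb.db')
#   db.insert('demo', {'script': 'print("hi")', 'status': 'TODO'})
#   db.update('demo', status='RUNNING')
#   print(db.get('demo', fields=['name', 'status']))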
| apache-2.0 |
unpingco/csvkit | csvkit/convert/js.py | 20 | 1763 | #!/usr/bin/env python
try:
from collections import OrderedDict
import json
except ImportError:
from ordereddict import OrderedDict
import simplejson as json
import itertools
import six
from csvkit import CSVKitWriter
def parse_object(obj, path=''):
"""
Recursively parse JSON objects and a dictionary of paths/keys and values.
Inspired by JSONPipe (https://github.com/dvxhouse/jsonpipe).
"""
if isinstance(obj, dict):
iterator = obj.items()
elif isinstance(obj, (list, tuple)):
iterator = enumerate(obj)
else:
return { path.strip('/'): obj }
d = {}
for key, value in iterator:
key = six.text_type(key)
d.update(parse_object(value, path + key + '/'))
return d
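# Hedged example of the flattening (behavior inferred from the recursion
# above; key order may vary):
#
#   >>> parse_object({'a': {'b': 1}, 'c': [2, 3]})
#   {'a/b': 1, 'c/0': 2, 'c/1': 3}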
def json2csv(f, key=None, **kwargs):
"""
Convert a JSON document into CSV format.
The top-level element of the input must be a list or a dictionary. If it is a dictionary, a key must be provided which is an item of the dictionary which contains a list.
"""
js = json.load(f, object_pairs_hook=OrderedDict)
if isinstance(js, dict):
if not key:
raise TypeError('When converting a JSON document with a top-level dictionary element, a key must be specified.')
js = js[key]
fields = []
flat = []
for obj in js:
flat.append(parse_object(obj))
for key in obj.keys():
if key not in fields:
fields.append(key)
o = six.StringIO()
writer = CSVKitWriter(o)
writer.writerow(fields)
for i in flat:
row = []
for field in fields:
row.append(i.get(field, None))
writer.writerow(row)
output = o.getvalue()
o.close()
return output
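# Usage sketch (file name and key are hypothetical):
#
#   with open('results.json') as f:
#       print(json2csv(f, key='rows'))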
| mit |
huson2012/psutil | psutil/_pslinux.py | 21 | 43124 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux platform implementation."""
from __future__ import division
import base64
import errno
import os
import re
import socket
import struct
import sys
import warnings
from psutil import _common
from psutil import _psposix
from psutil._common import (isfile_strict, usage_percent, deprecated)
from psutil._compat import PY3, xrange, namedtuple, wraps, b, defaultdict
import _psutil_linux as cext
import _psutil_posix
__extra__all__ = [
# io prio constants
"IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
"IOPRIO_CLASS_IDLE",
# connection status constants
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
# other
"phymem_buffers", "cached_phymem"]
# --- constants
HAS_PRLIMIT = hasattr(cext, "linux_prlimit")
# RLIMIT_* constants, not guaranteed to be present on all kernels
if HAS_PRLIMIT:
for name in dir(cext):
if name.startswith('RLIM'):
__extra__all__.append(name)
# Number of clock ticks per second
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
BOOT_TIME = None # set later
DEFAULT_ENCODING = sys.getdefaultencoding()
# ioprio_* constants http://linux.die.net/man/2/ioprio_get
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
# taken from /fs/proc/array.c
PROC_STATUSES = {
"R": _common.STATUS_RUNNING,
"S": _common.STATUS_SLEEPING,
"D": _common.STATUS_DISK_SLEEP,
"T": _common.STATUS_STOPPED,
"t": _common.STATUS_TRACING_STOP,
"Z": _common.STATUS_ZOMBIE,
"X": _common.STATUS_DEAD,
"x": _common.STATUS_DEAD,
"K": _common.STATUS_WAKE_KILL,
"W": _common.STATUS_WAKING
}
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
TCP_STATUSES = {
"01": _common.CONN_ESTABLISHED,
"02": _common.CONN_SYN_SENT,
"03": _common.CONN_SYN_RECV,
"04": _common.CONN_FIN_WAIT1,
"05": _common.CONN_FIN_WAIT2,
"06": _common.CONN_TIME_WAIT,
"07": _common.CONN_CLOSE,
"08": _common.CONN_CLOSE_WAIT,
"09": _common.CONN_LAST_ACK,
"0A": _common.CONN_LISTEN,
"0B": _common.CONN_CLOSING
}
# set later from __init__.py
NoSuchProcess = None
AccessDenied = None
TimeoutExpired = None
# --- named tuples
def _get_cputimes_fields():
"""Return a namedtuple of variable fields depending on the
CPU times available on this Linux kernel version which may be:
(user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
[guest_nice]]])
"""
f = open('/proc/stat', 'rb')
try:
values = f.readline().split()[1:]
finally:
f.close()
fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
vlen = len(values)
if vlen >= 8:
# Linux >= 2.6.11
fields.append('steal')
if vlen >= 9:
# Linux >= 2.6.24
fields.append('guest')
if vlen >= 10:
# Linux >= 3.2.0
fields.append('guest_nice')
return fields
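# For reference (values are illustrative): the first line of /proc/stat
# looks like
#
#   cpu  4705 150 1120 16250856 108709 93 4512 0 0 0
#
# i.e. 'cpu' followed by user, nice, system, idle, iowait, irq, softirq
# and, on newer kernels, steal, guest and guest_nice.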
scputimes = namedtuple('scputimes', _get_cputimes_fields())
svmem = namedtuple(
'svmem', ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached'])
pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')
pmmap_grouped = namedtuple(
'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
'shared_dirty', 'private_clean', 'private_dirty',
'referenced', 'anonymous', 'swap'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# --- system memory
def virtual_memory():
total, free, buffers, shared, _, _ = cext.linux_sysinfo()
cached = active = inactive = None
f = open('/proc/meminfo', 'rb')
CACHED, ACTIVE, INACTIVE = b("Cached:"), b("Active:"), b("Inactive:")
try:
for line in f:
if line.startswith(CACHED):
cached = int(line.split()[1]) * 1024
elif line.startswith(ACTIVE):
active = int(line.split()[1]) * 1024
elif line.startswith(INACTIVE):
inactive = int(line.split()[1]) * 1024
if (cached is not None
and active is not None
and inactive is not None):
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# https://github.com/giampaolo/psutil/issues/313
msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
cached = active = inactive = 0
finally:
f.close()
avail = free + buffers + cached
used = total - free
percent = usage_percent((total - avail), total, _round=1)
return svmem(total, avail, percent, used, free,
active, inactive, buffers, cached)
def swap_memory():
_, _, _, _, total, free = cext.linux_sysinfo()
used = total - free
percent = usage_percent(used, total, _round=1)
# get pgin/pgouts
f = open("/proc/vmstat", "rb")
SIN, SOUT = b('pswpin'), b('pswpout')
sin = sout = None
try:
for line in f:
# values are expressed in 4 kilo bytes, we want bytes instead
if line.startswith(SIN):
sin = int(line.split(b(' '))[1]) * 4 * 1024
elif line.startswith(SOUT):
sout = int(line.split(b(' '))[1]) * 4 * 1024
if sin is not None and sout is not None:
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# https://github.com/giampaolo/psutil/issues/313
msg = "'sin' and 'sout' swap memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
sin = sout = 0
finally:
f.close()
return _common.sswap(total, used, free, percent, sin, sout)
@deprecated(replacement='psutil.virtual_memory().cached')
def cached_phymem():
return virtual_memory().cached
@deprecated(replacement='psutil.virtual_memory().buffers')
def phymem_buffers():
return virtual_memory().buffers
# --- CPUs
def cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
(user, nice, system, idle, iowait, irq, softirq [steal, [guest,
[guest_nice]]])
Last 3 fields may not be available on all Linux kernel versions.
"""
f = open('/proc/stat', 'rb')
try:
values = f.readline().split()
finally:
f.close()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
return scputimes(*fields)
def per_cpu_times():
"""Return a list of namedtuple representing the CPU times
for every CPU available on the system.
"""
cpus = []
f = open('/proc/stat', 'rb')
try:
# get rid of the first line which refers to system wide CPU stats
f.readline()
CPU = b('cpu')
for line in f:
if line.startswith(CPU):
values = line.split()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
entry = scputimes(*fields)
cpus.append(entry)
return cpus
finally:
f.close()
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# as a second fallback we try to parse /proc/cpuinfo
num = 0
f = open('/proc/cpuinfo', 'rb')
try:
lines = f.readlines()
finally:
f.close()
PROCESSOR = b('processor')
for line in lines:
if line.lower().startswith(PROCESSOR):
num += 1
# unknown format (e.g. amrel/sparc architectures), see:
# https://github.com/giampaolo/psutil/issues/200
# try to parse /proc/stat as a last resort
if num == 0:
f = open('/proc/stat', 'rt')
try:
lines = f.readlines()
finally:
f.close()
search = re.compile(r'cpu\d')
for line in lines:
line = line.split(' ')[0]
if search.match(line):
num += 1
if num == 0:
# mimic os.cpu_count()
return None
return num
def cpu_count_physical():
"""Return the number of physical CPUs in the system."""
f = open('/proc/cpuinfo', 'rb')
try:
lines = f.readlines()
finally:
f.close()
found = set()
PHYSICAL_ID = b('physical id')
for line in lines:
if line.lower().startswith(PHYSICAL_ID):
found.add(line.strip())
if found:
return len(found)
else:
return None # mimic os.cpu_count()
# --- other system functions
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname == ':0.0':
hostname = 'localhost'
nt = _common.suser(user, tty or None, hostname, tstamp)
retlist.append(nt)
return retlist
def boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
global BOOT_TIME
f = open('/proc/stat', 'rb')
try:
BTIME = b('btime')
for line in f:
if line.startswith(BTIME):
ret = float(line.strip().split()[1])
BOOT_TIME = ret
return ret
raise RuntimeError("line 'btime' not found")
finally:
f.close()
# --- processes
def pids():
"""Returns a list of PIDs currently running on the system."""
return [int(x) for x in os.listdir(b('/proc')) if x.isdigit()]
def pid_exists(pid):
"""Check For the existence of a unix pid."""
return _psposix.pid_exists(pid)
# --- network
class Connections:
"""A wrapper on top of /proc/net/* files, retrieving per-process
and system-wide open connections (TCP, UDP, UNIX) similarly to
"netstat -an".
Note: in case of UNIX sockets we're only able to determine the
local endpoint/path, not the one it's connected to.
According to [1] it would be possible but not easily.
[1] http://serverfault.com/a/417946
"""
def __init__(self):
tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
unix = ("unix", socket.AF_UNIX, None)
self.tmap = {
"all": (tcp4, tcp6, udp4, udp6, unix),
"tcp": (tcp4, tcp6),
"tcp4": (tcp4,),
"tcp6": (tcp6,),
"udp": (udp4, udp6),
"udp4": (udp4,),
"udp6": (udp6,),
"unix": (unix,),
"inet": (tcp4, tcp6, udp4, udp6),
"inet4": (tcp4, udp4),
"inet6": (tcp6, udp6),
}
def get_proc_inodes(self, pid):
inodes = defaultdict(list)
for fd in os.listdir("/proc/%s/fd" % pid):
try:
inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
except OSError:
# TODO: need comment here
continue
else:
if inode.startswith('socket:['):
# the process is using a socket
inode = inode[8:][:-1]
inodes[inode].append((pid, int(fd)))
return inodes
def get_all_inodes(self):
inodes = {}
for pid in pids():
try:
inodes.update(self.get_proc_inodes(pid))
except OSError:
# os.listdir() is gonna raise a lot of access denied
# exceptions in case of unprivileged user; that's fine
# as we'll just end up returning a connection with PID
# and fd set to None anyway.
# Both netstat -an and lsof does the same so it's
# unlikely we can do any better.
# ENOENT just means a PID disappeared on us.
err = sys.exc_info()[1]
if err.errno not in (
errno.ENOENT, errno.ESRCH, errno.EPERM, errno.EACCES):
raise
return inodes
def decode_address(self, addr, family):
"""Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
The IP address portion is a little or big endian four-byte
hexadecimal number; that is, the least significant byte is listed
first, so we need to reverse the order of the bytes to convert it
to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
"""
ip, port = addr.split(':')
port = int(port, 16)
# this usually refers to a local socket in listen mode with
# no end-points connected
if not port:
return ()
if PY3:
ip = ip.encode('ascii')
if family == socket.AF_INET:
# see: https://github.com/giampaolo/psutil/issues/201
if sys.byteorder == 'little':
ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
else:
ip = socket.inet_ntop(family, base64.b16decode(ip))
else: # IPv6
# old version - let's keep it, just in case...
# ip = ip.decode('hex')
# return socket.inet_ntop(socket.AF_INET6,
# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
ip = base64.b16decode(ip)
# see: https://github.com/giampaolo/psutil/issues/201
if sys.byteorder == 'little':
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('>4I', *struct.unpack('<4I', ip)))
else:
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('<4I', *struct.unpack('<4I', ip)))
return (ip, port)
def process_inet(self, file, family, type_, inodes, filter_pid=None):
"""Parse /proc/net/tcp* and /proc/net/udp* files."""
if file.endswith('6') and not os.path.exists(file):
# IPv6 not supported
return
f = open(file, 'rt')
f.readline() # skip the first line
for line in f:
_, laddr, raddr, status, _, _, _, _, _, inode = \
line.split()[:10]
if inode in inodes:
# We assume inet sockets are unique, so we error
# out if there are multiple references to the
# same inode. We won't do this for UNIX sockets.
if len(inodes[inode]) > 1 and type_ != socket.AF_UNIX:
raise ValueError("ambiguos inode with multiple "
"PIDs references")
pid, fd = inodes[inode][0]
else:
pid, fd = None, -1
if filter_pid is not None and filter_pid != pid:
continue
else:
if type_ == socket.SOCK_STREAM:
status = TCP_STATUSES[status]
else:
status = _common.CONN_NONE
laddr = self.decode_address(laddr, family)
raddr = self.decode_address(raddr, family)
yield (fd, family, type_, laddr, raddr, status, pid)
f.close()
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
f = open(file, 'rt')
f.readline() # skip the first line
for line in f:
tokens = line.split()
_, _, _, _, type_, _, inode = tokens[0:7]
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
f.close()
def retrieve(self, kind, pid=None):
if kind not in self.tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in self.tmap])))
if pid is not None:
inodes = self.get_proc_inodes(pid)
if not inodes:
# no connections for this process
return []
else:
inodes = self.get_all_inodes()
ret = []
for f, family, type_ in self.tmap[kind]:
if family in (socket.AF_INET, socket.AF_INET6):
ls = self.process_inet(
"/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
else:
ls = self.process_unix(
"/proc/net/%s" % f, family, inodes, filter_pid=pid)
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
if pid:
conn = _common.pconn(fd, family, type_, laddr, raddr,
status)
else:
conn = _common.sconn(fd, family, type_, laddr, raddr,
status, bound_pid)
ret.append(conn)
return ret
_connections = Connections()
def net_connections(kind='inet'):
"""Return system-wide open connections."""
return _connections.retrieve(kind)
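# Illustrative call (not in the original module):
#
#   net_connections('tcp')   # system-wide TCP connections as sconn tuples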
def net_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
f = open("/proc/net/dev", "rt")
try:
lines = f.readlines()
finally:
f.close()
retdict = {}
for line in lines[2:]:
colon = line.rfind(':')
assert colon > 0, repr(line)
name = line[:colon].strip()
fields = line[colon + 1:].strip().split()
bytes_recv = int(fields[0])
packets_recv = int(fields[1])
errin = int(fields[2])
dropin = int(fields[3])
bytes_sent = int(fields[8])
packets_sent = int(fields[9])
errout = int(fields[10])
dropout = int(fields[11])
retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
errin, errout, dropin, dropout)
return retdict
# --- disks
def disk_io_counters():
"""Return disk I/O statistics for every disk installed on the
system as a dict of raw tuples.
"""
# man iostat states that sectors are equivalent with blocks and
# have a size of 512 bytes since 2.4 kernels. This value is
# needed to calculate the amount of disk I/O in bytes.
SECTOR_SIZE = 512
# determine partitions we want to look for
partitions = []
f = open("/proc/partitions", "rt")
try:
lines = f.readlines()[2:]
finally:
f.close()
for line in reversed(lines):
_, _, _, name = line.split()
if name[-1].isdigit():
# we're dealing with a partition (e.g. 'sda1'); 'sda' will
# also be around but we want to omit it
partitions.append(name)
else:
if not partitions or not partitions[-1].startswith(name):
# we're dealing with a disk entity for which no
# partitions have been defined (e.g. 'sda' but
# 'sda1' was not around), see:
# https://github.com/giampaolo/psutil/issues/338
partitions.append(name)
#
retdict = {}
f = open("/proc/diskstats", "rt")
try:
lines = f.readlines()
finally:
f.close()
for line in lines:
# http://www.mjmwired.net/kernel/Documentation/iostats.txt
_, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
line.split()[:11]
if name in partitions:
rbytes = int(rbytes) * SECTOR_SIZE
wbytes = int(wbytes) * SECTOR_SIZE
reads = int(reads)
writes = int(writes)
rtime = int(rtime)
wtime = int(wtime)
retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
return retdict
def disk_partitions(all=False):
"""Return mounted disk partitions as a list of nameduples"""
phydevs = []
f = open("/proc/filesystems", "r")
try:
for line in f:
if not line.startswith("nodev"):
phydevs.append(line.strip())
finally:
f.close()
retlist = []
partitions = cext.disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
if device == '' or fstype not in phydevs:
continue
ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
disk_usage = _psposix.disk_usage
# --- decorators
def wrap_exceptions(fun):
"""Decorator which translates bare OSError and IOError exceptions
into NoSuchProcess and AccessDenied.
"""
@wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
class Process(object):
"""Linux process implementation."""
__slots__ = ["pid", "_name"]
def __init__(self, pid):
self.pid = pid
self._name = None
@wrap_exceptions
def name(self):
fname = "/proc/%s/stat" % self.pid
if PY3:
f = open(fname, "rt", encoding=DEFAULT_ENCODING)
else:
f = open(fname, "rt")
try:
name = f.read().split(' ')[1].replace('(', '').replace(')', '')
finally:
f.close()
# XXX - gets changed later and probably needs refactoring
return name
def exe(self):
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except (OSError, IOError):
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s" % self.pid):
return ""
else:
# ok, it is a process which has gone away
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
# readlink() might return paths containing null bytes ('\x00').
# Certain names have ' (deleted)' appended. Usually this is
# bogus as the file actually exists. Either way that's not
# important as we don't want to discriminate executables which
# have been deleted.
exe = exe.split('\x00')[0]
if exe.endswith(' (deleted)') and not os.path.exists(exe):
exe = exe[:-10]
return exe
@wrap_exceptions
def cmdline(self):
fname = "/proc/%s/cmdline" % self.pid
if PY3:
f = open(fname, "rt", encoding=DEFAULT_ENCODING)
else:
f = open(fname, "rt")
try:
# return the args as a list
return [x for x in f.read().split('\x00') if x]
finally:
f.close()
@wrap_exceptions
def terminal(self):
tmap = _psposix._get_terminal_map()
f = open("/proc/%s/stat" % self.pid, 'rb')
try:
tty_nr = int(f.read().split(b(' '))[6])
finally:
f.close()
try:
return tmap[tty_nr]
except KeyError:
return None
if os.path.exists('/proc/%s/io' % os.getpid()):
@wrap_exceptions
def io_counters(self):
fname = "/proc/%s/io" % self.pid
f = open(fname, 'rb')
SYSCR, SYSCW = b("syscr"), b("syscw")
READ_BYTES, WRITE_BYTES = b("read_bytes"), b("write_bytes")
try:
rcount = wcount = rbytes = wbytes = None
for line in f:
if rcount is None and line.startswith(SYSCR):
rcount = int(line.split()[1])
elif wcount is None and line.startswith(SYSCW):
wcount = int(line.split()[1])
elif rbytes is None and line.startswith(READ_BYTES):
rbytes = int(line.split()[1])
elif wbytes is None and line.startswith(WRITE_BYTES):
wbytes = int(line.split()[1])
for x in (rcount, wcount, rbytes, wbytes):
if x is None:
raise NotImplementedError(
"couldn't read all necessary info from %r" % fname)
return _common.pio(rcount, wcount, rbytes, wbytes)
finally:
f.close()
else:
def io_counters(self):
raise NotImplementedError("couldn't find /proc/%s/io (kernel "
"too old?)" % self.pid)
@wrap_exceptions
def cpu_times(self):
f = open("/proc/%s/stat" % self.pid, 'rb')
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(b(')')) + 2:]
values = st.split(b(' '))
utime = float(values[11]) / CLOCK_TICKS
stime = float(values[12]) / CLOCK_TICKS
return _common.pcputimes(utime, stime)
@wrap_exceptions
def wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except _psposix.TimeoutExpired:
# support for private module import
if TimeoutExpired is None:
raise
raise TimeoutExpired(timeout, self.pid, self._name)
@wrap_exceptions
def create_time(self):
f = open("/proc/%s/stat" % self.pid, 'rb')
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.rfind(b(')')) + 2:]
values = st.split(b(' '))
# According to documentation, starttime is in field 21 and the
# unit is jiffies (clock ticks).
# We first divide it for clock ticks and then add uptime returning
# seconds since the epoch, in UTC.
# Also use cached value if available.
bt = BOOT_TIME or boot_time()
return (float(values[19]) / CLOCK_TICKS) + bt
@wrap_exceptions
def memory_info(self):
f = open("/proc/%s/statm" % self.pid, 'rb')
try:
vms, rss = f.readline().split()[:2]
return _common.pmem(int(rss) * PAGESIZE,
int(vms) * PAGESIZE)
finally:
f.close()
@wrap_exceptions
def memory_info_ex(self):
# ============================================================
# | FIELD | DESCRIPTION | AKA | TOP |
# ============================================================
# | rss | resident set size | | RES |
# | vms | total program size | size | VIRT |
# | shared | shared pages (from shared mappings) | | SHR |
# | text | text ('code') | trs | CODE |
# | lib | library (unused in Linux 2.6) | lrs | |
# | data | data + stack | drs | DATA |
# | dirty | dirty pages (unused in Linux 2.6) | dt | |
# ============================================================
f = open("/proc/%s/statm" % self.pid, "rb")
try:
vms, rss, shared, text, lib, data, dirty = \
[int(x) * PAGESIZE for x in f.readline().split()[:7]]
finally:
f.close()
return pextmem(rss, vms, shared, text, lib, data, dirty)
if os.path.exists('/proc/%s/smaps' % os.getpid()):
def memory_maps(self):
"""Return process's mapped memory regions as a list of nameduples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
f = None
try:
f = open("/proc/%s/smaps" % self.pid, "rt")
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if not fields[0].endswith(':'):
# new block section
yield (current_block.pop(), data)
current_block.append(line)
else:
try:
data[fields[0]] = int(fields[1]) * 1024
except ValueError:
if fields[0].startswith('VmFlags:'):
# see issue #369
continue
else:
raise ValueError("don't know how to inte"
"rpret line %r" % line)
yield (current_block.pop(), data)
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = \
hfields + ['']
if not path:
path = '[anon]'
else:
path = path.strip()
yield (addr, perms, path,
data['Rss:'],
data.get('Size:', 0),
data.get('Pss:', 0),
data.get('Shared_Clean:', 0),
data.get('Shared_Dirty:', 0),
data.get('Private_Clean:', 0),
data.get('Private_Dirty:', 0),
data.get('Referenced:', 0),
data.get('Anonymous:', 0),
data.get('Swap:', 0))
f.close()
except EnvironmentError:
# XXX - Can't use wrap_exceptions decorator as we're
# returning a generator; this probably needs some
# refactoring in order to avoid this code duplication.
if f is not None:
f.close()
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
except:
if f is not None:
f.close()
raise
f.close()
else:
def memory_maps(self, ext):
msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or " \
"CONFIG_MMU kernel configuration option is not enabled" \
% self.pid
raise NotImplementedError(msg)
@wrap_exceptions
def cwd(self):
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
path = os.readlink("/proc/%s/cwd" % self.pid)
return path.replace('\x00', '')
@wrap_exceptions
def num_ctx_switches(self):
vol = unvol = None
f = open("/proc/%s/status" % self.pid, "rb")
VOLUNTARY = b("voluntary_ctxt_switches")
NON_VOLUNTARY = b("nonvoluntary_ctxt_switches")
try:
for line in f:
if line.startswith(VOLUNTARY):
vol = int(line.split()[1])
elif line.startswith(NON_VOLUNTARY):
unvol = int(line.split()[1])
if vol is not None and unvol is not None:
return _common.pctxsw(vol, unvol)
raise NotImplementedError(
"'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
"fields were not found in /proc/%s/status; the kernel is "
"probably older than 2.6.23" % self.pid)
finally:
f.close()
@wrap_exceptions
def num_threads(self):
f = open("/proc/%s/status" % self.pid, "rb")
try:
THREADS = b("Threads:")
for line in f:
if line.startswith(THREADS):
return int(line.split()[1])
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def threads(self):
thread_ids = os.listdir("/proc/%s/task" % self.pid)
thread_ids.sort()
retlist = []
hit_enoent = False
for thread_id in thread_ids:
try:
f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id), 'rb')
except EnvironmentError:
err = sys.exc_info()[1]
if err.errno == errno.ENOENT:
# no such file or directory; it means thread
# disappeared on us
hit_enoent = True
continue
raise
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(b(')')) + 2:]
values = st.split(b(' '))
utime = float(values[11]) / CLOCK_TICKS
stime = float(values[12]) / CLOCK_TICKS
ntuple = _common.pthread(int(thread_id), utime, stime)
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def nice_get(self):
# f = open('/proc/%s/stat' % self.pid, 'r')
# try:
# data = f.read()
# return int(data.split()[18])
# finally:
# f.close()
# Use C implementation
return _psutil_posix.getpriority(self.pid)
@wrap_exceptions
def nice_set(self, value):
return _psutil_posix.setpriority(self.pid, value)
@wrap_exceptions
def cpu_affinity_get(self):
from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
bitmask = cext.proc_cpu_affinity_get(self.pid)
return from_bitmask(bitmask)
@wrap_exceptions
def cpu_affinity_set(self, cpus):
try:
cext.proc_cpu_affinity_set(self.pid, cpus)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EINVAL:
allcpus = tuple(range(len(per_cpu_times())))
for cpu in cpus:
if cpu not in allcpus:
raise ValueError("invalid CPU #%i (choose between %s)"
% (cpu, allcpus))
raise
# only starting from kernel 2.6.13
if hasattr(cext, "proc_ioprio_get"):
@wrap_exceptions
def ionice_get(self):
ioclass, value = cext.proc_ioprio_get(self.pid)
return _common.pionice(ioclass, value)
@wrap_exceptions
def ionice_set(self, ioclass, value):
if ioclass in (IOPRIO_CLASS_NONE, None):
if value:
msg = "can't specify value with IOPRIO_CLASS_NONE"
raise ValueError(msg)
ioclass = IOPRIO_CLASS_NONE
value = 0
if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
if value is None:
value = 4
elif ioclass == IOPRIO_CLASS_IDLE:
if value:
msg = "can't specify value with IOPRIO_CLASS_IDLE"
raise ValueError(msg)
value = 0
else:
value = 0
if not 0 <= value <= 8:
raise ValueError(
"value argument range expected is between 0 and 8")
return cext.proc_ioprio_set(self.pid, ioclass, value)
if HAS_PRLIMIT:
@wrap_exceptions
def rlimit(self, resource, limits=None):
# if pid is 0 prlimit() applies to the calling process and
# we don't want that
if self.pid == 0:
raise ValueError("can't use prlimit() against PID 0 process")
if limits is None:
# get
return cext.linux_prlimit(self.pid, resource)
else:
# set
if len(limits) != 2:
raise ValueError(
"second argument must be a (soft, hard) tuple")
soft, hard = limits
cext.linux_prlimit(self.pid, resource, soft, hard)
@wrap_exceptions
def status(self):
f = open("/proc/%s/status" % self.pid, 'rb')
try:
STATE = b("State:")
for line in f:
if line.startswith(STATE):
letter = line.split()[1]
if PY3:
letter = letter.decode()
# XXX is '?' legit? (we're not supposed to return
# it anyway)
return PROC_STATUSES.get(letter, '?')
finally:
f.close()
@wrap_exceptions
def open_files(self):
retlist = []
files = os.listdir("/proc/%s/fd" % self.pid)
hit_enoent = False
for fd in files:
file = "/proc/%s/fd/%s" % (self.pid, fd)
if os.path.islink(file):
try:
file = os.readlink(file)
except OSError:
# ENOENT == file which is gone in the meantime
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
hit_enoent = True
continue
raise
else:
# If file is not an absolute path there's no way
# to tell whether it's a regular file or not,
# so we skip it. A regular file is always supposed
# to be absolutized though.
if file.startswith('/') and isfile_strict(file):
ntuple = _common.popenfile(file, int(fd))
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def connections(self, kind='inet'):
ret = _connections.retrieve(kind, self.pid)
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return ret
@wrap_exceptions
def num_fds(self):
return len(os.listdir("/proc/%s/fd" % self.pid))
@wrap_exceptions
def ppid(self):
f = open("/proc/%s/status" % self.pid, 'rb')
try:
PPID = b("PPid:")
for line in f:
if line.startswith(PPID):
# PPid: nnnn
return int(line.split()[1])
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def uids(self):
f = open("/proc/%s/status" % self.pid, 'rb')
try:
UID = b('Uid:')
for line in f:
if line.startswith(UID):
_, real, effective, saved, fs = line.split()
return _common.puids(int(real), int(effective), int(saved))
raise NotImplementedError("line not found")
finally:
f.close()
@wrap_exceptions
def gids(self):
f = open("/proc/%s/status" % self.pid, 'rb')
try:
GID = b('Gid:')
for line in f:
if line.startswith(GID):
_, real, effective, saved, fs = line.split()
return _common.pgids(int(real), int(effective), int(saved))
raise NotImplementedError("line not found")
finally:
f.close()
| bsd-3-clause |
valkjsaaa/sl4a | python/src/Lib/ctypes/test/test_stringptr.py | 66 | 2504 | import unittest
from ctypes import *
import _ctypes_test
lib = CDLL(_ctypes_test.__file__)
class StringPtrTestCase(unittest.TestCase):
def test__POINTER_c_char(self):
class X(Structure):
_fields_ = [("str", POINTER(c_char))]
x = X()
# NULL pointer access
self.assertRaises(ValueError, getattr, x.str, "contents")
b = c_buffer("Hello, World")
from sys import getrefcount as grc
self.failUnlessEqual(grc(b), 2)
x.str = b
self.failUnlessEqual(grc(b), 3)
# POINTER(c_char) and Python string is NOT compatible
# POINTER(c_char) and c_buffer() is compatible
for i in range(len(b)):
self.failUnlessEqual(b[i], x.str[i])
self.assertRaises(TypeError, setattr, x, "str", "Hello, World")
def test__c_char_p(self):
class X(Structure):
_fields_ = [("str", c_char_p)]
x = X()
# c_char_p and Python string is compatible
# c_char_p and c_buffer is NOT compatible
self.failUnlessEqual(x.str, None)
x.str = "Hello, World"
self.failUnlessEqual(x.str, "Hello, World")
b = c_buffer("Hello, World")
self.failUnlessRaises(TypeError, setattr, x, "str", b)
def test_functions(self):
strchr = lib.my_strchr
strchr.restype = c_char_p
# c_char_p and Python string is compatible
# c_char_p and c_buffer are now compatible
strchr.argtypes = c_char_p, c_char
self.failUnlessEqual(strchr("abcdef", "c"), "cdef")
self.failUnlessEqual(strchr(c_buffer("abcdef"), "c"), "cdef")
# POINTER(c_char) and Python string is NOT compatible
# POINTER(c_char) and c_buffer() is compatible
strchr.argtypes = POINTER(c_char), c_char
buf = c_buffer("abcdef")
self.failUnlessEqual(strchr(buf, "c"), "cdef")
self.failUnlessEqual(strchr("abcdef", "c"), "cdef")
# XXX These calls are dangerous, because the first argument
# to strchr is no longer valid after the function returns!
# So we must keep a reference to buf separately
strchr.restype = POINTER(c_char)
buf = c_buffer("abcdef")
r = strchr(buf, "c")
x = r[0], r[1], r[2], r[3], r[4]
self.failUnlessEqual(x, ("c", "d", "e", "f", "\000"))
del buf
# x1 will NOT be the same as x, usually:
x1 = r[0], r[1], r[2], r[3], r[4]
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
LindaLinsefors/Lorentz-transformator | LorentzTransformer.py | 1 | 27684 | #! /usr/bin/env python
##########################################################
import pygame
pygame.init() # I don't know what it does or if it is needed for this program
import pygame.freetype
pygame.freetype.init() # makes font work
import tkinter # tkinter is only used to choose files when loading and saving
import tkinter.filedialog
tkinter.Tk().withdraw() # stops the save and load dialogue windows from hanging around
from operator import sub
from math import sinh, cosh, tanh, copysign, ceil, log10 # math is nice :)
import random, json, os, sys, subprocess
##########################################################
# Defining graphics options
screenSize = 600, 600 # Initial screen size. Can be changed while running.
universePos = 0, 0 # Position of top left corner of the universe
controlsHeight = 55 # Thickness of control panel.
# A hidden horizontal menu that appear when hovered over
menuPos = 0, 0 # Position of top left corner of menu
menuHeight = 20 # Thickness of menu
menuMargin = 10 # Space between text is 2*menuMarign
'''Since the screen size can change while running the program
this definitions needs to be dynamic'''
def universe_size(screenSize):
# Let the Universe fill all space that is not controls
return screenSize[0], screenSize[1] - controlsHeight
def controls_pos(screenSize):
# Let the controls be at the bottom of the program window
return 0, screenSize[1] - controlsHeight
def controls_size(screenSize):
# Let the controls be as wide as the program window
return screenSize[0], controlsHeight
controlsPos = controls_pos(screenSize)
font = pygame.freetype.Font(None, 14) # Used for buttons, menu and speed display
bigfont = pygame.freetype.Font(None, 20) # Used for special messages
yellow = 240, 222, 5
darkYellow = 50, 50, 0
green = 0, 255, 0
red = 255, 0, 0
blue = 0, 0, 225
gray = 100, 100, 100
darkGray = 50, 50, 50
lightGray = 150, 150, 150
white = 255, 255, 255
black = 0, 0, 0
controlsBgColor = gray
buttonColor = lightGray
activeButtonColor = darkGray
textColor = black
menuColor = lightGray
menuActiveColor = gray
universeColor = black # universe background color
lightconeColor = darkYellow
lightlikeColor = yellow
spacelikeColor = red
timelikeColor = green
pointColor = blue
lineWidth = 5
pointRadius = 5
lightconeLineWidth = lineWidth
##########################################################
# Stuff to make Pyinstaller work
# There are probably more fixes needed...
'''
import packaging
import packaging.version
import packaging.specifiers
import packaging.requirements
import appdirs
font = pygame.freetype.Font('/home/tilia/anaconda3/lib/python3.5/site-packages/pygame/freesansbold.ttf', 14)
bigfont = pygame.freetype.Font('/home/tilia/anaconda3/lib/python3.5/site-packages/pygame/freesansbold.ttf', 20)
'''
##############################################################
# The universe, the objects (points and lines)
# and the Lorentz-transform
class Universe:
def get_origo(self):
return self.surface.get_rect().center
# objects in the universe will use coordinates centerd at origo
def get_origo_on_screen(self):
return self.surface.get_rect(topleft = universePos).center
def draw_lightcone(self):
x, y = self.get_origo()
dist = min(x, y) # distance to closest edge
pygame.draw.line(self.surface,lightconeColor,
(x-dist, y-dist), (x+dist, y+dist),
lightconeLineWidth)
pygame.draw.line(self.surface, lightconeColor,
(x+dist, y-dist), (x-dist, y+dist),
lightconeLineWidth)
def clear(self): # empty the Universe
self.frame = 0 # Lorentz frame represented by a number
self.lines = [] # objects in the universe
self.points = [] # objects in the universe
def __init__(self, size):
self.show_lightcone = True # show light-cone as default
self.surface = pygame.Surface(size) # Here be Universe
self.clear() # start empty
def draw_in_frame(self, frame):
''' draws the universe and all objects in it,
in the specified Lorentz frame '''
self.surface.fill(universeColor)
if self.show_lightcone:
self.draw_lightcone()
for line in self.lines:
coords = line.in_other_frame(frame)
# convert to specified Lorentz frame
pos = tuple(spacetime_to_pixel(self, coord)
for coord in coords)
# converts to pixel position
pygame.draw.line(self.surface, line.color(), pos[0], pos[1], lineWidth)
for point in self.points:
coord = point.in_other_frame(frame)
pos = spacetime_to_pixel(self, coord)
pygame.draw.circle(self.surface, pointColor, pos, pointRadius)
def draw(self): # draws the universe and all objects in it
self.draw_in_frame(self.frame)
def show(self): # puts the last drawn version of the universe on the screen
screen.blit(self.surface, universePos)
def Lorentz_transform(coord, frame_diff):
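    '''Boosts the event coord = (r, t) by the rapidity frame_diff.
    For example, frame_diff = 0.5 maps (0, 1) to roughly (-0.521, 1.128),
    i.e. a boost to a frame moving at tanh(0.5) ~ 46% of light speed.'''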
sh, ch = sinh(frame_diff), cosh(frame_diff)
r, t = coord
    return ch*r - sh*t, ch*t - sh*r
class Point:
def __init__(self, frame, coord):
self.coord = coord # space-time coordinate
self.frame = frame
# the Lorentz frame in which the object is defined
def in_other_frame(self, display_frame):
return Lorentz_transform(self.coord, display_frame - self.frame)
# gives space-time coordinates in display_frame
def line_color(coords):
'''different colors to show if the line is
time-like, light-like or space like'''
time = abs( coords[1][1] - coords[0][1] )
space = abs( coords[1][0] - coords[0][0] )
if time > space:
return timelikeColor
elif time == space:
return lightlikeColor
else:
return spacelikeColor
class Line:
def __init__(self, frame, coords):
self.frame = frame
# the Lorentz frame in which the object is defined
self.coords = coords # coordinates for the two end points
def in_other_frame(self, display_frame):
return tuple(Lorentz_transform(coord, display_frame - self.frame)
for coord in self.coords )
def color(self):
return line_color(self.coords)
##################################################################
# Relating position on the screen with coordinates in the Universe
def pixel_to_spacetime(universe, pos):
''' takes pixel position on the screen and gives
space-time coordinates in the universe '''
origo = universe.get_origo()
r = pos[0] - origo[0] # space coordinate
t = -(pos[1] - origo[1]) # time coordinate
return r, t
def spacetime_to_pixel(universe, coord):
''' takes space-time coordinates in universe
and gives pixel coordinates on universe.surface '''
origo = universe.get_origo_on_screen()
x = int(round(origo[0] + coord[0]))
y = int(round(origo[1] - coord[1]))
return x, y
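# Note on conventions: the scale is one pixel per unit of space and time, and
# the pixel y-axis is flipped so that time increases upward on the screen.
# spacetime_to_pixel draws onto universe.surface but uses the on-screen origo;
# the two coincide as long as universePos is (0, 0), i.e. the universe sits in
# the top left corner of the window.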
##################################################################
# Creating and destroying lines and points
def make_point(universe, pos):
# takes the pixel position of a point, and makes a point object
point = Point(universe.frame, pixel_to_spacetime(universe, pos) )
universe.points.append(point) # adds object to universe content
universe.draw() # update picture of universe
return point
def make_line(universe, pos):
# takes a tuple of two pixel positions and makes a Line object
coords = tuple(pixel_to_spacetime(universe, point)
for point in pos) # convert to space-time coordinates
line = Line(universe.frame, coords)
universe.lines.append(line) # adds object to list
universe.draw() # update picture of universe
return line
def straighten_line(start, end):
    ''' Aids the user in drawing perfectly
    horizontal, vertical or exactly diagonal lines '''
dx = end[0] - start[0]
dy = end[1] - start[1]
if abs(dx) < abs(dy)/2:
return start[0], end[1]
elif abs(dy) < abs(dx)/2:
return end[0], start[1]
else:
        dx = copysign(dy, dx)
return start[0] + dx, end[1]
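# For example, with start = (0, 0): end = (10, 3) snaps to (10, 0) (horizontal),
# end = (3, 10) snaps to (0, 10) (vertical), and end = (8, 10) snaps to the
# diagonal (10, 10); anything within a 1:2 slope of an axis snaps to that axis.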
def remove(universe, pos):
    ''' removes any point or line in the universe that is at position pos '''
coord = pixel_to_spacetime(universe, pos)
for point in universe.points:
point_coord = point.in_other_frame(universe.frame)
dist_sq = (coord[0] - point_coord[0])**2 + (coord[1] - point_coord[1])**2
if dist_sq <= pointRadius**2:
universe.points.remove(point)
return 1 # This return value means that a point was removed
for line in universe.lines:
line_coords = line.in_other_frame(universe.frame)
if line_coords[1][0] - line_coords[0][0] == 0: # a vertical line
if ( abs(coord[0] - line_coords[0][0]) <= lineWidth/2
and coord[1] <= max(line_coords[0][1], line_coords[1][1])
and coord[1] >= min(line_coords[0][1], line_coords[1][1])):
universe.lines.remove(line)
return 2 # This return value means that a line was removed
else: # not a vertical line
slope = (line_coords[1][1] - line_coords[0][1])/(line_coords[1][0] - line_coords[0][0])
if ( coord[0] <= max(line_coords[0][0], line_coords[1][0]) + lineWidth/2
and coord[0] >= min(line_coords[0][0], line_coords[1][0]) - lineWidth/2
and abs(coord[1] - (line_coords[0][1] + slope * (coord[0] - line_coords[0][0]))) <= lineWidth/2):
universe.lines.remove(line)
return 2 # This return value means that a line was removed
    return 0 # This return value means that nothing was removed
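# Note: for non-vertical lines the test above uses the *vertical* distance to
# the line, which exceeds the perpendicular distance by a factor of
# sqrt(1 + slope**2); steep lines therefore get a slightly thinner hit zone,
# and exactly vertical lines need their own branch to avoid a zero division.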
##################################################################
# Miscellaneous useful stuff
def center(rect, surface):
    ''' returns the pos for putting surface in the center of rect;
    good for centering text '''
return surface.get_rect(center = rect.center).topleft
###################################################################
# Menu: Help, Save, Load, Show/Hide light-cone
'''
This section of the code defines a hidden menu in the top left corner of the screen
which becomes visible when the mouse hovers over this area.
'''
def show_message(text): # shows a message on the screen
text, rect = bigfont.render(text, textColor)
rect.center = screen.get_rect().center
rect.move_ip(random.randint(-15,15), random.randint(-15,15))
pygame.draw.rect(screen, gray, rect.inflate(30,30))
pygame.draw.rect(screen, lightGray, rect.inflate(20,20))
screen.blit(text, rect.topleft)
def help(): # Tries to open the README
try:
if sys.platform == 'linux2' or sys.platform == 'linux':
subprocess.call(["xdg-open", "README.txt"])
else:
os.startfile("README.txt")
    except Exception:
        show_message("Sorry, can't help you")
def save():
if not os.path.exists('Saves/'):
os.mkdir("Saves")
file = tkinter.filedialog.asksaveasfile(defaultextension=".lor", initialdir = "Saves")
if file:
points = [{'frame': point.frame, 'coord': point.coord} for point in universe.points]
lines = [{'frame': line.frame, 'coords': line.coords} for line in universe.lines]
json.dump({ 'frame': universe.frame,
'show_lightcone': universe.show_lightcone,
'points': points,
'lines': lines}, file, indent=4)
file.close()
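# A saved .lor file is plain JSON; a minimal example (illustrative values):
# {"frame": 0.35, "show_lightcone": true,
#  "points": [{"frame": 0.0, "coord": [40, 25]}],
#  "lines": [{"frame": 0.0, "coords": [[-30, -30], [30, 30]]}]}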
def load():
file = tkinter.filedialog.askopenfile(defaultextension=".lor", initialdir = "Saves")
if file:
try:
universe_dict = json.load(file)
universeSize = universe_size(pygame.display.get_surface().get_rect().size)
global universe # So that I can modify universe
universe = Universe(universeSize)
universe.frame = universe_dict['frame']
universe.show_lightcone = universe_dict['show_lightcone']
universe.points = [Point(point['frame'], point['coord'])
for point in universe_dict['points']]
universe.lines = [Line(line['frame'], line['coords'])
for line in universe_dict['lines']]
        except Exception:
show_message("Unable to load that file")
universe.draw()
def show_or_hide_lightcone():
global universe
universe.show_lightcone = not universe.show_lightcone
universe.draw()
universe.show()
    draw_menu(event.pos) # relies on the global "event" set in the main loop
class MenuButton:
def __init__(self, name, pos, effect):
text, rect = font.render(name, textColor) # render label text
self.text = text
self.rect = pygame.Rect(pos, (rect.width + menuMargin, menuHeight))
# Button area
self.textpos = center(self.rect, self.text) # text in the center
self.effect = effect # what happens when clicked
def draw(self, pos):
if self.rect.collidepoint(pos):
pygame.draw.rect(screen, menuActiveColor, self.rect)
else:
pygame.draw.rect(screen, menuColor, self.rect)
screen.blit(self.text, self.textpos)
def do(self):
self.effect()
menu_options = (("Help", help), # menu options (label, effect)
("Save", save),
("Load", load),
("Show/Hide light-cone", show_or_hide_lightcone))
menu_list = [] # List of menu buttons, added when created
next_pos = menuPos # pos of first menu button
for name, effect in menu_options: # Creates all menu buttons
menu_button = MenuButton(name, next_pos, effect)
menu_list.append(menu_button)
next_pos = menu_button.rect.topright
menu_rect = pygame.Rect(menuPos, (next_pos[0] - menuPos[0], menuHeight)) # Menu area
def draw_menu(pos):
for menu_button in menu_list:
menu_button.draw(pos)
###################################################################
# Creating the GUI
controls = pygame.Surface(controls_size(screenSize)) # The control panel area
class Button:
def __init__(self, pos, size, text):
self.rect = pygame.Rect(pos, size)
self.text = font.render(text, textColor)[0]
self.is_active = False
self.textpos = center(self.rect, self.text)
def draw(self): # draws button on screen
if self.is_active:
pygame.draw.rect(controls, activeButtonColor, self.rect)
else:
pygame.draw.rect(controls, buttonColor, self.rect)
controls.blit(self.text, self.textpos) # put text on button
# Create all buttons
lineButton = Button((5, 5), (60, 20), "Lines")
pointButton = Button((5, 30), (60, 20), "Points")
removeButton = Button((70, 5), (60, 20), "Remove")
clearButton = Button((70, 30), (60, 20), "Clear")
buttons = (lineButton, pointButton, removeButton, clearButton) # All buttons
drawingOptions = (lineButton, pointButton, removeButton)
# These buttons cannot be active simultaneously
class Scroll_bar:
def __init__(self, pos, size):
self.rect = pygame.Rect(pos, size)
self.is_grabed = False
self.max = int((size[0] - size[1])/2)
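        # the handle can travel at most self.max pixels from the center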
self.handle = pygame.Rect((pos[0] + self.max, pos[1]), (size[1], size[1]))
        self.grab_pos = None # x-pos for where the scroll bar was grabbed
def draw(self, shift):
''' draws the scrollbar,
where the handle is moved from the center
by the length "shift"'''
pygame.draw.rect(controls, darkGray, self.rect)
pygame.draw.rect(controls, lightGray, self.handle.move(shift, 0))
def my_round(frac):
    ''' rounds off the speed value to a sensible number of decimals;
    this means more decimals when closer to light speed '''
if abs(frac) < 90:
return round(frac)
if abs(frac) < 99:
return round(frac,1)
if abs(frac) < 99.9:
return round(frac,2)
return round(frac, 2-ceil(log10(100-abs(frac))))
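# The scroll bar shift maps to a velocity via v/c = tanh(0.01 * shift), so e.g.
# shift = 100 displays my_round(100 * tanh(1)) = 76% of light speed and
# shift = 300 displays 99.51%; the speed approaches but never reaches c.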
class Speed_display: # Creates a place to display text
def __init__(self, pos, size):
self.rect = pygame.Rect(pos, size)
def hide(self): # Erase any text
pygame.draw.rect(controls, controlsBgColor, self.rect)
def show(self, shift): # Shows the relative velocity
if self.rect.width < 130:
text = str(my_round(100 * tanh(0.01 * shift))) + "% c"
elif self.rect.width < 290:
text = str(my_round(100 * tanh(0.01 * shift))) + "% of light speed"
else:
text = ("Instantly accelerate to "
+ str(my_round(100 * tanh(0.01 * shift)))
+ "% of light speed.")
        # Calculate the relative speed and adjust the text depending on available space
pygame.draw.rect(controls, controlsBgColor, self.rect) # Paint over any old text
text = font.render(text, textColor)[0] # Render text
textpos = center(self.rect, text) # Center text
controls.blit(text, textpos) # Place text on control panel
scroll_bar = Scroll_bar((138, 8), (screenSize[0] - 138 - 8, 18))
# one scroll bar to specify Lorentz transformations
speed_display = Speed_display((138, 8+18), (screenSize[0] - 138 - 8, 55-8-18))
# one text display to show the relative velocity change
###################################################################
# Draw the initial view
screen = pygame.display.set_mode(screenSize, pygame.RESIZABLE)
# Creates the program window
universe = Universe(universe_size(screenSize)) # create empty universe
universe.draw() # draws universe
universe.show() # puts the universe on screen
controls.fill(controlsBgColor) # paints the control panel's background color
for button in buttons:
button.draw() # draws button
scroll_bar.draw(0) # draws scroll bar, with the handle in the center.
screen.blit(controls, controlsPos) # put controls on screen
pygame.display.flip() # update window
###################################################################
# Running the program
clock = pygame.time.Clock() # clock to have clock-ticks, to save on CPU
running = True # Is program running? Assign "False" to quit.
# Global trackers that need to be updated from inside functions
class Global():
pass
gl = Global()
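# (an empty class used as a mutable namespace; types.SimpleNamespace would do too)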
gl.is_drawing_line = False # True iff the user is in the middle of drawing a line
gl.shift_key_is_down = False # True if the shift key is down
gl.last_pos = (-1, -1) # pos of the last MOUSEMOTION event. Initially outside the screen
gl.last_save_or_load = None # Name of the last file the session was saved to or loaded from
gl.start = None # Starting point of line
def in_the_universe(pos): # Check if the point is in the universe
return (universe.surface.get_rect(topleft = universePos).collidepoint(pos)
and not menu_rect.collidepoint(pos) )
def left_click_in_menu(pos):
for button in menu_list:
if button.rect.collidepoint(pos):
button.do()
def left_click_in_the_universe(pos):
if pointButton.is_active:
make_point(universe, pos) # make point there
pygame.draw.circle(screen, pointColor, pos, pointRadius)
# draw the point
elif lineButton.is_active:
if not gl.is_drawing_line: # not already marked start of line
gl.start = pos # remember start of line
gl.is_drawing_line = True # drawing in progress
else:
end = pos
if gl.shift_key_is_down:
end = straighten_line(gl.start, end)
make_line(universe, (gl.start, end))
gl.is_drawing_line = False # line is now done
# no need to draw the line, because it is already there
elif removeButton.is_active:
remove(universe, pos) # remove anything that is clicked on
universe.draw() # redraw universe
universe.show()
def left_click_on_the_controls(pos):
if scroll_bar.handle.move(controlsPos).collidepoint(pos):
# click on scroll bar handle
scroll_bar.is_grabed = True
        scroll_bar.grab_pos = pos[0] # save x-pos of where it was grabbed
return
for button in buttons: # loop all buttons
if button.rect.move(controlsPos).collidepoint(pos):
# check if we are on this button
            button.is_active = not button.is_active # toggle is_active
button.draw() # re-draw button
if button in drawingOptions:
for other_button in drawingOptions:
if other_button != button:
other_button.is_active = False
other_button.draw()
                # can't have more than one of these active at the same time
            if gl.is_drawing_line:
                gl.is_drawing_line = False # interrupts any half-finished line
                universe.show() # paint over half-finished line
screen.blit(controls, controlsPos)
break # no need to check other buttons
def right_click():
    # the "never mind" action; interrupts whatever is about to happen
    if gl.is_drawing_line: # interrupts any half-finished line
        gl.is_drawing_line = False
        universe.show() # paint over half-finished line
    elif scroll_bar.is_grabed: # interrupts any Lorentz transformation
        scroll_bar.is_grabed = False # drop scroll bar
scroll_bar.draw(0)
speed_display.hide()
screen.blit(controls, controlsPos)
universe.draw() # restore
universe.show() # redraw
elif clearButton.is_active:
clearButton.is_active = False # reset button
clearButton.draw() # redraw button
screen.blit(controls, controlsPos)
def left_mouse_button_up(pos): # finalizes clear or scroll
if clearButton.is_active:
universe.clear() # clear universe
universe.draw() # redraw universe
        universe.show() # put the cleared universe on screen
clearButton.is_active = False # reset button
clearButton.draw() # redraw button
screen.blit(controls, controlsPos)
elif scroll_bar.is_grabed: # finalizes any Lorentz transformation
scroll_bar.is_grabed = False
scroll_bar.draw(0)
universe.frame += 0.01 * (pos[0] - scroll_bar.grab_pos)
speed_display.hide()
screen.blit(controls, controlsPos)
def mouse_motion(pos):
if scroll_bar.is_grabed:
shift = pos[0] - scroll_bar.grab_pos
if shift < -scroll_bar.max:
shift = -scroll_bar.max
elif shift > scroll_bar.max:
shift = scroll_bar.max
universe.draw_in_frame(universe.frame + 0.01 * shift)
universe.show()
scroll_bar.draw(shift)
speed_display.show(shift)
screen.blit(controls, controlsPos)
else:
if gl.is_drawing_line:
if in_the_universe(pos):
end = pos
if gl.shift_key_is_down:
end = straighten_line(gl.start, end)
universe.show()
color = line_color((gl.start, end))
pygame.draw.line(screen, color, gl.start, end, lineWidth)
elif in_the_universe(gl.last_pos):
universe.show()
if menu_rect.collidepoint(pos):
draw_menu(pos)
elif (not gl.is_drawing_line) and menu_rect.collidepoint(gl.last_pos):
universe.show()
gl.last_pos = pos
while running:
for event in pygame.event.get(): # what the user is doing
if event.type == pygame.QUIT:
#pygame.display.quit() # close window
running = False # time to stop running program
break # don't check more events
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
# a left click
if menu_rect.collidepoint(event.pos): # click in menu
left_click_in_menu(event.pos)
elif in_the_universe(event.pos): # a click in universe
left_click_in_the_universe(event.pos)
else: # a click on the controls
left_click_on_the_controls(event.pos)
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
right_click()
elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
left_mouse_button_up(event.pos)
elif event.type == pygame.MOUSEMOTION:
mouse_motion(event.pos)
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_LSHIFT
or event.key == pygame.K_RSHIFT):
gl.shift_key_is_down = True
if gl.is_drawing_line:
end = straighten_line(gl.start, pygame.mouse.get_pos())
universe.show()
color = line_color((gl.start, end))
pygame.draw.line(screen, color, gl.start, end, lineWidth)
elif event.type == pygame.KEYUP and (event.key == pygame.K_LSHIFT
or event.key == pygame.K_RSHIFT):
gl.shift_key_is_down = False
if gl.is_drawing_line:
end = pygame.mouse.get_pos()
universe.show()
color = line_color((gl.start, end))
pygame.draw.line(screen, color, gl.start, end, lineWidth)
elif event.type == pygame.VIDEORESIZE: # resize window
screen = pygame.display.set_mode(event.size, pygame.RESIZABLE)
universe.surface = pygame.Surface(universe_size(event.size))
universe.draw()
universe.show()
controlsPos = controls_pos(event.size)
controls = pygame.Surface(controls_size(event.size))
controls.fill(controlsBgColor)
for button in buttons:
button.draw() # draws button
scroll_bar = Scroll_bar((138, 8), (event.size[0] - 138 - 8, 18))
speed_display = Speed_display((138, 8+18), (event.size[0] - 138 - 8, 55-8-18))
            scroll_bar.draw(0) # draws scroll bar, with the handle in the center.
screen.blit(controls, controlsPos)
pygame.display.flip() # show changes
clock.tick(120) # to save on CPU use
| gpl-3.0 |
CENDARI/editorsnotes | editorsnotes/main/migrations/0072_auto__chg_field_notesection_content.py | 1 | 16564 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'NoteSection.content'
db.alter_column('main_notesection', 'content', self.gf('editorsnotes.main.fields.XHTMLField')(null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'NoteSection.content'
raise RuntimeError("Cannot reverse this migration. 'NoteSection.content' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.alias': {
'Meta': {'unique_together': "(('topic', 'name'),)", 'object_name': 'Alias'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_alias_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'80'"}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['main.Topic']"})
},
'main.citation': {
'Meta': {'ordering': "['ordering']", 'object_name': 'Citation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_citation_set'", 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'citations'", 'to': "orm['main.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('editorsnotes.main.fields.XHTMLField', [], {'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.document': {
'Meta': {'ordering': "['ordering', 'import_id']", 'object_name': 'Document'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parts'", 'null': 'True', 'to': "orm['main.Document']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_document_set'", 'to': "orm['auth.User']"}),
'description': ('editorsnotes.main.fields.XHTMLField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '32'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updater': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_to_update_document_set'", 'to': "orm['auth.User']"}),
'ordering': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'main.documentlink': {
'Meta': {'object_name': 'DocumentLink'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_documentlink_set'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links'", 'to': "orm['main.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'main.documentmetadata': {
'Meta': {'unique_together': "(('document', 'key'),)", 'object_name': 'DocumentMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_documentmetadata_set'", 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['main.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'main.footnote': {
'Meta': {'object_name': 'Footnote'},
'content': ('editorsnotes.main.fields.XHTMLField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_footnote_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updater': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_to_update_footnote_set'", 'to': "orm['auth.User']"}),
'transcript': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'footnotes'", 'to': "orm['main.Transcript']"})
},
'main.note': {
'Meta': {'ordering': "['-last_updated']", 'object_name': 'Note'},
'content': ('editorsnotes.main.fields.XHTMLField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_note_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updater': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_to_update_note_set'", 'to': "orm['auth.User']"}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'80'"})
},
'main.notesection': {
'Meta': {'object_name': 'NoteSection'},
'content': ('editorsnotes.main.fields.XHTMLField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_notesection_set'", 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Document']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updater': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_to_update_notesection_set'", 'to': "orm['auth.User']"}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['main.Note']"})
},
'main.project': {
'Meta': {'object_name': 'Project'},
'description': ('editorsnotes.main.fields.XHTMLField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'80'"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'main.scan': {
'Meta': {'ordering': "['ordering']", 'object_name': 'Scan'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_scan_set'", 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scans'", 'to': "orm['main.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'main.topic': {
'Meta': {'ordering': "['slug']", 'object_name': 'Topic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_topic_set'", 'to': "orm['auth.User']"}),
'has_accepted_facts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_candidate_facts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updater': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_to_update_topic_set'", 'to': "orm['auth.User']"}),
'preferred_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'80'"}),
'related_topics': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_topics_rel_+'", 'blank': 'True', 'to': "orm['main.Topic']"}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'80'", 'db_index': 'True'}),
'summary': ('editorsnotes.main.fields.XHTMLField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'})
},
'main.topicassignment': {
'Meta': {'object_name': 'TopicAssignment'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_topicassignment_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['main.Topic']"})
},
'main.transcript': {
'Meta': {'object_name': 'Transcript'},
'content': ('editorsnotes.main.fields.XHTMLField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_transcript_set'", 'to': "orm['auth.User']"}),
'document': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'_transcript'", 'unique': 'True', 'to': "orm['main.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updater': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_to_update_transcript_set'", 'to': "orm['auth.User']"})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'affiliation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Project']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'zotero_key': ('django.db.models.fields.CharField', [], {'max_length': "'24'", 'null': 'True', 'blank': 'True'}),
'zotero_uid': ('django.db.models.fields.CharField', [], {'max_length': "'6'", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['main']
| agpl-3.0 |
rembo10/headphones | lib/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
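# In other words: typical GB2312 text draws ~79% of its characters from the
# 512 most frequent ones, a frequent/rare ratio of 0.79135/(1-0.79135) ~ 3.79,
# versus 512/(3755-512) ~ 0.157 for uniformly random characters; the constant
# above scales the observed ratio when the analyser computes its confidence.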
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| gpl-3.0 |
qtekfun/htcDesire820Kernel | external/chromium_org/chrome/common/extensions/docs/server2/app_yaml_helper_test.py | 43 | 6153 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from app_yaml_helper import AppYamlHelper
from extensions_paths import SERVER2
from host_file_system_provider import HostFileSystemProvider
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import MoveTo, TestFileSystem
from test_util import DisableLogging
_ExtractVersion, _IsGreater, _GenerateAppYaml = (
AppYamlHelper.ExtractVersion,
AppYamlHelper.IsGreater,
AppYamlHelper.GenerateAppYaml)
class AppYamlHelperTest(unittest.TestCase):
def testExtractVersion(self):
def run_test(version):
self.assertEqual(version, _ExtractVersion(_GenerateAppYaml(version)))
run_test('0')
run_test('0-0')
run_test('0-0-0')
run_test('1')
run_test('1-0')
run_test('1-0-0')
run_test('1-0-1')
run_test('1-1-0')
run_test('1-1-1')
run_test('2-0-9')
run_test('2-0-12')
run_test('2-1')
run_test('2-1-0')
run_test('2-11-0')
run_test('3-1-0')
run_test('3-1-3')
run_test('3-12-0')
def testIsGreater(self):
def assert_is_greater(lhs, rhs):
self.assertTrue(_IsGreater(lhs, rhs), '%s is not > %s' % (lhs, rhs))
self.assertFalse(_IsGreater(rhs, lhs),
'%s should not be > %s' % (rhs, lhs))
assert_is_greater('0-0', '0')
assert_is_greater('0-0-0', '0')
assert_is_greater('0-0-0', '0-0')
assert_is_greater('1', '0')
assert_is_greater('1', '0-0')
assert_is_greater('1', '0-0-0')
assert_is_greater('1-0', '0-0')
assert_is_greater('1-0-0-0', '0-0-0')
assert_is_greater('2-0-12', '2-0-9')
assert_is_greater('2-0-12', '2-0-9-0')
assert_is_greater('2-0-12-0', '2-0-9')
assert_is_greater('2-0-12-0', '2-0-9-0')
assert_is_greater('2-1', '2-0-9')
assert_is_greater('2-1', '2-0-12')
assert_is_greater('2-1-0', '2-0-9')
assert_is_greater('2-1-0', '2-0-12')
assert_is_greater('3-1-0', '2-1')
assert_is_greater('3-1-0', '2-1-0')
assert_is_greater('3-1-0', '2-11-0')
assert_is_greater('3-1-3', '3-1-0')
assert_is_greater('3-12-0', '3-1-0')
assert_is_greater('3-12-0', '3-1-3')
assert_is_greater('3-12-0', '3-1-3-0')
@DisableLogging('warning')
def testInstanceMethods(self):
test_data = {
'app.yaml': _GenerateAppYaml('1-0'),
'app_yaml_helper.py': 'Copyright notice etc'
}
updates = []
# Pass a specific file system at head to the HostFileSystemProvider so that
# we know it's always going to be backed by a MockFileSystem. The Provider
# may decide to wrap it in caching etc.
file_system_at_head = MockFileSystem(
TestFileSystem(test_data, relative_to=SERVER2))
def apply_update(update):
update = MoveTo(SERVER2, update)
file_system_at_head.Update(update)
updates.append(update)
def host_file_system_constructor(branch, revision=None):
self.assertEqual('trunk', branch)
self.assertTrue(revision is not None)
return MockFileSystem.Create(
TestFileSystem(test_data, relative_to=SERVER2), updates[:revision])
object_store_creator = ObjectStoreCreator.ForTest()
host_file_system_provider = HostFileSystemProvider(
object_store_creator,
default_trunk_instance=file_system_at_head,
constructor_for_test=host_file_system_constructor)
helper = AppYamlHelper(object_store_creator, host_file_system_provider)
def assert_is_up_to_date(version):
self.assertTrue(helper.IsUpToDate(version),
'%s is not up to date' % version)
self.assertRaises(ValueError,
helper.GetFirstRevisionGreaterThan, version)
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
assert_is_up_to_date('1-0-0')
assert_is_up_to_date('1-5-0')
# Revision 1.
apply_update({
'app.yaml': _GenerateAppYaml('1-5-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
assert_is_up_to_date('1-5-0')
assert_is_up_to_date('2-5-0')
# Revision 2.
apply_update({
'app_yaml_helper.py': 'fixed a bug'
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
assert_is_up_to_date('1-5-0')
assert_is_up_to_date('2-5-0')
# Revision 3.
apply_update({
'app.yaml': _GenerateAppYaml('1-6-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
self.assertEqual(3, helper.GetFirstRevisionGreaterThan('1-5-0'))
assert_is_up_to_date('2-5-0')
# Revision 4.
apply_update({
'app.yaml': _GenerateAppYaml('1-8-0')
})
# Revision 5.
apply_update({
'app.yaml': _GenerateAppYaml('2-0-0')
})
# Revision 6.
apply_update({
'app.yaml': _GenerateAppYaml('2-2-0')
})
# Revision 7.
apply_update({
'app.yaml': _GenerateAppYaml('2-4-0')
})
# Revision 8.
apply_update({
'app.yaml': _GenerateAppYaml('2-6-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
self.assertEqual(3, helper.GetFirstRevisionGreaterThan('1-5-0'))
self.assertEqual(5, helper.GetFirstRevisionGreaterThan('1-8-0'))
self.assertEqual(6, helper.GetFirstRevisionGreaterThan('2-0-0'))
self.assertEqual(6, helper.GetFirstRevisionGreaterThan('2-1-0'))
self.assertEqual(7, helper.GetFirstRevisionGreaterThan('2-2-0'))
self.assertEqual(7, helper.GetFirstRevisionGreaterThan('2-3-0'))
self.assertEqual(8, helper.GetFirstRevisionGreaterThan('2-4-0'))
self.assertEqual(8, helper.GetFirstRevisionGreaterThan('2-5-0'))
assert_is_up_to_date('2-6-0')
assert_is_up_to_date('2-7-0')
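# A minimal sketch of the dash-separated version comparison exercised by
# testIsGreater above; this is an illustrative assumption, not the actual
# AppYamlHelper.IsGreater implementation (which lives in app_yaml_helper.py).
def _is_greater_sketch(lhs, rhs):
    # Compare numeric components left to right; on a common prefix the
    # longer version wins, so '0-0' > '0' and '1-0-0-0' > '0-0-0'.
    return ([int(part) for part in lhs.split('-')] >
            [int(part) for part in rhs.split('-')])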
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
openearth/PyWPS | tests/processes/returner.py | 2 | 1394 | from pywps.Process import WPSProcess
class Process(WPSProcess):
def __init__(self):
##
# Process initialization
WPSProcess.__init__(self,
identifier = "returner",
title="Return process",
abstract="""This is demonstration process of PyWPS, returns the same file, it gets on input, as the output.""",
version = "1.0",
storeSupported = True,
statusSupported = True)
##
# Adding process inputs
self.dataIn = self.addComplexInput(identifier="data",
title="Input vector data",
formats = [{'mimeType':'text/xml'}])
self.textIn = self.addLiteralInput(identifier="text",
title = "Some width")
##
# Adding process outputs
self.dataOut = self.addComplexOutput(identifier="output",
title="Output vector data",
formats = [{'mimeType':'text/xml'}])
self.textOut = self.addLiteralOutput(identifier = "text",
title="Output literal data")
##
# Execution part of the process
def execute(self):
# just copy the input values to output values
self.dataOut.setValue( self.dataIn.getValue() )
self.textOut.setValue( self.textIn.getValue() )
return
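# A minimal companion sketch built on the same WPSProcess API used above
# (addLiteralInput/addLiteralOutput, getValue/setValue); the identifier and
# titles are illustrative assumptions, not part of the PyWPS test suite.
class EchoTextProcess(WPSProcess):
    def __init__(self):
        WPSProcess.__init__(self,
            identifier="echotext",
            title="Echo text process",
            abstract="""Returns the literal input unchanged.""",
            version="1.0",
            storeSupported=True,
            statusSupported=True)
        self.textIn = self.addLiteralInput(identifier="text",
            title="Input literal data")
        self.textOut = self.addLiteralOutput(identifier="text",
            title="Output literal data")
    def execute(self):
        # Copy the input literal straight to the output, as above
        self.textOut.setValue(self.textIn.getValue())
        return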
| gpl-2.0 |
shsingh/ansible | lib/ansible/module_utils/network/vyos/facts/firewall_global/firewall_global.py | 2 | 12985 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The vyos firewall_global fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from re import findall, search, M
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.vyos.argspec.firewall_global.firewall_global import Firewall_globalArgs
class Firewall_globalFacts(object):
""" The vyos firewall_global fact class
"""
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = Firewall_globalArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def get_device_data(self, connection):
return connection.get_config()
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for firewall_global
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if not data:
# typically data is populated from the current device configuration
# data = connection.get('show running-config | section ^interface')
# using mock data instead
data = self.get_device_data(connection)
objs = {}
firewalls = findall(r'^set firewall .*$', data, M)
if firewalls:
objs = self.render_config(firewalls)
facts = {}
params = utils.validate_config(self.argument_spec, {'config': objs})
facts['firewall_global'] = utils.remove_empties(params['config'])
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
conf = '\n'.join(filter(lambda x: ('firewall ipv6-name' not in x and 'firewall name' not in x), conf))
a_lst = ['config_trap', 'validation', 'log_martians', 'syn_cookies', 'twa_hazards_protection']
firewall = self.parse_attr(conf, a_lst)
f_sub = {'ping': self.parse_ping(conf),
'group': self.parse_group(conf),
'route_redirects': self.route_redirects(conf),
'state_policy': self.parse_state_policy(conf)}
firewall.update(f_sub)
return firewall
def route_redirects(self, conf):
"""
This function forms the regex to fetch the AFI and invokes
functions to fetch route redirects and source routes.
:param conf: configuration data.
:return: generated rule list configuration.
"""
rr_lst = []
v6_attr = findall(r'^set firewall (?:ipv6-src-route|ipv6-receive-redirects) (\S+)', conf, M)
if v6_attr:
obj = self.parse_rr_attrib(conf, 'ipv6')
if obj:
rr_lst.append(obj)
v4_attr = findall(r'^set firewall (?:ip-src-route|receive-redirects|send-redirects) (\S+)', conf, M)
if v4_attr:
obj = self.parse_rr_attrib(conf, 'ipv4')
if obj:
rr_lst.append(obj)
return rr_lst
def parse_rr_attrib(self, conf, attrib=None):
"""
This function fetches the 'ip_src_route' attribute and
invokes the function to parse ICMP redirects.
:param conf: configuration to be parsed.
:param attrib: 'ipv4/ipv6'.
:return: generated config dictionary.
"""
cfg_dict = self.parse_attr(conf, ['ip_src_route'], type=attrib)
cfg_dict['icmp_redirects'] = self.parse_icmp_redirects(conf, attrib)
cfg_dict['afi'] = attrib
return cfg_dict
def parse_icmp_redirects(self, conf, attrib=None):
"""
This function triggers the parsing of 'icmp_redirects' attributes.
:param conf: configuration to be parsed.
:param attrib: 'ipv4/ipv6'.
:return: generated config dictionary.
"""
a_lst = ['send', 'receive']
cfg_dict = self.parse_attr(conf, a_lst, type=attrib)
return cfg_dict
def parse_ping(self, conf):
"""
This function triggers the parsing of 'ping' attributes.
:param conf: configuration to be parsed.
:return: generated config dictionary.
"""
a_lst = ['all', 'broadcast']
cfg_dict = self.parse_attr(conf, a_lst)
return cfg_dict
def parse_state_policy(self, conf):
"""
This function fetches the connection type and invokes the
function to parse other state-policy attributes.
:param conf: configuration data.
:return: generated rule list configuration.
"""
sp_lst = []
attrib = 'state-policy'
policies = findall(r'^set firewall ' + attrib + ' (\\S+)', conf, M)
if policies:
rules_lst = []
for sp in set(policies):
sp_regex = r' %s .+$' % sp
cfg = '\n'.join(findall(sp_regex, conf, M))
obj = self.parse_policies(cfg, sp)
obj['connection_type'] = sp
if obj:
rules_lst.append(obj)
sp_lst = sorted(rules_lst, key=lambda i: i['connection_type'])
return sp_lst
def parse_policies(self, conf, attrib=None):
"""
This function triggers the parsing of policy attributes
action and log.
:param conf: configuration
:param attrib: connection type.
:return: generated rule configuration dictionary.
"""
a_lst = ['action', 'log']
cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
return cfg_dict
def parse_group(self, conf):
"""
This function triggers the parsing of 'group' attributes.
:param conf: configuration.
:return: generated config dictionary.
"""
cfg_dict = {}
cfg_dict['port_group'] = self.parse_group_lst(conf, 'port-group')
cfg_dict['address_group'] = self.parse_group_lst(conf, 'address-group')
cfg_dict['network_group'] = self.parse_group_lst(conf, 'network-group')
return cfg_dict
def parse_group_lst(self, conf, type):
"""
This function fetches the name of the group and invokes the
function to parse group attributes.
:param conf: configuration data.
:param type: type of group.
:return: generated group list configuration.
"""
g_lst = []
groups = findall(r'^set firewall group ' + type + ' (\\S+)', conf, M)
if groups:
rules_lst = []
for gr in set(groups):
gr_regex = r' %s .+$' % gr
cfg = '\n'.join(findall(gr_regex, conf, M))
obj = self.parse_groups(cfg, type, gr)
obj['name'] = gr.strip("'")
if obj:
rules_lst.append(obj)
g_lst = sorted(rules_lst, key=lambda i: i['name'])
return g_lst
def parse_groups(self, conf, type, name):
"""
This function fetches the description and invokes
the parsing of group members.
:param conf: configuration.
:param type: type of group.
:param name: name of group.
:return: generated configuration dictionary.
"""
a_lst = ['name', 'description']
group = self.parse_attr(conf, a_lst)
key = self.get_key(type)
r_sub = {key[0]: self.parse_address_port_lst(conf, name, key[1])}
group.update(r_sub)
return group
def parse_address_port_lst(self, conf, name, key):
"""
This function forms the regex to fetch the
group member attributes.
:param conf: configuration data.
:param name: name of group.
:param key: key value.
:return: generated member list configuration.
"""
l_lst = []
attribs = findall(r'^.*' + name + ' ' + key + ' (\\S+)', conf, M)
if attribs:
for attr in attribs:
if key == 'port':
l_lst.append({"port": attr.strip("'")})
else:
l_lst.append({"address": attr.strip("'")})
return l_lst
def parse_attr(self, conf, attr_list, match=None, type=None):
"""
This function performs the following:
- Forms the regex to fetch the required attribute config.
- Type-casts the output into the desired format.
:param conf: configuration.
:param attr_list: list of attributes.
:param match: parent node/attribute name.
:return: generated config dictionary.
"""
config = {}
for attrib in attr_list:
regex = self.map_regex(attrib, type)
if match:
regex = match + ' ' + regex
if conf:
if self.is_bool(attrib):
attr = self.map_regex(attrib, type)
out = conf.find(attr.replace("_", "-"))
dis = conf.find(attr.replace("_", "-") + " 'disable'")
if out >= 1:
if dis >= 1:
config[attrib] = False
else:
config[attrib] = True
else:
out = search(r'^.*' + regex + ' (.+)', conf, M)
if out:
val = out.group(1).strip("'")
if self.is_num(attrib):
val = int(val)
config[attrib] = val
return config
def get_key(self, type):
"""
This function maps the group type to
the member type.
:param type:
:return:
"""
key = ()
if type == 'port-group':
key = ('members', 'port')
elif type == 'address-group':
key = ('members', 'address')
elif type == 'network-group':
key = ('members', 'network')
return key
def map_regex(self, attrib, type=None):
"""
- This function constructs the regex string.
- Replaces underscores with hyphens.
:param attrib: attribute
:return: regex string
"""
regex = attrib.replace("_", "-")
if attrib == 'all':
regex = 'all-ping'
elif attrib == 'disabled':
regex = 'disable'
elif attrib == 'broadcast':
regex = 'broadcast-ping'
elif attrib == 'send':
if type == 'ipv6':
regex = 'ipv6-send-redirects'
else:
regex = 'send-redirects'
elif attrib == 'ip_src_route':
if type == 'ipv6':
regex = 'ipv6-src-route'
elif attrib == 'receive':
if type == 'ipv6':
regex = 'ipv6-receive-redirects'
else:
regex = 'receive-redirects'
return regex
def is_num(self, attrib):
"""
This function looks for the attribute in predefined integer type set.
:param attrib: attribute.
:return: True/false.
"""
num_set = ('time', 'code', 'type', 'count', 'burst', 'number')
return True if attrib in num_set else False
def get_src_route(self, attrib):
"""
This function looks for the attribute in predefined integer type set.
:param attrib: attribute.
:return: True/false.
"""
return 'ipv6_src_route' if attrib == 'ipv6' else 'ip_src_route'
def is_bool(self, attrib):
"""
This function looks for the attribute in predefined bool type set.
:param attrib: attribute.
:return: True/False
"""
bool_set = ('all',
'log',
'send',
'receive',
'broadcast',
'config_trap',
'log_martians',
'syn_cookies',
'ip_src_route',
'twa_hazards_protection')
return True if attrib in bool_set else False
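# A minimal sketch showing how the parser above consumes raw `set firewall`
# lines; the sample configuration and the `module=None` shortcut are
# illustrative assumptions, not how Ansible actually invokes this fact class.
def _demo_render_config():
    sample = [
        "set firewall all-ping 'enable'",
        "set firewall syn-cookies 'enable'",
        "set firewall state-policy established action 'accept'",
        "set firewall group port-group TELNET port '23'",
    ]
    facts = Firewall_globalFacts(module=None)
    # render_config expects the list of matched `set firewall ...` lines
    return facts.render_config(sample)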
| gpl-3.0 |
eayunstack/neutron | neutron/tests/unit/agent/test_agent_extensions_manager.py | 3 | 1626 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.agent import agent_extensions_manager as ext_manager
from neutron.conf.agent import agent_extensions_manager as ext_manager_config
from neutron.tests import base
class TestAgentExtensionsManager(base.BaseTestCase):
def setUp(self):
super(TestAgentExtensionsManager, self).setUp()
mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension',
autospec=True).start()
conf = cfg.CONF
ext_manager_config.register_agent_ext_manager_opts()
cfg.CONF.set_override('extensions', ['qos'], 'agent')
namespace = 'neutron.agent.l2.extensions'
self.manager = ext_manager.AgentExtensionsManager(conf, namespace)
def _get_extension(self):
return self.manager.extensions[0].obj
def test_initialize(self):
connection = object()
self.manager.initialize(connection, 'fake_driver_type')
ext = self._get_extension()
ext.initialize.assert_called_once_with(connection, 'fake_driver_type')
| apache-2.0 |
numairmansur/RoBO | robo/util/mc_part.py | 3 | 1782 | import numpy as np
import logging
logger = logging.getLogger(__name__)
def joint_pmin(m, V, Nf):
"""
Computes the probability of each given point being the minimum
by sampling functions and counting how often each point has the
smallest function value.
Parameters
----------
m: np.ndarray(N, 1)
Mean value of each of the N points.
V: np.ndarray(N, N)
Covariance matrix for all points
Nf: int
Number of function samples that will be drawn at each point
Returns
-------
np.ndarray(N,1)
pmin distribution
"""
Nb = m.shape[0]
noise = 0
while True:
try:
cV = np.linalg.cholesky(V + noise * np.eye(V.shape[0]))
break
except np.linalg.LinAlgError:
if noise == 0:
noise = 1e-10
if noise == 10000:
raise np.linalg.LinAlgError('Cholesky '
'decomposition failed.')
else:
noise *= 10
if noise > 0:
logger.error("Add %f noise on the diagonal." % noise)
# Draw new function samples from the innovated GP
# on the representer points
F = np.random.multivariate_normal(mean=np.zeros(Nb), cov=np.eye(Nb), size=Nf)
funcs = np.dot(cV, F.T)
funcs = funcs[:, :, None]
m = m[:, None, :]
funcs = m + funcs
funcs = funcs.reshape(funcs.shape[0], funcs.shape[1] * funcs.shape[2])
# Determine the minima for each function sample
mins = np.argmin(funcs, axis=0)
c = np.bincount(mins)
# Count how often each representer point was the minimum
min_count = np.zeros((Nb,))
min_count[:len(c)] += c
pmin = (min_count / funcs.shape[1])
pmin[np.where(pmin < 1e-70)] = 1e-70
return pmin
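# A minimal usage sketch on a toy three-point posterior; the mean and
# covariance values below are illustrative assumptions, not RoBO defaults.
def _demo_joint_pmin():
    m = np.array([[0.0], [0.5], [2.0]])  # posterior means, shape (N, 1)
    V = np.eye(3) * 0.01                 # nearly independent points
    pmin = joint_pmin(m, V, Nf=1000)     # first entry should dominate
    return pmin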
| bsd-3-clause |
Batterfii/django | django/contrib/gis/gdal/feature.py | 439 | 4153 | from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"""
This class wraps an OGR Feature and needs to be instantiated
from a Layer object.
"""
def __init__(self, feat, layer):
"""
Initializes Feature from a pointer and its Layer object.
"""
if not feat:
raise GDALException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._layer = layer
def __del__(self):
"Releases a reference to this object."
if self._ptr and capi:
capi.destroy_feature(self._ptr)
def __getitem__(self, index):
"""
Gets the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method instead to
retrieve the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, six.string_types):
i = self.index(index)
else:
if index < 0 or index >= self.num_fields:  # valid indices are 0..num_fields-1
raise OGRIndexError('index out of range')
i = index
return Field(self, i)
def __iter__(self):
"Iterates over each field in the Feature."
for i in range(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
# #### Feature Properties ####
@property
def encoding(self):
return self._layer._ds.encoding
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
name = capi.get_feat_name(self._layer._ldefn)
return force_text(name, self.encoding, strings_only=True)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
for i in range(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Returns the OGR Geometry Type for this Feture."
return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
# #### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, force_bytes(field_name))
if i < 0:
raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
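# A minimal sketch of typical Feature usage through a Layer; the shapefile
# path and the 'NAME' field are illustrative assumptions.
def _demo_feature_iteration(shp_path):
    from django.contrib.gis.gdal import DataSource
    layer = DataSource(shp_path)[0]
    for feature in layer:                 # each item is a Feature instance
        # get() returns the field value; feature['NAME'] would return the
        # Field object instead.
        yield feature.get('NAME'), feature.geom.geom_type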
| bsd-3-clause |
dgillis/scrapy | scrapy/spidermiddlewares/offsite.py | 85 | 2120 | """
Offsite Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import re
import logging
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
logger = logging.getLogger(__name__)
class OffsiteMiddleware(object):
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
o = cls(crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def process_spider_output(self, response, result, spider):
for x in result:
if isinstance(x, Request):
if x.dont_filter or self.should_follow(x, spider):
yield x
else:
domain = urlparse_cached(x).hostname
if domain and domain not in self.domains_seen:
self.domains_seen.add(domain)
logger.debug("Filtered offsite request to %(domain)r: %(request)s",
{'domain': domain, 'request': x}, extra={'spider': spider})
self.stats.inc_value('offsite/domains', spider=spider)
self.stats.inc_value('offsite/filtered', spider=spider)
else:
yield x
def should_follow(self, request, spider):
regex = self.host_regex
# hostname can be None for malformed URLs (like javascript links)
host = urlparse_cached(request).hostname or ''
return bool(regex.search(host))
def get_host_regex(self, spider):
"""Override this method to implement a different offsite policy"""
allowed_domains = getattr(spider, 'allowed_domains', None)
if not allowed_domains:
return re.compile('') # allow all by default
regex = r'^(.*\.)?(%s)$' % '|'.join(re.escape(d) for d in allowed_domains if d is not None)
return re.compile(regex)
def spider_opened(self, spider):
self.host_regex = self.get_host_regex(spider)
self.domains_seen = set()
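# A minimal sketch of a spider this middleware acts on; the domains and
# URLs below are illustrative assumptions. Requests whose host matches
# allowed_domains pass should_follow(); others are dropped and counted
# under the 'offsite/domains' and 'offsite/filtered' stats.
import scrapy

class _OffsiteDemoSpider(scrapy.Spider):
    name = 'offsite_demo'
    allowed_domains = ['example.com']
    start_urls = ['http://example.com/']

    def parse(self, response):
        yield Request('http://sub.example.com/')  # kept: subdomain matches
        yield Request('http://other-site.org/')   # dropped as offsite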
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/manifold/locally_linear.py | 37 | 25852 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
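def _demo_barycenter_weights():
    # A minimal sketch on random data; sizes are illustrative assumptions.
    # Each row of the result sums to 1 (barycentric weights).
    rng = np.random.RandomState(0)
    X = rng.rand(5, 2)       # 5 points in 2-D
    Z = rng.rand(5, 3, 2)    # 3 assumed "neighbors" per point
    B = barycenter_weights(X, Z)
    assert np.allclose(B.sum(axis=1), 1.0)
    return B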
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
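def _demo_barycenter_graph():
    # A minimal sketch: the sparse weight matrix holds n_neighbors entries
    # per row and each row sums to 1. Data sizes are illustrative.
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    W = barycenter_kneighbors_graph(X, n_neighbors=5)
    assert W.shape == (20, 20)
    assert np.allclose(np.asarray(W.sum(axis=1)).ravel(), 1.0)
    return W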
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None, n_jobs=1):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1  # M = (I - W).T (I - W): add I on the diagonal
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float64)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
(1 - alpha_i) * w_reg[i, :, None])
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
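# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# scikit-learn module).  It exercises the estimator API documented above on
# the classic swiss-roll data set.  The class name and the availability of
# sklearn.datasets.make_swiss_roll are assumptions based on contemporary
# scikit-learn releases.
if __name__ == '__main__':
    from sklearn.datasets import make_swiss_roll
    X, _ = make_swiss_roll(n_samples=500, random_state=0)
    lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2,
                                 method='standard', random_state=0)
    X_2d = lle.fit_transform(X)  # stored as embedding_, shape (500, 2)
    print(X_2d.shape, lle.reconstruction_error_)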
| unlicense |
trishnaguha/ansible | docs/docsite/_extensions/pygments_lexer.py | 4 | 23487 | # -*- coding: utf-8 -*-
# pylint: disable=no-self-argument
#
# Copyright 2006-2017 by the Pygments team, see AUTHORS at
# https://bitbucket.org/birkenfeld/pygments-main/raw/7941677dc77d4f2bf0bbd6140ade85a9454b8b80/AUTHORS
# Copyright by Kirill Simonov (original author of YAML lexer).
# Copyright by Norman Richards (original author of JSON lexer).
#
# Licensed under BSD license:
#
# Copyright (c) 2006-2017 by the respective authors (see AUTHORS file).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, print_function
from pygments.lexer import LexerContext, ExtendedRegexLexer, DelegatingLexer, RegexLexer, bygroups, include
from pygments.lexers import DjangoLexer, DiffLexer
from pygments import token
class AnsibleYamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(AnsibleYamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class AnsibleYamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
.. versionadded:: 0.11
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start() + len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start() + context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', token.Text),
# line breaks
(r'\n+', token.Text),
# a comment
(r'#[^\n]*', token.Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(token.Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(token.Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(token.Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?!\s|$)', save_indent(token.Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', token.Text),
# a comment
(r'#[^\n]*', token.Comment.Single),
# line break
(r'\n', token.Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(token.Text, token.Number), 'ignored-line'),
],
        # the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![\w-]*!)'
r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
bygroups(token.Text, token.Keyword.Type, token.Text, token.Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(token.Text), '#pop:2'),
            # whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(token.Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(token.Punctuation.Indicator)),
            # the beginning of a block line
(r'[ ]*', save_indent(token.Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(token.Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', token.Text),
# key with colon
(r'([^,:?\[\]{}\n]+)(:)(?=[ ]|$)',
bygroups(token.Name.Tag, set_indent(token.Punctuation, implicit=True))),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
something(token.Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors': [
# a full-form tag
(r'!<[\w#;/?:@&=+$,.!~*\'()\[\]%-]+>', token.Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[\w-]+!)?'
r'[\w#;/?:@&=+$,.!~*\'()\[\]%-]+', token.Keyword.Type),
# an anchor
(r'&[\w-]+', token.Name.Label),
# an alias
(r'\*[\w-]+', token.Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(token.Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', token.Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', token.Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', token.Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', token.String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', token.String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', token.Text),
# line breaks
(r'\n+', token.Text),
# a comment
(r'#[^\n]*', token.Comment.Single),
# simple indicators
(r'[?:,]', token.Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
something(token.Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', token.Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# key with colon
(r'([^,:?\[\]{}\n]+)(:)(?=[ ]|$)',
bygroups(token.Name.Tag, token.Punctuation)),
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', token.Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', token.Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(token.Text, token.Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(token.Text)),
# line content
(r'[\S\t ]+', token.Name.Constant),
],
        # the header of a literal or folded scalar (indentation/chomping flags)
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(token.Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(token.Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', token.Text),
(r'[ ]+$', token.Text),
# line breaks are ignored
(r'\n+', token.Text),
# other whitespaces are a part of the value
(r'[ ]+', token.Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', token.String.Escape),
# regular non-whitespace characters
(r'[^\s\']+', token.String),
# the closing quote
(r'\'', token.String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', token.String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
token.String.Escape),
# regular non-whitespace characters
(r'[^\s"\\]+', token.String),
# the closing quote
(r'"', token.String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', token.Text),
# line breaks
(r'\n+', token.Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(token.Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(token.Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(token.Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', token.Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', token.Text),
# line breaks are ignored
(r'\n+', token.Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', token.Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?!\s)|[^\s:])+', token.Literal.Scalar.Plain),
],
        # a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(token.Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', token.Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', token.Text),
(r'[ ]+$', token.Text),
# line breaks are ignored
(r'\n+', token.Text),
# other whitespaces are a part of the value
(r'[ ]+', token.Name.Variable),
# regular non-whitespace characters
(r'[^\s,:?\[\]{}]+', token.Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = AnsibleYamlLexerContext(text, 0)
return super(AnsibleYamlLexer, self).get_tokens_unprocessed(text, context)
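# Hedged helper (added for illustration; not part of the original extension).
# It dumps the raw (position, token, text) stream produced by the
# indentation-aware lexer above, which is handy when debugging the context
# callbacks such as save_indent/set_indent.
def _debug_yaml_tokens(sample="---\n- hosts: all\n  tasks:\n"):
    for pos, tok, text in AnsibleYamlLexer().get_tokens_unprocessed(sample):
        print('%4d  %-40s %r' % (pos, tok, text))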
class AnsibleYamlJinjaLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`AnsibleYamlLexer`.
Commonly used in Saltstack salt states.
.. versionadded:: 2.0
"""
name = 'YAML+Jinja'
aliases = ['yaml+jinja']
filenames = ['*.sls']
mimetypes = ['text/x-yaml+jinja']
def __init__(self, **options):
super(AnsibleYamlJinjaLexer, self).__init__(AnsibleYamlLexer, DjangoLexer, **options)
class AnsibleOutputPrimaryLexer(RegexLexer):
name = 'Ansible-output-primary'
    # The following definitions are borrowed from Pygments' JSON lexer,
    # originally authored by Norman Richards.
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
tokens = {
# #########################################
# # BEGIN: states from JSON lexer #########
# #########################################
'whitespace': [
(r'\s+', token.Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', token.Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
token.Number.Float),
(int_part, token.Number.Integer),
(r'"(\\\\|\\"|[^"])*"', token.String),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', token.Punctuation),
# comma terminates the attribute but expects more
(r',', token.Punctuation, '#pop'),
# a closing bracket terminates the entire object, so pop twice
(r'\}', token.Punctuation, '#pop:2'),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', token.Name.Tag, 'objectattribute'),
(r'\}', token.Punctuation, '#pop'),
],
        # json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', token.Punctuation),
(r'\]', token.Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'\{', token.Punctuation, 'objectvalue'),
(r'\[', token.Punctuation, 'arrayvalue'),
],
# #########################################
# # END: states from JSON lexer ###########
# #########################################
'host-postfix': [
(r'\n', token.Text, '#pop:3'),
(r'( )(=>)( )(\{)',
bygroups(token.Text, token.Punctuation, token.Text, token.Punctuation),
'objectvalue'),
],
'host-error': [
(r'(?:( )(UNREACHABLE|FAILED)(!))?',
bygroups(token.Text, token.Keyword, token.Punctuation),
'host-postfix'),
(r'', token.Text, 'host-postfix'),
],
'host-name': [
(r'(\[)([^ \]]+)(?:( )(=>)( )([^\]]+))?(\])',
bygroups(token.Punctuation, token.Name.Variable, token.Text, token.Punctuation, token.Text, token.Name.Variable, token.Punctuation),
'host-error')
],
'host-result': [
(r'\n', token.Text, '#pop'),
(r'( +)(ok|changed|failed|skipped|unreachable)(=)([0-9]+)',
bygroups(token.Text, token.Keyword, token.Punctuation, token.Number.Integer)),
],
'root': [
(r'(PLAY|TASK|PLAY RECAP)(?:( )(\[)([^\]]+)(\]))?( )(\*+)(\n)',
bygroups(token.Keyword, token.Text, token.Punctuation, token.Literal, token.Punctuation, token.Text, token.Name.Variable, token.Text)),
(r'(fatal|ok|changed|skipping)(:)( )',
bygroups(token.Keyword, token.Punctuation, token.Text),
'host-name'),
(r'([^ ]+)( +)(:)',
bygroups(token.Name, token.Text, token.Punctuation),
'host-result'),
(r'.*\n', token.Other),
],
}
class AnsibleOutputLexer(DelegatingLexer):
name = 'Ansible-output'
aliases = ['ansible-output']
def __init__(self, **options):
super(AnsibleOutputLexer, self).__init__(DiffLexer, AnsibleOutputPrimaryLexer, **options)
# ####################################################################################################
# # Sphinx plugin ####################################################################################
# ####################################################################################################
__version__ = "0.1.0"
__license__ = "BSD license"
__author__ = "Felix Fontein"
__author_email__ = "felix@fontein.de"
def setup(app):
""" Initializer for Sphinx extension API.
See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
"""
for lexer in [AnsibleYamlLexer(startinline=True), AnsibleYamlJinjaLexer(startinline=True), AnsibleOutputLexer(startinline=True)]:
app.add_lexer(lexer.name, lexer)
for alias in lexer.aliases:
app.add_lexer(alias, lexer)
return dict(version=__version__, parallel_read_safe=True)
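# Hedged usage sketch (added; not part of the original Sphinx extension).
# Outside Sphinx, the lexers can be driven through the ordinary pygments
# API; `highlight` and `TerminalFormatter` are standard pygments exports.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    _play = "---\n- hosts: all\n  tasks:\n    - name: ping\n      ping: {}\n"
    print(highlight(_play, AnsibleYamlLexer(), TerminalFormatter()))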
| gpl-3.0 |
dydek/django | tests/proxy_model_inheritance/tests.py | 89 | 2089 | from __future__ import absolute_import, unicode_literals
import os
from django.core.management import call_command
from django.test import TestCase, TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils._os import upath
from .models import (
ConcreteModel, ConcreteModelSubclass, ConcreteModelSubclassProxy,
ProxyModel,
)
class ProxyModelInheritanceTests(TransactionTestCase):
"""
Proxy model inheritance across apps can result in migrate not creating the table
for the proxied model (as described in #12286). This test creates two dummy
apps and calls migrate, then verifies that the table has been created.
"""
available_apps = []
def test_table_exists(self):
with extend_sys_path(os.path.dirname(os.path.abspath(upath(__file__)))):
with self.modify_settings(INSTALLED_APPS={'append': ['app1', 'app2']}):
call_command('migrate', verbosity=0, run_syncdb=True)
from app1.models import ProxyModel
from app2.models import NiceModel
self.assertEqual(NiceModel.objects.all().count(), 0)
self.assertEqual(ProxyModel.objects.all().count(), 0)
class MultiTableInheritanceProxyTest(TestCase):
def test_model_subclass_proxy(self):
"""
Deleting an instance of a model proxying a multi-table inherited
subclass should cascade delete down the whole inheritance chain (see
#18083).
"""
instance = ConcreteModelSubclassProxy.objects.create()
instance.delete()
self.assertEqual(0, ConcreteModelSubclassProxy.objects.count())
self.assertEqual(0, ConcreteModelSubclass.objects.count())
self.assertEqual(0, ConcreteModel.objects.count())
def test_deletion_through_intermediate_proxy(self):
child = ConcreteModelSubclass.objects.create()
proxy = ProxyModel.objects.get(pk=child.pk)
proxy.delete()
self.assertFalse(ConcreteModel.objects.exists())
self.assertFalse(ConcreteModelSubclass.objects.exists())
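# Hedged sketch (added for illustration; the real definitions live in this
# package's models.py, which is not shown here).  The shape these tests
# exercise -- a multi-table child plus a proxy over that child -- is declared
# as below; the _Example* names are hypothetical stand-ins.
from django.db import models
class _ExampleBase(models.Model):
    class Meta:
        app_label = 'proxy_model_inheritance'
class _ExampleChild(_ExampleBase):
    class Meta:
        app_label = 'proxy_model_inheritance'
class _ExampleChildProxy(_ExampleChild):
    class Meta:
        proxy = True
        app_label = 'proxy_model_inheritance'
# Deleting an _ExampleChildProxy cascades through _ExampleChild to _ExampleBase.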
| bsd-3-clause |
alinbalutoiu/tempest | tempest/api/orchestration/stacks/test_neutron_resources.py | 9 | 8941 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
from tempest.api.orchestration import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
@classmethod
def skip_checks(cls):
super(NeutronResourcesTestJSON, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException("Neutron support is required")
@classmethod
def setup_credentials(cls):
super(NeutronResourcesTestJSON, cls).setup_credentials()
cls.os = clients.Manager()
@classmethod
def setup_clients(cls):
super(NeutronResourcesTestJSON, cls).setup_clients()
cls.network_client = cls.os.network_client
@classmethod
def resource_setup(cls):
super(NeutronResourcesTestJSON, cls).resource_setup()
cls.neutron_basic_template = cls.load_template('neutron_basic')
cls.stack_name = data_utils.rand_name('heat')
template = cls.read_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
cls.external_network_id = CONF.network.public_network_id
tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
cls.subnet_cidr = tenant_cidr.subnet(mask_bits).next()
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
template,
parameters={
'KeyName': cls.keypair_name,
'InstanceType': CONF.orchestration.instance_type,
'ImageId': CONF.compute.image_ref,
'ExternalNetworkId': cls.external_network_id,
'timeout': CONF.orchestration.build_timeout,
'DNSServers': CONF.network.dns_servers,
'SubNetCidr': str(cls.subnet_cidr)
})
cls.stack_id = cls.stack_identifier.split('/')[1]
try:
cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
resources = cls.client.list_resources(cls.stack_identifier)
except exceptions.TimeoutException as e:
if CONF.compute_feature_enabled.console_output:
# attempt to log the server console to help with debugging
# the cause of the server not signalling the waitcondition
# to heat.
body = cls.client.show_resource(cls.stack_identifier,
'Server')
server_id = body['physical_resource_id']
LOG.debug('Console output for %s', server_id)
output = cls.servers_client.get_console_output(
server_id, None).data
LOG.debug(output)
raise e
cls.test_resources = {}
for resource in resources:
cls.test_resources[resource['logical_resource_id']] = resource
@test.idempotent_id('f9e2664c-bc44-4eef-98b6-495e4f9d74b3')
def test_created_resources(self):
"""Verifies created neutron resources."""
resources = [('Network', self.neutron_basic_template['resources'][
'Network']['type']),
('Subnet', self.neutron_basic_template['resources'][
'Subnet']['type']),
('RouterInterface', self.neutron_basic_template[
'resources']['RouterInterface']['type']),
('Server', self.neutron_basic_template['resources'][
'Server']['type'])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name, None)
self.assertIsInstance(resource, dict)
self.assertEqual(resource_name, resource['logical_resource_id'])
self.assertEqual(resource_type, resource['resource_type'])
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
@test.idempotent_id('c572b915-edb1-4e90-b196-c7199a6848c0')
@test.services('network')
def test_created_network(self):
"""Verifies created network."""
network_id = self.test_resources.get('Network')['physical_resource_id']
body = self.network_client.show_network(network_id)
network = body['network']
self.assertIsInstance(network, dict)
self.assertEqual(network_id, network['id'])
self.assertEqual(self.neutron_basic_template['resources'][
'Network']['properties']['name'], network['name'])
@test.idempotent_id('e8f84b96-f9d7-4684-ad5f-340203e9f2c2')
@test.services('network')
def test_created_subnet(self):
"""Verifies created subnet."""
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
body = self.network_client.show_subnet(subnet_id)
subnet = body['subnet']
network_id = self.test_resources.get('Network')['physical_resource_id']
self.assertEqual(subnet_id, subnet['id'])
self.assertEqual(network_id, subnet['network_id'])
self.assertEqual(self.neutron_basic_template['resources'][
'Subnet']['properties']['name'], subnet['name'])
self.assertEqual(sorted(CONF.network.dns_servers),
sorted(subnet['dns_nameservers']))
self.assertEqual(self.neutron_basic_template['resources'][
'Subnet']['properties']['ip_version'], subnet['ip_version'])
self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
@test.idempotent_id('96af4c7f-5069-44bc-bdcf-c0390f8a67d1')
@test.services('network')
def test_created_router(self):
"""Verifies created router."""
router_id = self.test_resources.get('Router')['physical_resource_id']
body = self.network_client.show_router(router_id)
router = body['router']
self.assertEqual(self.neutron_basic_template['resources'][
'Router']['properties']['name'], router['name'])
self.assertEqual(self.external_network_id,
router['external_gateway_info']['network_id'])
self.assertEqual(True, router['admin_state_up'])
@test.idempotent_id('89f605bd-153e-43ee-a0ed-9919b63423c5')
@test.services('network')
def test_created_router_interface(self):
"""Verifies created router interface."""
router_id = self.test_resources.get('Router')['physical_resource_id']
network_id = self.test_resources.get('Network')['physical_resource_id']
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
body = self.network_client.list_ports()
ports = body['ports']
router_ports = filter(lambda port: port['device_id'] ==
router_id, ports)
created_network_ports = filter(lambda port: port['network_id'] ==
network_id, router_ports)
self.assertEqual(1, len(created_network_ports))
router_interface = created_network_ports[0]
fixed_ips = router_interface['fixed_ips']
subnet_fixed_ips = filter(lambda port: port['subnet_id'] ==
subnet_id, fixed_ips)
self.assertEqual(1, len(subnet_fixed_ips))
router_interface_ip = subnet_fixed_ips[0]['ip_address']
self.assertEqual(str(self.subnet_cidr.iter_hosts().next()),
router_interface_ip)
@test.idempotent_id('75d85316-4ac2-4c0e-a1a9-edd2148fc10e')
@test.services('compute', 'network')
def test_created_server(self):
"""Verifies created sever."""
server_id = self.test_resources.get('Server')['physical_resource_id']
server = self.servers_client.show_server(server_id)
self.assertEqual(self.keypair_name, server['key_name'])
self.assertEqual('ACTIVE', server['status'])
network = server['addresses'][self.neutron_basic_template['resources'][
'Network']['properties']['name']][0]
self.assertEqual(4, network['version'])
self.assertIn(netaddr.IPAddress(network['addr']), self.subnet_cidr)
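# Hedged sketch (added; not part of tempest).  It shows the subnet-CIDR
# derivation used in resource_setup above in isolation; netaddr is the same
# third-party library this module already imports.
def _first_subnet(cidr='10.100.0.0/16', mask_bits=28):
    tenant_net = netaddr.IPNetwork(cidr)
    return str(next(tenant_net.subnet(mask_bits)))  # e.g. '10.100.0.0/28'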
| apache-2.0 |
frankbp/robotframework-selenium2library | src/Selenium2Library/__init__.py | 31 | 6631 | import os
from keywords import *
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
execfile(os.path.join(THIS_DIR, 'version.py'))
__version__ = VERSION
class Selenium2Library(
_LoggingKeywords,
_RunOnFailureKeywords,
_BrowserManagementKeywords,
_ElementKeywords,
_TableElementKeywords,
_FormElementKeywords,
_SelectElementKeywords,
_JavaScriptKeywords,
_CookieKeywords,
_ScreenshotKeywords,
_WaitingKeywords
):
"""Selenium2Library is a web testing library for Robot Framework.
It uses the Selenium 2 (WebDriver) libraries internally to control a web browser.
See http://seleniumhq.org/docs/03_webdriver.html for more information on Selenium 2
and WebDriver.
Selenium2Library runs tests in a real browser instance. It should work in
most modern browsers and can be used with both Python and Jython interpreters.
*Before running tests*
Prior to running test cases using Selenium2Library, Selenium2Library must be
imported into your Robot test suite (see `importing` section), and the
`Open Browser` keyword must be used to open a browser to the desired location.
*Locating elements*
All keywords in Selenium2Library that need to find an element on the page
take an argument, `locator`. By default, when a locator value is provided,
it is matched against the key attributes of the particular element type.
For example, `id` and `name` are key attributes to all elements, and
    locating elements is easy using just the `id` as a `locator`::
Click Element my_element
It is also possible to specify the approach Selenium2Library should take
to find an element by specifying a lookup strategy with a locator
prefix. Supported strategies are:
| *Strategy* | *Example* | *Description* |
| identifier | Click Element `|` identifier=my_element | Matches by @id or @name attribute |
| id | Click Element `|` id=my_element | Matches by @id attribute |
| name | Click Element `|` name=my_element | Matches by @name attribute |
| xpath | Click Element `|` xpath=//div[@id='my_element'] | Matches with arbitrary XPath expression |
    | dom | Click Element `|` dom=document.images[56] | Matches with arbitrary DOM expression |
| link | Click Element `|` link=My Link | Matches anchor elements by their link text |
| css | Click Element `|` css=div.my_class | Matches by CSS selector |
| jquery | Click Element `|` jquery=div.my_class | Matches by jQuery/sizzle selector |
| sizzle | Click Element `|` sizzle=div.my_class | Matches by jQuery/sizzle selector |
| tag | Click Element `|` tag=div | Matches by HTML tag name |
Table related keywords, such as `Table Should Contain`, work differently.
By default, when a table locator value is provided, it will search for
a table with the specified `id` attribute. For example:
Table Should Contain my_table text
More complex table lookup strategies are also supported:
| *Strategy* | *Example* | *Description* |
    | css | Table Should Contain `|` css=table.my_class `|` text | Matches by CSS selector |
    | xpath | Table Should Contain `|` xpath=//table[@name="my_table"] `|` text | Matches with arbitrary XPath expression |
*Timeouts*
There are several `Wait ...` keywords that take timeout as an
argument. All of these timeout arguments are optional. The timeout
used by all of them can be set globally using the
`Set Selenium Timeout` keyword.
    All timeouts can be given as numbers interpreted as seconds (e.g. 0.5 or 42)
or in Robot Framework's time syntax (e.g. '1.5 seconds' or '1 min 30 s').
For more information about the time syntax see:
http://robotframework.googlecode.com/svn/trunk/doc/userguide/RobotFrameworkUserGuide.html#time-format.
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = VERSION
def __init__(self, timeout=5.0, implicit_wait=0.0, run_on_failure='Capture Page Screenshot'):
"""Selenium2Library can be imported with optional arguments.
`timeout` is the default timeout used to wait for all waiting actions.
It can be later set with `Set Selenium Timeout`.
'implicit_wait' is the implicit timeout that Selenium waits when
looking for elements.
It can be later set with `Set Selenium Implicit Wait`.
See `WebDriver: Advanced Usage`__ section of the SeleniumHQ documentation
for more information about WebDriver's implicit wait functionality.
__ http://seleniumhq.org/docs/04_webdriver_advanced.html#explicit-and-implicit-waits
`run_on_failure` specifies the name of a keyword (from any available
libraries) to execute when a Selenium2Library keyword fails. By default
`Capture Page Screenshot` will be used to take a screenshot of the current page.
Using the value "Nothing" will disable this feature altogether. See
`Register Keyword To Run On Failure` keyword for more information about this
functionality.
Examples:
| Library `|` Selenium2Library `|` 15 | # Sets default timeout to 15 seconds |
| Library `|` Selenium2Library `|` 0 `|` 5 | # Sets default timeout to 0 seconds and default implicit_wait to 5 seconds |
| Library `|` Selenium2Library `|` 5 `|` run_on_failure=Log Source | # Sets default timeout to 5 seconds and runs `Log Source` on failure |
| Library `|` Selenium2Library `|` implicit_wait=5 `|` run_on_failure=Log Source | # Sets default implicit_wait to 5 seconds and runs `Log Source` on failure |
| Library `|` Selenium2Library `|` timeout=10 `|` run_on_failure=Nothing | # Sets default timeout to 10 seconds and does nothing on failure |
"""
for base in Selenium2Library.__bases__:
base.__init__(self)
self.set_selenium_timeout(timeout)
self.set_selenium_implicit_wait(implicit_wait)
self.register_keyword_to_run_on_failure(run_on_failure)
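# Hedged usage sketch (added; not part of the released library).  The
# keywords documented above become plain methods when the library is driven
# from Python, using the names Robot Framework maps them from (lower-cased,
# spaces replaced by underscores).  A locally installed Firefox is assumed.
if __name__ == '__main__':
    s2l = Selenium2Library(timeout=10, run_on_failure='Nothing')
    s2l.open_browser('http://example.com', 'firefox')
    s2l.click_element('link=More information...')  # `link` locator strategy
    s2l.close_all_browsers()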
| apache-2.0 |
rjschof/gem5 | ext/ply/test/yacc_unused.py | 174 | 1669 | # -----------------------------------------------------------------------------
# yacc_unused.py
#
# A grammar with an unused rule
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_expr_list(t):
'exprlist : exprlist COMMA expression'
pass
def p_expr_list_2(t):
'exprlist : expression'
pass
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
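# Hedged sketch (added; not part of the original ply test).  Importing
# calclex above builds its lexer as a side effect, so the freshly built
# parser can be driven directly: the first statement binds `x` and the
# second, via p_statement_expr, prints 17.
if __name__ == '__main__':
    yacc.parse('x = 3 * 4 + 5')
    yacc.parse('x')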
| bsd-3-clause |
gencer/sentry | src/sentry/plugins/base/bindings.py | 1 | 1075 | from __future__ import absolute_import, print_function
import six
from sentry.plugins import providers
class ProviderManager(object):
type = None
def __init__(self):
self._items = {}
def __iter__(self):
return iter(self._items)
def add(self, item, id):
if self.type and not issubclass(item, self.type):
raise ValueError('Invalid type for provider: {}'.format(
type(item),
))
self._items[id] = item
def get(self, id):
return self._items[id]
def all(self):
return six.iteritems(self._items)
class RepositoryProviderManager(ProviderManager):
type = providers.RepositoryProvider
class BindingManager(object):
BINDINGS = {
'repository.provider': RepositoryProviderManager,
}
def __init__(self):
self._bindings = {k: v() for k, v in six.iteritems(self.BINDINGS)}
def add(self, name, binding, **kwargs):
self._bindings[name].add(binding, **kwargs)
def get(self, name):
return self._bindings[name]
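# Hedged usage sketch (added; not part of the original module).  The
# 'example' id is hypothetical; in Sentry proper, plugins register concrete
# RepositoryProvider subclasses under their own ids.
if __name__ == '__main__':
    bindings = BindingManager()
    bindings.add('repository.provider', providers.RepositoryProvider,
                 id='example')
    manager = bindings.get('repository.provider')
    print(dict(manager.all()))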
| bsd-3-clause |
Learningtribes/edx-platform | common/test/acceptance/tests/studio/test_studio_textbooks.py | 8 | 3445 | """
Acceptance tests for Studio related to the textbooks.
"""
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
from ...pages.studio.textbook_upload import TextbookUploadPage
from ...pages.lms.textbook_view import TextbookViewPage
from ...tests.helpers import disable_animations
from nose.plugins.attrib import attr
@attr('shard_2')
class TextbooksTest(StudioCourseTest):
"""
    Test that textbook functionality works properly on the Studio side.
"""
def setUp(self, is_staff=True):
"""
Install a course with no content using a fixture.
"""
super(TextbooksTest, self).setUp(is_staff)
self.textbook_upload_page = TextbookUploadPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.textbook_upload_page.visit()
disable_animations(self)
self.textbook_view_page = TextbookViewPage(self.browser, self.course_id)
def test_create_first_book_message(self):
"""
Scenario: A message is displayed on the textbooks page when there are no uploaded textbooks
Given that I am viewing the Textbooks page in Studio
And I have not yet uploaded a textbook
Then I see a message stating that I have not uploaded any textbooks
"""
message = self.textbook_upload_page.get_element_text('.wrapper-content .no-textbook-content')
self.assertIn("You haven't added any textbooks", message)
def test_new_textbook_upload(self):
"""
Scenario: View Live link for textbook is correctly populated
Given that I am viewing the Textbooks page in Studio
        And I have uploaded a PDF textbook and saved the new textbook information
Then the "View Live" link contains a link to the textbook in the LMS
"""
self.textbook_upload_page.upload_new_textbook()
self.assertTrue(self.textbook_upload_page.is_view_live_link_worked())
@attr('a11y')
def test_textbook_page_a11y(self):
"""
        Uploads a new textbook.
        Runs an accessibility test on the textbook page in the LMS.
"""
self.textbook_upload_page.upload_new_textbook()
self.textbook_view_page.visit()
self.textbook_view_page.a11y_audit.config.set_rules({
'ignore': [
'color-contrast', # AC-500
'skip-link', # AC-501
'link-href', # AC-502
'section' # AC-503
],
})
self.textbook_view_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
def test_pdf_viewer_a11y(self):
"""
        Uploads a new textbook.
        Runs an accessibility test on the PDF viewer frame in the LMS.
"""
self.textbook_upload_page.upload_new_textbook()
self.textbook_view_page.visit()
self.textbook_view_page.switch_to_pdf_frame(self)
self.textbook_view_page.a11y_audit.config.set_rules({
'ignore': [
'color-contrast', # will always fail because pdf.js converts pdf to divs with transparent text
'html-lang', # AC-504
'meta-viewport', # AC-505
'skip-link', # AC-506
'link-href', # AC-507
],
})
self.textbook_view_page.a11y_audit.check_for_accessibility_errors()
| agpl-3.0 |
andrei4ka/fuel-web-redhat | fuel_agent_ci/fuel_agent_ci/objects/http.py | 6 | 1483 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Http(Object):
__typename__ = 'http'
def __init__(self, env, name, http_root, port, network,
status_url='/status', shutdown_url='/shutdown'):
self.name = name
self.env = env
self.http_root = http_root
self.port = port
self.network = network
self.status_url = status_url
self.shutdown_url = shutdown_url
def start(self):
if not self.status():
LOG.debug('Starting HTTP server')
self.env.driver.http_start(self)
def stop(self):
if self.status():
LOG.debug('Stopping HTTP server')
self.env.driver.http_stop(self)
def status(self):
status = self.env.driver.http_status(self)
LOG.debug('HTTP status %s' % status)
return status
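# Hedged usage sketch (added; not part of the original module).  `env` is
# whatever environment object fuel_agent_ci wires in; the stubs below are
# hypothetical and exist only to make the example self-contained.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    class _StubDriver(object):
        def http_start(self, http):
            LOG.info('started %s on port %s', http.name, http.port)
        def http_stop(self, http):
            LOG.info('stopped %s', http.name)
        def http_status(self, http):
            return False
    class _StubEnv(object):
        driver = _StubDriver()
    http = Http(_StubEnv(), 'docs', http_root='/tmp/www', port=8000,
                network='net0')
    http.start()  # status() is False, so the driver's http_start runs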
| apache-2.0 |
ity/pants | tests/python/pants_test/backend/jvm/tasks/test_ivy_utils.py | 4 | 27425 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import xml.etree.ElementTree as ET
from collections import namedtuple
from textwrap import dedent
from twitter.common.collections import OrderedSet
from pants.backend.jvm.ivy_utils import (FrozenResolution, IvyFetchStep, IvyInfo, IvyModule,
IvyModuleRef, IvyResolveMappingError, IvyResolveResult,
IvyResolveStep, IvyUtils)
from pants.backend.jvm.jar_dependency_utils import M2Coordinate
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.backend.jvm.subsystems.jar_dependency_management import JarDependencyManagement
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.build_graph.register import build_file_aliases as register_core
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.util.contextutil import temporary_dir, temporary_file, temporary_file_path
from pants_test.base_test import BaseTest
from pants_test.subsystem.subsystem_util import subsystem_instance
def coord(org, name, classifier=None, rev=None, ext=None):
rev = rev or '0.0.1'
return M2Coordinate(org=org, name=name, rev=rev, classifier=classifier, ext=ext)
def return_resolve_result_missing_artifacts(*args, **kwargs):
return namedtuple('mock_resolve', ['all_linked_artifacts_exist'])(lambda: False)
def do_nothing(*args, **kwargs):
pass
class IvyUtilsTestBase(BaseTest):
@property
def alias_groups(self):
return register_core().merge(register_jvm())
class IvyUtilsGenerateIvyTest(IvyUtilsTestBase):
def setUp(self):
super(IvyUtilsGenerateIvyTest, self).setUp()
self.add_to_build_file('src/java/targets',
dedent("""
jar_library(
name='a',
jars=[
jar('org1', 'name1', 'rev1'),
jar('org2', 'name2', 'rev2', force=True),
],
)
"""))
self.b_org = 'com.example'
self.b_name = 'b'
self.add_to_build_file('src/java/targets',
dedent("""
java_library(
name='b',
dependencies=[':a'],
provides=artifact('{org}', '{name}', repo=repository()),
sources=['z.java'],
)
""".format(org=self.b_org, name=self.b_name)))
self.add_to_build_file('3rdparty',
dedent("""
jar_library(
name='example-morx',
jars = [
jar(org='commons-lang', name='commons-lang', rev='2.5', classifier='morx'),
]
)
jar_library(
name='example-fleem',
jars = [
jar(org='commons-lang', name='commons-lang', rev='2.5', classifier='fleem'),
]
)
"""))
self.add_to_build_file('src/java/targets',
dedent("""
java_library(
name='c',
dependencies=[
'3rdparty:example-morx',
'3rdparty:example-fleem',
],
sources=['w.java'],
)
""".format(org=self.b_org, name=self.b_name)))
self.add_to_build_file('src/java/targets',
dedent("""
java_library(
name='e',
dependencies=[
'3rdparty:example-morx',
'3rdparty:example-fleem',
],
excludes=[exclude(org='commons-lang', name='commons-lang')],
sources=['w.java'],
)
""".format(org=self.b_org, name=self.b_name)))
self.a = self.target('src/java/targets:a')
self.b = self.target('src/java/targets:b')
self.c = self.target('src/java/targets:c')
self.e = self.target('src/java/targets:e')
def test_exclude_exported(self):
jars, excludes = IvyUtils.calculate_classpath([self.b])
for jar in jars:
self.assertEqual(jar.excludes, (Exclude(self.b_org, self.b_name),))
self.assertEqual(excludes, set())
def test_excludes_are_generated(self):
_, excludes = IvyUtils.calculate_classpath([self.e])
self.assertSetEqual(excludes, {Exclude(org='commons-lang', name='commons-lang')})
def test_classifiers(self):
jars, _ = IvyUtils.calculate_classpath([self.c])
jars.sort(key=lambda jar: jar.classifier)
self.assertEquals(['fleem', 'morx'], [jar.classifier for jar in jars])
def test_module_ref_str_minus_classifier(self):
module_ref = IvyModuleRef(org='org', name='name', rev='rev')
self.assertEquals("IvyModuleRef(org:name:rev::jar)", str(module_ref))
def test_force_override(self):
jars = list(self.a.payload.jars)
with temporary_file_path() as ivyxml:
with subsystem_instance(JarDependencyManagement):
IvyUtils.generate_ivy([self.a], jars=jars, excludes=[], ivyxml=ivyxml, confs=['default'])
doc = ET.parse(ivyxml).getroot()
conf = self.find_single(doc, 'configurations/conf')
self.assert_attributes(conf, name='default')
dependencies = list(doc.findall('dependencies/dependency'))
self.assertEqual(2, len(dependencies))
dep1 = dependencies[0]
self.assert_attributes(dep1, org='org1', name='name1', rev='rev1')
conf = self.find_single(dep1, 'conf')
self.assert_attributes(conf, name='default', mapped='default')
dep2 = dependencies[1]
self.assert_attributes(dep2, org='org2', name='name2', rev='rev2', force='true')
conf = self.find_single(dep1, 'conf')
self.assert_attributes(conf, name='default', mapped='default')
override = self.find_single(doc, 'dependencies/override')
self.assert_attributes(override, org='org2', module='name2', rev='rev2')
def test_resolve_conflict_missing_versions(self):
v1 = JarDependency('org.example', 'foo', None, force=False)
v2 = JarDependency('org.example', 'foo', '2', force=False)
self.assertIs(v2, IvyUtils._resolve_conflict(v1, v2))
self.assertIs(v2, IvyUtils._resolve_conflict(v2, v1))
  def test_resolve_conflict_no_conflicts(self):
v1 = JarDependency('org.example', 'foo', '1', force=False)
v1_force = JarDependency('org.example', 'foo', '1', force=True)
v2 = JarDependency('org.example', 'foo', '2', force=False)
# If neither version is forced, use the latest version.
self.assertIs(v2, IvyUtils._resolve_conflict(v1, v2))
self.assertIs(v2, IvyUtils._resolve_conflict(v2, v1))
# If an earlier version is forced, use the forced version.
self.assertIs(v1_force, IvyUtils._resolve_conflict(v1_force, v2))
self.assertIs(v1_force, IvyUtils._resolve_conflict(v2, v1_force))
# If the same version is forced, use the forced version.
self.assertIs(v1_force, IvyUtils._resolve_conflict(v1, v1_force))
self.assertIs(v1_force, IvyUtils._resolve_conflict(v1_force, v1))
# If the same force is in play in multiple locations, allow it.
self.assertIs(v1_force, IvyUtils._resolve_conflict(v1_force, v1_force))
def test_resolve_conflict_conflict(self):
v1_force = JarDependency('org.example', 'foo', '1', force=True)
v2_force = JarDependency('org.example', 'foo', '2', force=True)
with self.assertRaises(IvyUtils.IvyResolveConflictingDepsError):
IvyUtils._resolve_conflict(v1_force, v2_force)
with self.assertRaises(IvyUtils.IvyResolveConflictingDepsError):
IvyUtils._resolve_conflict(v2_force, v1_force)
def test_get_resolved_jars_for_coordinates(self):
ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_diamond.xml')
resolved_jars = ivy_info.get_resolved_jars_for_coordinates([JarDependency(org='org1',
name='name1',
rev='0.0.1',
classifier='tests')])
expected = {'ivy2cache_path/org1/name1.jar': coord(org='org1', name='name1',
classifier='tests'),
'ivy2cache_path/org2/name2.jar': coord(org='org2', name='name2'),
'ivy2cache_path/org3/name3.tar.gz': coord(org='org3', name='name3', ext='tar.gz')}
self.maxDiff = None
coordinate_by_path = {rj.cache_path: rj.coordinate for rj in resolved_jars}
self.assertEqual(expected, coordinate_by_path)
def test_resolved_jars_with_different_version(self):
# If a jar is resolved as a different version than the requested one, the coordinates of
# the resolved jar should match the artifact, not the requested coordinates.
ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_resolve_to_other_version.xml')
resolved_jars = ivy_info.get_resolved_jars_for_coordinates([JarDependency(org='org1',
name='name1',
rev='0.0.1',
classifier='tests')])
self.maxDiff = None
self.assertEqual([coord(org='org1', name='name1',
classifier='tests',
rev='0.0.2')],
[jar.coordinate for jar in resolved_jars])
def test_does_not_visit_diamond_dep_twice(self):
ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_diamond.xml')
ref = IvyModuleRef("toplevel", "toplevelmodule", "latest")
seen = set()
def collector(r):
self.assertNotIn(r, seen)
seen.add(r)
return {r}
result = ivy_info.traverse_dependency_graph(ref, collector)
self.assertEqual({IvyModuleRef("toplevel", "toplevelmodule", "latest"),
IvyModuleRef(org='org1', name='name1', rev='0.0.1', classifier='tests'),
IvyModuleRef(org='org2', name='name2', rev='0.0.1'),
IvyModuleRef(org='org3', name='name3', rev='0.0.1', ext='tar.gz')},
result)
def test_does_not_follow_cycle(self):
ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_cycle.xml')
ref = IvyModuleRef("toplevel", "toplevelmodule", "latest")
seen = set()
def collector(r):
self.assertNotIn(r, seen)
seen.add(r)
return {r}
result = ivy_info.traverse_dependency_graph(ref, collector)
self.assertEqual(
{
IvyModuleRef("toplevel", "toplevelmodule", "latest"),
IvyModuleRef(org='org1', name='name1', rev='0.0.1'),
IvyModuleRef(org='org2', name='name2', rev='0.0.1'),
IvyModuleRef(org='org3', name='name3', rev='0.0.1')
},
result)
def test_memo_reused_across_calls(self):
ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_diamond.xml')
ref = IvyModuleRef(org='org1', name='name1', rev='0.0.1')
def collector(r):
return {r}
memo = dict()
result1 = ivy_info.traverse_dependency_graph(ref, collector, memo=memo)
result2 = ivy_info.traverse_dependency_graph(ref, collector, memo=memo)
self.assertIs(result1, result2)
self.assertEqual(
{
IvyModuleRef(org='org1', name='name1', rev='0.0.1'),
IvyModuleRef(org='org2', name='name2', rev='0.0.1'),
IvyModuleRef(org='org3', name='name3', rev='0.0.1', ext='tar.gz')
},
result1)
def test_retrieve_resolved_jars_with_coordinates_on_flat_fetch_resolve(self):
ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_flat_graph.xml')
coordinates = [coord(org='org1', name='name1', classifier='tests', rev='0.0.1')]
result = ivy_info.get_resolved_jars_for_coordinates(coordinates)
self.assertEqual(coordinates, [r.coordinate for r in result])
def test_retrieve_resolved_jars_with_coordinates_differing_on_version_on_flat_fetch_resolve(self):
ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_flat_graph.xml')
coordinates = [coord(org='org2', name='name2', rev='0.0.0')]
result = ivy_info.get_resolved_jars_for_coordinates(coordinates)
self.assertEqual([coord(org='org2', name='name2', rev='0.0.1')],
[r.coordinate for r in result])
def test_parse_fails_when_same_classifier_different_type(self):
with self.assertRaises(IvyResolveMappingError):
self.parse_ivy_report('ivy_utils_resources/report_with_same_classifier_different_type.xml')
def find_single(self, elem, xpath):
results = list(elem.findall(xpath))
self.assertEqual(1, len(results))
return results[0]
def assert_attributes(self, elem, **kwargs):
self.assertEqual(dict(**kwargs), dict(elem.attrib))
def test_construct_and_load_symlink_map(self):
self.maxDiff = None
with temporary_dir() as mock_cache_dir:
with temporary_dir() as symlink_dir:
with temporary_dir() as classpath_dir:
input_path = os.path.join(classpath_dir, 'inpath')
output_path = os.path.join(classpath_dir, 'classpath')
foo_path = os.path.join(mock_cache_dir, 'foo.jar')
with open(foo_path, 'w') as foo:
foo.write("test jar contents")
with open(input_path, 'w') as inpath:
inpath.write(foo_path)
result_classpath, result_map = IvyUtils.construct_and_load_symlink_map(symlink_dir,
mock_cache_dir,
input_path,
output_path)
symlink_foo_path = os.path.join(symlink_dir, 'foo.jar')
self.assertEquals([symlink_foo_path], result_classpath)
self.assertEquals(
{
os.path.realpath(foo_path): symlink_foo_path
},
result_map)
with open(output_path, 'r') as outpath:
self.assertEquals(symlink_foo_path, outpath.readline())
self.assertTrue(os.path.islink(symlink_foo_path))
self.assertTrue(os.path.exists(symlink_foo_path))
# Now add an additional path to the existing map
bar_path = os.path.join(mock_cache_dir, 'bar.jar')
with open(bar_path, 'w') as bar:
bar.write("test jar contents2")
with open(input_path, 'w') as inpath:
inpath.write(os.pathsep.join([foo_path, bar_path]))
result_classpath, result_map = IvyUtils.construct_and_load_symlink_map(symlink_dir,
mock_cache_dir,
input_path,
output_path)
symlink_bar_path = os.path.join(symlink_dir, 'bar.jar')
self.assertEquals(
{
os.path.realpath(foo_path): symlink_foo_path,
os.path.realpath(bar_path): symlink_bar_path,
},
result_map)
self.assertEquals([symlink_foo_path, symlink_bar_path], result_classpath)
with open(output_path, 'r') as outpath:
self.assertEquals(symlink_foo_path + os.pathsep + symlink_bar_path, outpath.readline())
self.assertTrue(os.path.islink(symlink_foo_path))
self.assertTrue(os.path.exists(symlink_foo_path))
self.assertTrue(os.path.islink(symlink_bar_path))
self.assertTrue(os.path.exists(symlink_bar_path))
# Reverse the ordering and make sure order is preserved in the output path
with open(input_path, 'w') as inpath:
inpath.write(os.pathsep.join([bar_path, foo_path]))
IvyUtils.construct_and_load_symlink_map(symlink_dir,
mock_cache_dir,
input_path,
output_path)
with open(output_path, 'r') as outpath:
self.assertEquals(symlink_bar_path + os.pathsep + symlink_foo_path, outpath.readline())
def test_missing_ivy_report(self):
self.set_options_for_scope(IvySubsystem.options_scope,
cache_dir='DOES_NOT_EXIST',
use_nailgun=False)
# Hack to initialize Ivy subsystem
self.context()
with self.assertRaises(IvyUtils.IvyResolveReportError):
IvyUtils.parse_xml_report('default', IvyUtils.xml_report_path('INVALID_CACHE_DIR',
'INVALID_REPORT_UNIQUE_NAME',
'default'))
def parse_ivy_report(self, rel_path):
path = os.path.join('tests/python/pants_test/backend/jvm/tasks', rel_path)
ivy_info = IvyUtils.parse_xml_report(conf='default', path=path)
self.assertIsNotNone(ivy_info)
return ivy_info
def test_ivy_module_ref_cmp(self):
self.assertEquals(
IvyModuleRef('foo', 'bar', '1.2.3'), IvyModuleRef('foo', 'bar', '1.2.3'))
self.assertTrue(
IvyModuleRef('foo1', 'bar', '1.2.3') < IvyModuleRef('foo2', 'bar', '1.2.3'))
self.assertTrue(
      IvyModuleRef('foo2', 'bar', '1.2.3') > IvyModuleRef('foo1', 'bar', '1.2.3'))
self.assertTrue(
IvyModuleRef('foo', 'bar1', '1.2.3') < IvyModuleRef('foo', 'bar2', '1.2.3'))
self.assertTrue(
IvyModuleRef('foo', 'bar2', '1.2.3') > IvyModuleRef('foo', 'bar1', '1.2.3'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.3') < IvyModuleRef('foo', 'bar', '1.2.4'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.4') > IvyModuleRef('foo', 'bar', '1.2.3'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.3', ext='jar') < IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz') > IvyModuleRef('foo', 'bar', '1.2.3', ext='jar'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.3', ext='jar', classifier='javadoc')
< IvyModuleRef('foo', 'bar', '1.2.3', ext='jar', classifier='sources'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz', classifier='sources')
> IvyModuleRef('foo', 'bar', '1.2.3', ext='jar', classifier='javadoc'))
# make sure rev is sorted last
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.4', classifier='javadoc')
< IvyModuleRef('foo', 'bar', '1.2.3', classifier='sources'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.3', classifier='sources')
> IvyModuleRef('foo', 'bar', '1.2.4', classifier='javadoc'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.4', ext='jar')
< IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz'))
self.assertTrue(
IvyModuleRef('foo', 'bar', '1.2.3', ext='tgz')
> IvyModuleRef('foo', 'bar', '1.2.4', ext='jar'))
def test_traverse_dep_graph_sorted(self):
"""Make sure the modules are returned in a deterministic order by name"""
def make_ref(org, name):
return IvyModuleRef(org=org, name=name, rev='1.0')
ref1 = make_ref('foo', '1')
ref2 = make_ref('foo', 'child1')
ref3 = make_ref('foo', 'child2')
ref4 = make_ref('foo', 'child3')
ref5 = make_ref('foo', 'grandchild1')
ref6 = make_ref('foo', 'grandchild2')
module1 = IvyModule(ref1, '/foo', [])
module2 = IvyModule(ref2, '/foo', [ref1])
module3 = IvyModule(ref3, '/foo', [ref1])
module4 = IvyModule(ref4, '/foo', [ref1])
module5 = IvyModule(ref5, '/foo', [ref3])
module6 = IvyModule(ref6, '/foo', [ref3])
def assert_order(inputs):
info = IvyInfo('default')
for module in inputs:
info.add_module(module)
def collector(dep):
return OrderedSet([dep])
result = [ref for ref in info.traverse_dependency_graph(ref1, collector)]
self.assertEquals([ref1, ref2, ref3, ref5, ref6, ref4],
result)
    # Make sure the order remains unchanged no matter what order we insert them into the structure
assert_order([module1, module2, module3, module4, module5, module6])
assert_order([module6, module5, module4, module3, module2, module1])
assert_order([module5, module1, module2, module6, module3, module4])
    assert_order([module6, module4, module3, module1, module2, module5])
assert_order([module4, module2, module1, module3, module6, module5])
assert_order([module4, module2, module5, module6, module1, module3])
def test_collects_classifiers(self):
ivy_info = self.parse_ivy_report('ivy_utils_resources/report_with_multiple_classifiers.xml')
ref = IvyModuleRef("toplevel", "toplevelmodule", "latest")
def collector(r):
x = ivy_info.modules_by_ref.get(r)
if x:
return {x}
else:
return set()
result = ivy_info.traverse_dependency_graph(ref, collector, dict())
self.assertEqual(
{IvyModule(ref=IvyModuleRef(org='org1',
name='name1',
rev='0.0.1',
classifier=None,
ext=u'jar'),
artifact='ivy2cache_path/org1/name1.jar',
callers=(IvyModuleRef(org='toplevel',
name='toplevelmodule',
rev='latest',
classifier=None,
ext=u'jar'),)),
IvyModule(ref=IvyModuleRef(org='org1',
name='name1',
rev='0.0.1',
classifier='wut',
ext=u'jar'),
artifact='ivy2cache_path/org1/name1-wut.jar',
callers=(IvyModuleRef(org='toplevel',
name='toplevelmodule',
rev='latest',
classifier=None,
ext=u'jar'),))},
result)
def test_fetch_ivy_xml_requests_url_for_dependency_containing_url(self):
with temporary_dir() as temp_dir:
ivyxml = os.path.join(temp_dir, 'ivy.xml')
IvyUtils.generate_fetch_ivy([JarDependency('org-f', 'name-f', 'rev-f', url='an-url')],
ivyxml,
('default',),
'some-name')
with open(ivyxml) as f:
self.assertIn('an-url', f.read())
def test_fetch_requests_classifiers(self):
with temporary_dir() as temp_dir:
ivyxml = os.path.join(temp_dir, 'ivy.xml')
IvyUtils.generate_fetch_ivy([JarDependency('org-f', 'name-f', 'rev-f', classifier='a-classifier')],
ivyxml,
('default',),
'some-name')
with open(ivyxml) as f:
self.assertIn('a-classifier', f.read())
def test_fetch_applies_mutable(self):
with temporary_dir() as temp_dir:
ivyxml = os.path.join(temp_dir, 'ivy.xml')
IvyUtils.generate_fetch_ivy([JarDependency('org-f', 'name-f', 'rev-f', mutable=True)],
ivyxml,
('default',),
'some-name')
with open(ivyxml) as f:
self.assertIn('changing="true"', f.read())
def test_resolve_ivy_xml_requests_classifiers(self):
with temporary_dir() as temp_dir:
ivyxml = os.path.join(temp_dir, 'ivy.xml')
jar_dep = JarDependency('org-f', 'name-f', 'rev-f', classifier='a-classifier')
IvyUtils.generate_ivy(
[self.make_target('something', JarLibrary, jars=[jar_dep])],
[jar_dep],
excludes=[],
ivyxml=ivyxml,
confs=('default',),
resolve_hash_name='some-name',
jar_dep_manager=namedtuple('stub_jar_dep_manager', ['resolve_version_conflict'])(lambda x: x))
with open(ivyxml) as f:
self.assertIn('classifier="a-classifier', f.read())
def test_ivy_resolve_report_copying_fails_when_report_is_missing(self):
with temporary_dir() as dir:
with self.assertRaises(IvyUtils.IvyError):
IvyUtils._copy_ivy_reports({'default': os.path.join(dir, 'to-file')},
['default'], dir, 'another-hash-name')
class IvyUtilsResolveStepsTest(BaseTest):
def test_if_not_all_symlinked_files_exist_after_successful_resolve_fail(self):
resolve = IvyResolveStep(
['default'],
'hash_name',
None,
False,
'cache_dir',
'workdir')
    # Stub out resolve and load, returning a result with one missing artifact.
resolve._do_resolve = do_nothing
resolve.load = return_resolve_result_missing_artifacts
with self.assertRaises(IvyResolveMappingError):
resolve.exec_and_load(None, None, [], None, None, None)
def test_if_not_all_symlinked_files_exist_after_successful_fetch_fail(self):
fetch = IvyFetchStep(['default'],
'hash_name',
False,
None,
'ivy_cache_dir', 'global_ivy_workdir')
    # Stub out fetch and load, returning a result with one missing artifact.
fetch._do_fetch = do_nothing
fetch._load_from_fetch = return_resolve_result_missing_artifacts
with self.assertRaises(IvyResolveMappingError):
fetch.exec_and_load(None, None, [], None, None, None)
def test_missing_symlinked_jar_in_candidates(self):
empty_symlink_map = {}
result = IvyResolveResult(['non-existent-file-location'], empty_symlink_map, 'hash-name',
{'default':
self.ivy_report_path('ivy_utils_resources/report_with_diamond.xml')
})
with self.assertRaises(IvyResolveMappingError):
list(result.resolved_jars_for_each_target('default',
[self.make_target('t', JarLibrary,
jars=[JarDependency('org1',
'name1')])
]))
def ivy_report_path(self, rel_path):
return os.path.join('tests/python/pants_test/backend/jvm/tasks', rel_path)
class IvyFrozenResolutionTest(BaseTest):
def test_spec_without_a_real_target(self):
with temporary_file() as resolve_file:
json.dump(
{"default":{"coord_to_attrs":{}, "target_to_coords":{"non-existent-target":[]}}},
resolve_file)
resolve_file.close()
with self.assertRaises(FrozenResolution.MissingTarget):
FrozenResolution.load_from_file(resolve_file.name, [])
| apache-2.0 |
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1_modules/rfc8360.py | 13 | 1075 | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Resource Public Key Infrastructure (RPKI) Validation Reconsidered
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc8360.txt
# https://www.rfc-editor.org/errata/eid5870
#
from pyasn1.type import univ
from pyasn1_modules import rfc3779
from pyasn1_modules import rfc5280
# IP Address Delegation Extension V2
id_pe_ipAddrBlocks_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.28')
IPAddrBlocks = rfc3779.IPAddrBlocks
# Autonomous System Identifier Delegation Extension V2
id_pe_autonomousSysIds_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.29')
ASIdentifiers = rfc3779.ASIdentifiers
# Map of Certificate Extension OIDs to Extensions is added to the
# ones that are in rfc5280.py
_certificateExtensionsMapUpdate = {
id_pe_ipAddrBlocks_v2: IPAddrBlocks(),
id_pe_autonomousSysIds_v2: ASIdentifiers(),
}
rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
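# A sketch of typical use of the updated map: a certificate decoder can look
# up the ASN.1 type for a V2 extension by its OID, e.g.
#   rfc5280.certificateExtensionsMap[id_pe_ipAddrBlocks_v2]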
| apache-2.0 |
timduru/platform-external-chromium_org | third_party/closure_linter/closure_linter/gjslint.py | 135 | 7991 | #!/usr/bin/env python
# python2.6 for command-line runs using p4lib. pylint: disable-msg=C6301
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks JavaScript files for common style guide violations.
gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
style guide violations. As of now, it checks for the following violations:
* Missing and extra spaces
* Lines longer than 80 characters
* Missing newline at end of file
* Missing semicolon after function declaration
* Valid JsDoc including parameter matching
Someday it will validate to the best of its ability against the entirety of the
JavaScript style guide.
This file is a front end that parses arguments and flags. The core of the code
is in tokenizer.py and checker.py.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import functools
import itertools
import sys
import time
import gflags as flags
from closure_linter import checker
from closure_linter import errorrecord
from closure_linter.common import erroraccumulator
from closure_linter.common import simplefileflags as fileflags
# Attempt import of multiprocessing (should be available in Python 2.6 and up).
try:
# pylint: disable-msg=C6204
import multiprocessing
except ImportError:
multiprocessing = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False,
'Whether to emit warnings in standard unix format.')
flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
flags.DEFINE_boolean('check_html', False,
'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False,
'Whether to show an error count summary.')
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
flags.DEFINE_boolean('multiprocess', False,
                     'Whether to parallelize linting using the '
'multiprocessing module. Disabled by default.')
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary']
def _MultiprocessCheckPaths(paths):
"""Run _CheckPath over mutltiple processes.
Tokenization, passes, and checks are expensive operations. Running in a
single process, they can only run on one CPU/core. Instead,
shard out linting over all CPUs with multiprocessing to parallelize.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
pool = multiprocessing.Pool()
for results in pool.imap(_CheckPath, paths):
for record in results:
yield record
pool.close()
pool.join()
def _CheckPaths(paths):
"""Run _CheckPath on all paths in one thread.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
for path in paths:
results = _CheckPath(path)
for record in results:
yield record
def _CheckPath(path):
"""Check a path and return any errors.
Args:
path: paths to check.
Returns:
A list of errorrecord.ErrorRecords for any found errors.
"""
error_accumulator = erroraccumulator.ErrorAccumulator()
style_checker = checker.JavaScriptStyleChecker(error_accumulator)
style_checker.Check(path)
# Return any errors as error records.
make_error_record = functools.partial(errorrecord.MakeErrorRecord, path)
return map(make_error_record, error_accumulator.GetErrors())
def _GetFilePaths(argv):
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
return fileflags.GetFileList(argv, 'JavaScript', suffixes)
# Error printing functions
def _PrintFileSummary(paths, records):
"""Print a detailed summary of the number of errors in each file."""
paths = list(paths)
paths.sort()
for path in paths:
path_errors = [e for e in records if e.path == path]
print '%s: %d' % (path, len(path_errors))
def _PrintFileSeparator(path):
print '----- FILE : %s -----' % path
def _PrintSummary(paths, error_records):
"""Print a summary of the number of errors and files."""
error_count = len(error_records)
all_paths = set(paths)
all_paths_count = len(all_paths)
  if error_count == 0:
print '%d files checked, no errors found.' % all_paths_count
new_error_count = len([e for e in error_records if e.new_error])
error_paths = set([e.path for e in error_records])
error_paths_count = len(error_paths)
no_error_paths_count = all_paths_count - error_paths_count
if error_count or new_error_count:
print ('Found %d errors, including %d new errors, in %d files '
'(%d files OK).' % (
error_count,
new_error_count,
error_paths_count,
no_error_paths_count))
def _PrintErrorRecords(error_records):
"""Print error records strings in the expected format."""
current_path = None
for record in error_records:
if current_path != record.path:
current_path = record.path
if not FLAGS.unix_mode:
_PrintFileSeparator(current_path)
print record.error_string
def _FormatTime(t):
"""Formats a duration as a human-readable string.
Args:
t: A duration in seconds.
Returns:
A formatted duration string.
"""
if t < 1:
return '%dms' % round(t * 1000)
else:
return '%.2fs' % t
def main(argv=None):
"""Main function.
Args:
argv: Sequence of command line arguments.
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
if FLAGS.time:
start_time = time.time()
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
if FLAGS.multiprocess:
records_iter = _MultiprocessCheckPaths(paths)
else:
records_iter = _CheckPaths(paths)
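  # Tee the record generator: one copy streams errors to stdout as they are
  # produced, the other is drained into a list for the summary below.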
records_iter, records_iter_copy = itertools.tee(records_iter, 2)
_PrintErrorRecords(records_iter_copy)
error_records = list(records_iter)
_PrintSummary(paths, error_records)
exit_code = 0
# If there are any errors
if error_records:
exit_code += 1
# If there are any new errors
if [r for r in error_records if r.new_error]:
exit_code += 2
if exit_code:
if FLAGS.summary:
_PrintFileSummary(paths, error_records)
if FLAGS.beep:
# Make a beep noise.
sys.stdout.write(chr(7))
# Write out instructions for using fixjsstyle script to fix some of the
# reported errors.
fix_args = []
for flag in sys.argv[1:]:
for f in GJSLINT_ONLY_FLAGS:
if flag.startswith(f):
break
else:
fix_args.append(flag)
print """
Some of the errors reported by GJsLint may be auto-fixable using the script
fixjsstyle. Please double check any changes it makes and report any bugs. The
script can be run by executing:
fixjsstyle %s """ % ' '.join(fix_args)
if FLAGS.time:
print 'Done in %s.' % _FormatTime(time.time() - start_time)
sys.exit(exit_code)
if __name__ == '__main__':
main()
| bsd-3-clause |
xiangel/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/yaml3/error.py | 294 | 2533 |
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark:
def __init__(self, name, index, line, column, buffer, pointer):
self.name = name
self.index = index
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
def get_snippet(self, indent=4, max_length=75):
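        # Build a one-line excerpt of roughly max_length characters around
        # the error position, truncating at line breaks and marking any
        # elided text with ' ... '.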
if self.buffer is None:
return None
head = ''
start = self.pointer
while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
start -= 1
if self.pointer-start > max_length/2-1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
end += 1
if end-self.pointer > max_length/2-1:
tail = ' ... '
end -= 5
break
snippet = self.buffer[start:end]
return ' '*indent + head + snippet + tail + '\n' \
+ ' '*(indent+self.pointer-start+len(head)) + '^'
def __str__(self):
snippet = self.get_snippet()
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line+1, self.column+1)
if snippet is not None:
where += ":\n"+snippet
return where
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None):
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
def __str__(self):
lines = []
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None:
lines.append(self.note)
return '\n'.join(lines)
| apache-2.0 |
vadimtk/chrome4sdp | tools/run-bisect-manual-test.py | 9 | 6350 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run Manual Test Bisect Tool
An example usage:
tools/run-bisect-manual-test.py -g 201281 -b 201290
On Linux platform, follow the instructions in this document
https://code.google.com/p/chromium/wiki/LinuxSUIDSandboxDevelopment
to setup the sandbox manually before running the script. Otherwise the script
fails to launch Chrome and exits with an error.
This script serves a similar function to bisect-builds.py, except that it
uses bisect_perf_regression.py. This means that it can obtain builds of
Chromium for revisions where builds aren't available in cloud storage.
"""
import os
import subprocess
import sys
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
_TOOLS_DIR = os.path.abspath(os.path.dirname(__file__))
_BISECT_SCRIPT_PATH = os.path.join(
_TOOLS_DIR, 'auto_bisect', 'bisect_perf_regression.py')
sys.path.append(os.path.join(_TOOLS_DIR, 'telemetry'))
from telemetry.internal.browser import browser_options
def _RunBisectionScript(options):
"""Attempts to execute the bisect script (bisect_perf_regression.py).
Args:
options: The configuration options to pass to the bisect script.
Returns:
An exit code; 0 for success, 1 for failure.
"""
script_path = os.path.join(options.working_directory,
                             'bisect', 'src', 'tools', 'bisect-manual-test.py')
abs_script_path = os.path.abspath(script_path)
test_command = ('python %s --browser=%s --chrome-root=.' %
(abs_script_path, options.browser_type))
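  # The bisect harness runs bisect-manual-test.py as its "performance test";
  # the user's good/bad verdict stands in for a numeric metric.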
cmd = ['python', _BISECT_SCRIPT_PATH,
'-c', test_command,
'-g', options.good_revision,
'-b', options.bad_revision,
'-m', 'manual_test/manual_test',
'-r', '1',
'--working_directory', options.working_directory,
'--build_preference', 'ninja',
'--no_custom_deps',
'--builder_type', options.builder_type]
if options.extra_src:
cmd.extend(['--extra_src', options.extra_src])
if 'cros' in options.browser_type:
cmd.extend(['--target_platform', 'cros'])
if os.environ[CROS_BOARD_ENV] and os.environ[CROS_IP_ENV]:
cmd.extend(['--cros_board', os.environ[CROS_BOARD_ENV]])
cmd.extend(['--cros_remote_ip', os.environ[CROS_IP_ENV]])
else:
      print ('Error: Cros build selected, but BISECT_CROS_IP or '
             'BISECT_CROS_BOARD undefined.\n')
return 1
elif 'android-chrome' == options.browser_type:
if not options.extra_src:
print 'Error: Missing --extra_src to run bisect for android-chrome.'
sys.exit(-1)
cmd.extend(['--target_platform', 'android-chrome'])
elif 'android' in options.browser_type:
cmd.extend(['--target_platform', 'android'])
elif not options.target_build_type:
cmd.extend(['--target_build_type', options.browser_type.title()])
if options.target_build_type:
cmd.extend(['--target_build_type', options.target_build_type])
if options.goma_threads:
cmd.extend(['--use_goma', '--goma_threads', options.goma_threads])
cmd = [str(c) for c in cmd]
return_code = subprocess.call(cmd)
if return_code:
print 'Error: bisect_perf_regression.py had exit code %d.' % return_code
print
return return_code
def main():
"""Does a bisect based on the command-line arguments passed in.
The user will be prompted to classify each revision as good or bad.
"""
usage = ('%prog [options]\n'
'Used to run the bisection script with a manual test.')
options = browser_options.BrowserFinderOptions('release')
parser = options.CreateParser(usage)
parser.add_option('-b', '--bad_revision',
type='str',
help='A bad revision to start bisection. ' +
'Must be later than good revision. May be either a git' +
' or svn revision.')
parser.add_option('-g', '--good_revision',
type='str',
help='A revision to start bisection where performance' +
' test is known to pass. Must be earlier than the ' +
'bad revision. May be either a git or svn revision.')
parser.add_option('-w', '--working_directory',
type='str',
default='..',
help='A working directory to supply to the bisection '
'script, which will use it as the location to checkout '
'a copy of the chromium depot.')
parser.add_option('--extra_src',
type='str',
help='Path to extra source file. If this is supplied, '
'bisect script will use this to override default behavior.')
parser.add_option('--target_build_type',
type='choice',
choices=['Release', 'Debug'],
help='The target build type. Choices are "Release" '
'or "Debug".')
parser.add_option('--goma_threads', default=64,
type='int',
help='Number of goma threads to use. 0 will disable goma.')
parser.add_option('--builder_type', default='',
choices=['perf',
'full',
'android-chrome-perf', ''],
help='Type of builder to get build from. This allows '
'script to use cached builds. By default (empty), binaries '
'are built locally.')
options, _ = parser.parse_args()
error_msg = ''
if not options.good_revision:
error_msg += 'Error: missing required parameter: --good_revision\n'
if not options.bad_revision:
error_msg += 'Error: missing required parameter: --bad_revision\n'
if error_msg:
print error_msg
parser.print_help()
return 1
if 'android' not in options.browser_type and sys.platform.startswith('linux'):
if not os.environ.get('CHROME_DEVEL_SANDBOX'):
      print 'SUID sandbox has not been set up.'\
' See https://code.google.com/p/chromium/wiki/'\
'LinuxSUIDSandboxDevelopment for more information.'
return 1
return _RunBisectionScript(options)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
tedlaz/pyted | sms/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
Win1255HebrewModel = {
'charToOrderMap': win1255_CharToOrderMap,
'precedenceMatrix': HebrewLangModel,
'mTypicalPositiveRatio': 0.984004,
'keepEnglishLetter': False,
'charsetName': "windows-1255"
}
# flake8: noqa
| gpl-3.0 |
SOKP/external_chromium_org | chrome/common/extensions/docs/server2/api_categorizer_test.py | 87 | 2474 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from api_categorizer import APICategorizer
from compiled_file_system import CompiledFileSystem
from extensions_paths import CHROME_EXTENSIONS
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
def _ToTestData(obj):
  '''Transforms |obj| into test data by turning a list of files into an object
  mapping each file to its contents (derived from its name).
'''
return dict((name, name) for name in obj)
_TEST_DATA = {
'api': {
'_api_features.json': '{}',
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': '{}',
'manifest.json': '{}',
'permissions.json': '{}',
},
'public': {
'apps': _ToTestData([
'alarms.html',
'app_window.html',
'experimental_bluetooth.html',
'experimental_power.html',
'storage.html',
'sockets_udp.html'
]),
'extensions': _ToTestData([
'alarms.html',
'browserAction.html',
'experimental_history.html',
'experimental_power.html',
'infobars.html',
'storage.html',
'sockets_udp.html'
]),
},
},
}
}
class APICategorizerTest(unittest.TestCase):
def setUp(self):
self._test_file_system = TestFileSystem(
_TEST_DATA, relative_to=CHROME_EXTENSIONS)
self._compiled_file_system = CompiledFileSystem.Factory(
ObjectStoreCreator.ForTest())
def testGetAPICategory(self):
def assertGetEqual(expected, category, only_on=None):
for platform in ('apps', 'extensions'):
get_category = APICategorizer(
self._test_file_system,
self._compiled_file_system,
platform if only_on is None else only_on).GetCategory(category)
self.assertEqual(expected, get_category)
assertGetEqual('chrome', 'alarms')
assertGetEqual('private', 'musicManagerPrivate')
assertGetEqual('private', 'notDocumentedApi')
assertGetEqual('experimental', 'experimental.bluetooth', only_on='apps')
assertGetEqual('experimental', 'experimental.history', only_on='extensions')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
uni2u/neutron | neutron/tests/unit/ml2/drivers/mech_sriov/test_mech_sriov_nic_switch.py | 12 | 10260 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import config # noqa
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.mech_sriov import mech_driver
from neutron.tests.unit.ml2 import _test_mech_agent as base
MELLANOX_CONNECTX3_PCI_INFO = '15b3:1004'
DEFAULT_PCI_INFO = ['15b3:1004', '8086:10c9']
class TestFakePortContext(base.FakePortContext):
def __init__(self, agent_type, agents, segments,
vnic_type=portbindings.VNIC_NORMAL,
profile={'pci_vendor_info':
MELLANOX_CONNECTX3_PCI_INFO}):
super(TestFakePortContext, self).__init__(agent_type,
agents,
segments,
vnic_type)
self._bound_profile = profile
@property
def current(self):
return {'id': base.PORT_ID,
'binding:vnic_type': self._bound_vnic_type,
'binding:profile': self._bound_profile}
def set_binding(self, segment_id, vif_type, vif_details, state):
self._bound_segment_id = segment_id
self._bound_vif_type = vif_type
self._bound_vif_details = vif_details
self._bound_state = state
class SriovNicSwitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
VIF_TYPE = portbindings.VIF_TYPE_HW_VEB
CAP_PORT_FILTER = False
AGENT_TYPE = constants.AGENT_TYPE_NIC_SWITCH
VLAN_SEGMENTS = base.AgentMechanismVlanTestCase.VLAN_SEGMENTS
GOOD_MAPPINGS = {'fake_physical_network': 'fake_device'}
GOOD_CONFIGS = {'device_mappings': GOOD_MAPPINGS}
BAD_MAPPINGS = {'wrong_physical_network': 'wrong_device'}
BAD_CONFIGS = {'device_mappings': BAD_MAPPINGS}
AGENTS = [{'alive': True,
'configurations': GOOD_CONFIGS}]
AGENTS_DEAD = [{'alive': False,
'configurations': GOOD_CONFIGS}]
AGENTS_BAD = [{'alive': False,
'configurations': GOOD_CONFIGS},
{'alive': True,
'configurations': BAD_CONFIGS}]
def setUp(self):
cfg.CONF.set_override('supported_pci_vendor_devs',
DEFAULT_PCI_INFO,
'ml2_sriov')
cfg.CONF.set_override('agent_required', True, 'ml2_sriov')
super(SriovNicSwitchMechanismBaseTestCase, self).setUp()
self.driver = mech_driver.SriovNicSwitchMechanismDriver()
self.driver.initialize()
def test_check_segment(self):
"""Validate the check_segment call."""
        segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN}
self.assertTrue(self.driver.check_segment(segment))
# Validate a network type not currently supported
segment[api.NETWORK_TYPE] = p_const.TYPE_GRE
self.assertFalse(self.driver.check_segment(segment))
class SriovSwitchMechGenericTestCase(SriovNicSwitchMechanismBaseTestCase,
base.AgentMechanismGenericTestCase):
pass
class SriovMechVlanTestCase(SriovNicSwitchMechanismBaseTestCase,
base.AgentMechanismBaseTestCase):
VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'vlan_segment_id',
api.NETWORK_TYPE: 'vlan',
api.PHYSICAL_NETWORK: 'fake_physical_network',
api.SEGMENTATION_ID: 1234}]
def test_type_vlan(self):
context = TestFakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS,
portbindings.VNIC_DIRECT)
self.driver.bind_port(context)
self._check_bound(context, self.VLAN_SEGMENTS[1])
def test_type_vlan_bad(self):
context = TestFakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.VLAN_SEGMENTS,
portbindings.VNIC_DIRECT)
self.driver.bind_port(context)
self._check_unbound(context)
class SriovSwitchMechVnicTypeTestCase(SriovNicSwitchMechanismBaseTestCase):
def _check_vif_type_for_vnic_type(self, vnic_type,
expected_vif_type):
context = TestFakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS,
vnic_type)
self.driver.bind_port(context)
self.assertEqual(expected_vif_type, context._bound_vif_type)
vlan = int(context._bound_vif_details[portbindings.VIF_DETAILS_VLAN])
self.assertEqual(1234, vlan)
def test_vnic_type_direct(self):
self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT,
portbindings.VIF_TYPE_HW_VEB)
def test_vnic_type_macvtap(self):
self._check_vif_type_for_vnic_type(portbindings.VNIC_MACVTAP,
portbindings.VIF_TYPE_HW_VEB)
class SriovSwitchMechProfileTestCase(SriovNicSwitchMechanismBaseTestCase):
def _check_vif_for_pci_info(self, pci_vendor_info, expected_vif_type):
context = TestFakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS,
portbindings.VNIC_DIRECT,
{'pci_vendor_info': pci_vendor_info})
self.driver.bind_port(context)
self.assertEqual(expected_vif_type, context._bound_vif_type)
def test_profile_supported_pci_info(self):
self._check_vif_for_pci_info(MELLANOX_CONNECTX3_PCI_INFO,
portbindings.VIF_TYPE_HW_VEB)
def test_profile_unsupported_pci_info(self):
with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.'
'mech_driver.LOG') as log_mock:
self._check_vif_for_pci_info('xxxx:yyyy', None)
log_mock.debug.assert_called_with('Refusing to bind due to '
'unsupported pci_vendor device')
class SriovSwitchMechProfileFailTestCase(SriovNicSwitchMechanismBaseTestCase):
def _check_for_pci_vendor_info(self, pci_vendor_info):
context = TestFakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS,
portbindings.VNIC_DIRECT,
pci_vendor_info)
self.driver._check_supported_pci_vendor_device(context)
def test_profile_missing_profile(self):
with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.'
'mech_driver.LOG') as log_mock:
self._check_for_pci_vendor_info({})
log_mock.debug.assert_called_with("Missing profile in port"
" binding")
def test_profile_missing_pci_vendor_info(self):
with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.'
'mech_driver.LOG') as log_mock:
self._check_for_pci_vendor_info({'aa': 'bb'})
log_mock.debug.assert_called_with("Missing pci vendor"
" info in profile")
class SriovSwitchMechVifDetailsTestCase(SriovNicSwitchMechanismBaseTestCase):
def test_vif_details_contains_vlan_id(self):
VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id',
api.NETWORK_TYPE: 'vlan',
api.PHYSICAL_NETWORK: 'fake_physical_network',
api.SEGMENTATION_ID: 1234}]
context = TestFakePortContext(self.AGENT_TYPE,
self.AGENTS,
VLAN_SEGMENTS,
portbindings.VNIC_DIRECT)
self.driver.bind_port(context)
vif_details = context._bound_vif_details
self.assertIsNotNone(vif_details)
vlan_id = int(vif_details.get(portbindings.VIF_DETAILS_VLAN))
self.assertEqual(1234, vlan_id)
class SriovSwitchMechConfigTestCase(SriovNicSwitchMechanismBaseTestCase):
def _set_config(self, pci_devs=['aa:bb']):
cfg.CONF.set_override('mechanism_drivers',
['logger', 'sriovnicswitch'], 'ml2')
cfg.CONF.set_override('supported_pci_vendor_devs', pci_devs,
'ml2_sriov')
def test_pci_vendor_config_single_entry(self):
self._set_config()
self.driver.initialize()
self.assertEqual(['aa:bb'], self.driver.pci_vendor_info)
def test_pci_vendor_config_multiple_entry(self):
self._set_config(['x:y', 'a:b'])
self.driver.initialize()
self.assertEqual(['x:y', 'a:b'], self.driver.pci_vendor_info)
def test_pci_vendor_config_default_entry(self):
self.driver.initialize()
self.assertEqual(DEFAULT_PCI_INFO,
self.driver.pci_vendor_info)
def test_pci_vendor_config_wrong_entry(self):
self._set_config('wrong_entry')
self.assertRaises(cfg.Error, self.driver.initialize)
| apache-2.0 |
crossroadchurch/paul | openlp/plugins/media/__init__.py | 1 | 1818 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`media` module provides the Media plugin, which allows OpenLP to display videos. The media formats supported
depend not only on Python's own capabilities but also, to a large extent, on the codecs installed on the underlying
operating system that Python can pick up and use.
"""
| gpl-2.0 |
mspark93/VTK | ThirdParty/Twisted/twisted/runner/inetdtap.py | 49 | 5273 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""
Twisted inetd TAP support
Maintainer: Andrew Bennetts
Future Plans: more configurability.
"""
import os, pwd, grp, socket
from twisted.runner import inetd, inetdconf
from twisted.python import log, usage
from twisted.internet.protocol import ServerFactory
from twisted.application import internet, service as appservice
try:
import portmap
rpcOk = 1
except ImportError:
rpcOk = 0
# Protocol map
protocolDict = {'tcp': socket.IPPROTO_TCP, 'udp': socket.IPPROTO_UDP}
class Options(usage.Options):
optParameters = [
['rpc', 'r', '/etc/rpc', 'RPC procedure table file'],
['file', 'f', '/etc/inetd.conf', 'Service configuration file']
]
optFlags = [['nointernal', 'i', "Don't run internal services"]]
compData = usage.Completions(
optActions={"file": usage.CompleteFiles('*.conf')}
)
class RPCServer(internet.TCPServer):
    def __init__(self, rpcVersions, rpcConf, proto, service):
        internet.TCPServer.__init__(self, 0, ServerFactory())
        self.rpcVersions = rpcVersions
        self.rpcConf = rpcConf
        self.proto = proto
        self.service = service
    def startService(self):
        internet.TCPServer.startService(self)
        import portmap
        portNo = self._port.getHost()[2]
        service = self.service
        # The RPC program name is the part of service.name before the
        # version spec (e.g. 'name/2-3' -> 'name').
        name = service.name.split('/')[0]
        for version in self.rpcVersions:
            portmap.set(self.rpcConf.services[name], version, self.proto,
                        portNo)
        # Assumption: the original code passed an undefined name 'p' here;
        # the listening socket is the plausible descriptor to hand off.
        inetd.forkPassingFD(service.program, service.programArgs,
                            os.environ, service.user, service.group,
                            self._port.socket)
def makeService(config):
s = appservice.MultiService()
conf = inetdconf.InetdConf()
conf.parseFile(open(config['file']))
rpcConf = inetdconf.RPCServicesConf()
try:
rpcConf.parseFile(open(config['rpc']))
    except Exception:
# We'll survive even if we can't read /etc/rpc
log.deferr()
for service in conf.services:
rpc = service.protocol.startswith('rpc/')
protocol = service.protocol
if rpc and not rpcOk:
log.msg('Skipping rpc service due to lack of rpc support')
continue
if rpc:
# RPC has extra options, so extract that
protocol = protocol[4:] # trim 'rpc/'
if not protocolDict.has_key(protocol):
log.msg('Bad protocol: ' + protocol)
continue
try:
name, rpcVersions = service.name.split('/')
except ValueError:
log.msg('Bad RPC service/version: ' + service.name)
continue
if not rpcConf.services.has_key(name):
log.msg('Unknown RPC service: ' + repr(service.name))
continue
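            # The version spec is either a single number or an inclusive
            # range like '2-4'.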
try:
if '-' in rpcVersions:
start, end = map(int, rpcVersions.split('-'))
rpcVersions = range(start, end+1)
else:
rpcVersions = [int(rpcVersions)]
except ValueError:
log.msg('Bad RPC versions: ' + str(rpcVersions))
continue
if (protocol, service.socketType) not in [('tcp', 'stream'),
('udp', 'dgram')]:
log.msg('Skipping unsupported type/protocol: %s/%s'
% (service.socketType, service.protocol))
continue
# Convert the username into a uid (if necessary)
try:
service.user = int(service.user)
except ValueError:
try:
service.user = pwd.getpwnam(service.user)[2]
except KeyError:
log.msg('Unknown user: ' + service.user)
continue
# Convert the group name into a gid (if necessary)
if service.group is None:
# If no group was specified, use the user's primary group
service.group = pwd.getpwuid(service.user)[3]
else:
try:
service.group = int(service.group)
except ValueError:
try:
service.group = grp.getgrnam(service.group)[2]
except KeyError:
log.msg('Unknown group: ' + service.group)
continue
if service.program == 'internal':
if config['nointernal']:
continue
# Internal services can use a standard ServerFactory
if not inetd.internalProtocols.has_key(service.name):
log.msg('Unknown internal service: ' + service.name)
continue
factory = ServerFactory()
factory.protocol = inetd.internalProtocols[service.name]
elif rpc:
            i = RPCServer(rpcVersions, rpcConf, protocol, service)
i.setServiceParent(s)
continue
else:
# Non-internal non-rpc services use InetdFactory
factory = inetd.InetdFactory(service)
if protocol == 'tcp':
internet.TCPServer(service.port, factory).setServiceParent(s)
elif protocol == 'udp':
raise RuntimeError("not supporting UDP")
return s
| bsd-3-clause |